Merge branch 'livekit' into valere/auto_fit_based_on_video_ratio

This commit is contained in:
Valere
2026-03-02 14:31:47 +01:00
38 changed files with 2291 additions and 2033 deletions

View File

@@ -0,0 +1,32 @@
/*
Copyright 2023, 2024 New Vector Ltd.
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { type LocalParticipant } from "livekit-client";
import { type Behavior } from "../Behavior";
import {
type BaseScreenShareInputs,
type BaseScreenShareViewModel,
createBaseScreenShare,
} from "./ScreenShareViewModel";
import { type ObservableScope } from "../ObservableScope";
/**
 * View model for a screen share belonging to the local participant.
 */
export interface LocalScreenShareViewModel extends BaseScreenShareViewModel {
  // Discriminant: marks this media as belonging to the local user
  local: true;
}
/**
 * Inputs for constructing a LocalScreenShareViewModel.
 */
export interface LocalScreenShareInputs extends BaseScreenShareInputs {
  // The local LiveKit participant, or null while not connected
  participant$: Behavior<LocalParticipant | null>;
}
/**
 * Creates the view model for a screen share owned by the local participant.
 * This is the base screen share media tagged with the `local: true`
 * discriminant.
 * @param scope - Scope controlling the lifetime of created observables.
 * @param inputs - Construction inputs for the underlying screen share media.
 */
export function createLocalScreenShare(
  scope: ObservableScope,
  inputs: LocalScreenShareInputs,
): LocalScreenShareViewModel {
  const base = createBaseScreenShare(scope, inputs);
  return { ...base, local: true };
}

View File

@@ -0,0 +1,137 @@
/*
Copyright 2023, 2024 New Vector Ltd.
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import {
facingModeFromLocalTrack,
type LocalParticipant,
LocalVideoTrack,
TrackEvent,
} from "livekit-client";
import {
fromEvent,
map,
merge,
type Observable,
of,
startWith,
switchMap,
} from "rxjs";
import { logger } from "matrix-js-sdk/lib/logger";
import { type Behavior } from "../Behavior";
import {
type BaseUserMediaInputs,
type BaseUserMediaViewModel,
createBaseUserMedia,
} from "./UserMediaViewModel";
import { type ObservableScope } from "../ObservableScope";
import { alwaysShowSelf } from "../../settings/settings";
import { platform } from "../../Platform";
import { type MediaDevices } from "../MediaDevices";
/**
 * View model for the local participant's user media (camera and microphone).
 */
export interface LocalUserMediaViewModel extends BaseUserMediaViewModel {
  // Discriminant: marks this media as belonging to the local user
  local: true;
  /**
   * Whether the video should be mirrored.
   */
  mirror$: Behavior<boolean>;
  /**
   * Whether to show this tile in a highly visible location near the start of
   * the grid.
   */
  alwaysShow$: Behavior<boolean>;
  // Updates the "always show self" setting
  setAlwaysShow: (value: boolean) => void;
  /**
   * Action switching between front and back cameras, or null when camera
   * switching is unavailable (e.g. on desktop).
   */
  switchCamera$: Behavior<(() => void) | null>;
}
/**
 * Inputs for constructing a LocalUserMediaViewModel.
 */
export interface LocalUserMediaInputs extends Omit<
  BaseUserMediaInputs,
  "statsType"
> {
  // The local LiveKit participant, or null while not connected
  participant$: Behavior<LocalParticipant | null>;
  // Device manager, used to record which camera was selected
  mediaDevices: MediaDevices;
}
/**
 * Creates the view model for the local participant's camera/microphone media.
 *
 * Extends the base user media with local-only features: video mirroring for
 * front-facing cameras, the persisted "always show self" setting, and (on
 * mobile platforms) a front/back camera switcher.
 * @param scope - Scope controlling the lifetime of created observables.
 */
export function createLocalUserMedia(
  scope: ObservableScope,
  { mediaDevices, ...inputs }: LocalUserMediaInputs,
): LocalUserMediaViewModel {
  // Local media reports outbound (sender-side) RTP stats
  const baseUserMedia = createBaseUserMedia(scope, {
    ...inputs,
    statsType: "outbound-rtp",
  });
  /**
   * The local video track as an observable that emits whenever the track
   * changes, the camera is switched, or the track is muted.
   */
  const videoTrack$: Observable<LocalVideoTrack | null> =
    baseUserMedia.video$.pipe(
      switchMap((v) => {
        const track = v?.publication.track;
        if (!(track instanceof LocalVideoTrack)) return of(null);
        return merge(
          // Watch for track restarts because they indicate a camera switch.
          // This event is also emitted when unmuting the track object.
          fromEvent(track, TrackEvent.Restarted).pipe(
            startWith(null),
            map(() => track),
          ),
          // When the track object is muted, reset it to null.
          fromEvent(track, TrackEvent.Muted).pipe(map(() => null)),
        );
      }),
    );
  return {
    ...baseUserMedia,
    local: true,
    mirror$: scope.behavior(
      videoTrack$.pipe(
        // Mirror only front-facing cameras (those that face the user)
        map(
          (track) =>
            track !== null &&
            facingModeFromLocalTrack(track).facingMode === "user",
        ),
      ),
    ),
    // Backed by the shared, persisted "always show self" setting
    alwaysShow$: alwaysShowSelf.value$,
    setAlwaysShow: alwaysShowSelf.setValue,
    switchCamera$: scope.behavior(
      // Desktop platforms get no camera switch shortcut
      platform === "desktop"
        ? of(null)
        : videoTrack$.pipe(
            map((track) => {
              if (track === null) return null;
              const facingMode = facingModeFromLocalTrack(track).facingMode;
              // If the camera isn't front or back-facing, don't provide a switch
              // camera shortcut at all
              if (facingMode !== "user" && facingMode !== "environment")
                return null;
              // Restart the track with a camera facing the opposite direction
              return (): void =>
                void track
                  .restartTrack({
                    facingMode: facingMode === "user" ? "environment" : "user",
                  })
                  .then(() => {
                    // Inform the MediaDevices which camera was chosen
                    const deviceId =
                      track.mediaStreamTrack.getSettings().deviceId;
                    if (deviceId !== undefined)
                      mediaDevices.videoInput.select(deviceId);
                  })
                  .catch((e) =>
                    logger.error("Failed to switch camera", facingMode, e),
                  );
            }),
          ),
    ),
  };
}

View File

@@ -0,0 +1,198 @@
/*
Copyright 2025-2026 Element Software Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { combineLatest, map, of, switchMap } from "rxjs";
import {
type LocalParticipant,
ParticipantEvent,
type RemoteParticipant,
} from "livekit-client";
import { observeParticipantEvents } from "@livekit/components-core";
import { type ObservableScope } from "../ObservableScope.ts";
import type { Behavior } from "../Behavior.ts";
import type { MediaDevices } from "../MediaDevices.ts";
import { observeSpeaker$ } from "./observeSpeaker.ts";
import { generateItems } from "../../utils/observable.ts";
import { type TaggedParticipant } from "../CallViewModel/remoteMembers/MatrixLivekitMembers.ts";
import { type UserMediaViewModel } from "./UserMediaViewModel.ts";
import { type ScreenShareViewModel } from "./ScreenShareViewModel.ts";
import {
createLocalUserMedia,
type LocalUserMediaInputs,
} from "./LocalUserMediaViewModel.ts";
import {
createRemoteUserMedia,
type RemoteUserMediaInputs,
} from "./RemoteUserMediaViewModel.ts";
import { createLocalScreenShare } from "./LocalScreenShareViewModel.ts";
import { createRemoteScreenShare } from "./RemoteScreenShareViewModel.ts";
/**
* Sorting bins defining the order in which media tiles appear in the layout.
*/
enum SortingBin {
/**
* Yourself, when the "always show self" option is on.
*/
SelfAlwaysShown,
/**
* Participants that are sharing their screen.
*/
Presenters,
/**
* Participants that have been speaking recently.
*/
Speakers,
/**
* Participants that have their hand raised.
*/
HandRaised,
/**
* Participants with video.
*/
Video,
/**
* Participants not sharing any video.
*/
NoVideo,
/**
* Yourself, when the "always show self" option is off.
*/
SelfNotAlwaysShown,
}
/**
* A user media item to be presented in a tile. This is a thin wrapper around
* UserMediaViewModel which additionally carries data relevant to the tile
* layout algorithms (data which the MediaView component should be ignorant of).
*/
export type WrappedUserMediaViewModel = UserMediaViewModel & {
/**
* All screen share media associated with this user media.
*/
screenShares$: Behavior<ScreenShareViewModel[]>;
/**
* Which sorting bin the media item should be placed in.
*/
bin$: Behavior<SortingBin>;
};
interface WrappedUserMediaInputs extends Omit<
LocalUserMediaInputs & RemoteUserMediaInputs,
"participant$"
> {
participant: TaggedParticipant;
mediaDevices: MediaDevices;
pretendToBeDisconnected$: Behavior<boolean>;
}
/**
 * Creates a user media view model wrapped with layout-relevant data: the
 * user's associated screen shares and the sorting bin their tile belongs in.
 * @param scope - Scope controlling the lifetime of created observables.
 */
export function createWrappedUserMedia(
  scope: ObservableScope,
  {
    participant,
    mediaDevices,
    pretendToBeDisconnected$,
    ...inputs
  }: WrappedUserMediaInputs,
): WrappedUserMediaViewModel {
  // Local and remote participants use different underlying view models
  const userMedia =
    participant.type === "local"
      ? createLocalUserMedia(scope, {
          participant$: participant.value$,
          mediaDevices,
          ...inputs,
        })
      : createRemoteUserMedia(scope, {
          participant$: participant.value$,
          pretendToBeDisconnected$,
          ...inputs,
        });
  // TypeScript needs this widening of the type to happen in a separate statement
  const participant$: Behavior<LocalParticipant | RemoteParticipant | null> =
    participant.value$;
  // Watch the participant's track (un)publications and derive a screen
  // share view model whenever a screen share is active
  const screenShares$ = scope.behavior(
    participant$.pipe(
      switchMap((p) =>
        p === null
          ? of([])
          : observeParticipantEvents(
              p,
              ParticipantEvent.TrackPublished,
              ParticipantEvent.TrackUnpublished,
              ParticipantEvent.LocalTrackPublished,
              ParticipantEvent.LocalTrackUnpublished,
            ).pipe(
              // Technically more than one screen share might be possible... our
              // MediaViewModels don't support it though since they look for a unique
              // track for the given source. So generateItems here is a bit overkill.
              generateItems(
                `${inputs.id} screenShares$`,
                function* (p) {
                  if (p.isScreenShareEnabled)
                    yield {
                      keys: ["screen-share"],
                      data: undefined,
                    };
                },
                (scope, _data$, key) => {
                  const id = `${inputs.id}:${key}`;
                  return participant.type === "local"
                    ? createLocalScreenShare(scope, {
                        ...inputs,
                        id,
                        participant$: participant.value$,
                      })
                    : createRemoteScreenShare(scope, {
                        ...inputs,
                        id,
                        participant$: participant.value$,
                        pretendToBeDisconnected$,
                      });
                },
              ),
            ),
      ),
    ),
  );
  // Whether the user has spoken recently enough to count as a "speaker"
  const speaker$ = scope.behavior(observeSpeaker$(userMedia.speaking$));
  // Presenting means having at least one active screen share
  const presenter$ = scope.behavior(
    screenShares$.pipe(map((screenShares) => screenShares.length > 0)),
  );
  return {
    ...userMedia,
    screenShares$,
    // Pick the highest-priority bin that applies to this user
    bin$: scope.behavior(
      combineLatest(
        [
          speaker$,
          presenter$,
          userMedia.videoEnabled$,
          userMedia.handRaised$,
          // alwaysShow only exists for the local user; null for remote users
          userMedia.local ? userMedia.alwaysShow$ : of<boolean | null>(null),
        ],
        (speaker, presenter, video, handRaised, alwaysShow) => {
          if (alwaysShow !== null)
            return alwaysShow
              ? SortingBin.SelfAlwaysShown
              : SortingBin.SelfNotAlwaysShown;
          else if (presenter) return SortingBin.Presenters;
          else if (speaker) return SortingBin.Speakers;
          else if (handRaised) return SortingBin.HandRaised;
          else if (video) return SortingBin.Video;
          else return SortingBin.NoVideo;
        },
      ),
    ),
  };
}
export type MediaItem = WrappedUserMediaViewModel | ScreenShareViewModel;

View File

@@ -0,0 +1,222 @@
/*
Copyright 2024 New Vector Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { expect, onTestFinished, test, vi } from "vitest";
import {
type LocalTrackPublication,
LocalVideoTrack,
TrackEvent,
} from "livekit-client";
import { waitFor } from "@testing-library/dom";
import {
mockLocalParticipant,
mockMediaDevices,
mockRtcMembership,
mockLocalMedia,
mockRemoteMedia,
withTestScheduler,
mockRemoteParticipant,
} from "../../utils/test";
import { constant } from "../Behavior";
// Minimal stand-ins for browser media classes, which are not available in
// the Node test environment
global.MediaStreamTrack = class {} as unknown as {
  new (): MediaStreamTrack;
  prototype: MediaStreamTrack;
};
global.MediaStream = class {} as unknown as {
  new (): MediaStream;
  prototype: MediaStream;
};
// Hoisted so individual tests can swap the reported platform (e.g. "android")
const platformMock = vi.hoisted(() => vi.fn(() => "desktop"));
vi.mock("../../Platform", () => ({
  get platform(): string {
    return platformMock();
  },
}));
// A single RTC membership shared by all tests in this file
const rtcMembership = mockRtcMembership("@alice:example.org", "AAAA");
// Exercises the remote media playback volume controls: muting via toggle,
// adjusting/committing via the slider, and restoring the last committed
// non-zero volume on unmute. setVolume calls are asserted against a spy,
// while playbackVolume$ is checked with marble syntax.
test("control a participant's volume", () => {
  const setVolumeSpy = vi.fn();
  const vm = mockRemoteMedia(
    rtcMembership,
    {},
    mockRemoteParticipant({ setVolume: setVolumeSpy }),
  );
  withTestScheduler(({ expectObservable, schedule }) => {
    schedule("-ab---c---d|", {
      a() {
        // Try muting by toggling
        vm.togglePlaybackMuted();
        expect(setVolumeSpy).toHaveBeenLastCalledWith(0);
      },
      b() {
        // Try unmuting by dragging the slider back up
        vm.adjustPlaybackVolume(0.6);
        vm.adjustPlaybackVolume(0.8);
        vm.commitPlaybackVolume();
        expect(setVolumeSpy).toHaveBeenCalledWith(0.6);
        expect(setVolumeSpy).toHaveBeenLastCalledWith(0.8);
      },
      c() {
        // Try muting by dragging the slider back down
        vm.adjustPlaybackVolume(0.2);
        vm.adjustPlaybackVolume(0);
        vm.commitPlaybackVolume();
        expect(setVolumeSpy).toHaveBeenCalledWith(0.2);
        expect(setVolumeSpy).toHaveBeenLastCalledWith(0);
      },
      d() {
        // Try unmuting by toggling
        vm.togglePlaybackMuted();
        // The volume should return to the last non-zero committed volume
        expect(setVolumeSpy).toHaveBeenLastCalledWith(0.8);
      },
    });
    expectObservable(vm.playbackVolume$).toBe("ab(cd)(ef)g", {
      a: 1,
      b: 0,
      c: 0.6,
      d: 0.8,
      e: 0.2,
      f: 0,
      g: 0.8,
    });
  });
});
// The "always show self" setting is persisted, so a change made through one
// view model should appear as the initial value of the next view model
test("local media remembers whether it should always be shown", () => {
  const vm1 = mockLocalMedia(
    rtcMembership,
    {},
    mockLocalParticipant({}),
    mockMediaDevices({}),
  );
  withTestScheduler(({ expectObservable, schedule }) => {
    schedule("-a|", { a: () => vm1.setAlwaysShow(false) });
    expectObservable(vm1.alwaysShow$).toBe("ab", { a: true, b: false });
  });
  // Next local media should start out *not* always shown
  const vm2 = mockLocalMedia(
    rtcMembership,
    {},
    mockLocalParticipant({}),
    mockMediaDevices({}),
  );
  withTestScheduler(({ expectObservable, schedule }) => {
    schedule("-a|", { a: () => vm2.setAlwaysShow(true) });
    expectObservable(vm2.alwaysShow$).toBe("ab", { a: false, b: true });
  });
});
// Simulates a mobile device with front and back cameras: invoking
// switchCamera$ should restart the track with the opposite facing mode and
// report the newly selected device to MediaDevices.
test("switch cameras", async () => {
  // Camera switching is only available on mobile
  platformMock.mockReturnValue("android");
  onTestFinished(() => void platformMock.mockReset());
  // Construct a mock video track which knows how to be restarted
  const track = new LocalVideoTrack({
    getConstraints() {},
    addEventListener() {},
    removeEventListener() {},
  } as unknown as MediaStreamTrack);
  let deviceId = "front camera";
  const restartTrack = vi.fn(async ({ facingMode }) => {
    // Restarting flips which mock device is active and notifies listeners
    deviceId = facingMode === "user" ? "front camera" : "back camera";
    track.emit(TrackEvent.Restarted);
    return Promise.resolve();
  });
  track.restartTrack = restartTrack;
  Object.defineProperty(track, "mediaStreamTrack", {
    get() {
      return {
        label: "Video",
        getSettings: (): object => ({
          deviceId,
          facingMode: deviceId === "front camera" ? "user" : "environment",
        }),
      };
    },
  });
  const selectVideoInput = vi.fn();
  const vm = mockLocalMedia(
    rtcMembership,
    {},
    mockLocalParticipant({
      getTrackPublication() {
        return { track } as unknown as LocalTrackPublication;
      },
    }),
    mockMediaDevices({
      videoInput: {
        available$: constant(new Map()),
        selected$: constant(undefined),
        select: selectVideoInput,
      },
    }),
  );
  // Switch to back camera
  vm.switchCamera$.value!();
  expect(restartTrack).toHaveBeenCalledExactlyOnceWith({
    facingMode: "environment",
  });
  await waitFor(() => {
    expect(selectVideoInput).toHaveBeenCalledTimes(1);
    expect(selectVideoInput).toHaveBeenCalledWith("back camera");
  });
  expect(deviceId).toBe("back camera");
  // Switch to front camera
  vm.switchCamera$.value!();
  expect(restartTrack).toHaveBeenCalledTimes(2);
  expect(restartTrack).toHaveBeenLastCalledWith({ facingMode: "user" });
  await waitFor(() => {
    expect(selectVideoInput).toHaveBeenCalledTimes(2);
    expect(selectVideoInput).toHaveBeenLastCalledWith("front camera");
  });
  expect(deviceId).toBe("front camera");
});
// waitingForMedia$ should only be true while we expect a participant that
// hasn't appeared yet
test("remote media is in waiting state when participant has not yet connected", () => {
  const vm = mockRemoteMedia(rtcMembership, {}, null); // null participant
  expect(vm.waitingForMedia$.value).toBe(true);
});
test("remote media is not in waiting state when participant is connected", () => {
  const vm = mockRemoteMedia(rtcMembership, {}, mockRemoteParticipant({}));
  expect(vm.waitingForMedia$.value).toBe(false);
});
// A connected participant counts even if they haven't published anything
test("remote media is not in waiting state when participant is connected with no publications", () => {
  const vm = mockRemoteMedia(
    rtcMembership,
    {},
    mockRemoteParticipant({
      getTrackPublication: () => undefined,
      getTrackPublications: () => [],
    }),
  );
  expect(vm.waitingForMedia$.value).toBe(false);
});
// With no LiveKit room, no participant is expected at all
test("remote media is not in waiting state when user does not intend to publish anywhere", () => {
  const vm = mockRemoteMedia(
    rtcMembership,
    {},
    mockRemoteParticipant({}),
    undefined, // No room (no advertised transport)
  );
  expect(vm.waitingForMedia$.value).toBe(false);
});

View File

@@ -0,0 +1,44 @@
/*
Copyright 2023, 2024 New Vector Ltd.
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { type Behavior } from "../Behavior";
import { type ScreenShareViewModel } from "./ScreenShareViewModel";
import { type UserMediaViewModel } from "./UserMediaViewModel";
/**
* A participant's media.
*/
export type MediaViewModel = UserMediaViewModel | ScreenShareViewModel;
/**
* Properties which are common to all MediaViewModels.
*/
export interface BaseMediaViewModel {
/**
* An opaque identifier for this media.
*/
id: string;
/**
* The Matrix user to which this media belongs.
*/
userId: string;
displayName$: Behavior<string>;
mxcAvatarUrl$: Behavior<string | undefined>;
}
export type BaseMediaInputs = BaseMediaViewModel;
// All this function does is strip out superfluous data from the input object
/**
 * Builds a BaseMediaViewModel from the given inputs, stripping out any
 * superfluous extra properties the caller may have passed along.
 */
export function createBaseMedia(inputs: BaseMediaInputs): BaseMediaViewModel {
  const { id, userId, displayName$, mxcAvatarUrl$ } = inputs;
  return { id, userId, displayName$, mxcAvatarUrl$ };
}

View File

@@ -0,0 +1,272 @@
/*
Copyright 2023, 2024 New Vector Ltd.
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import {
type Room as LivekitRoom,
RoomEvent as LivekitRoomEvent,
type Participant,
type Track,
} from "livekit-client";
import {
type AudioSource,
roomEventSelector,
type TrackReference,
type VideoSource,
} from "@livekit/components-core";
import { type LocalParticipant, type RemoteParticipant } from "livekit-client";
import {
combineLatest,
distinctUntilChanged,
filter,
map,
type Observable,
of,
startWith,
switchMap,
throttleTime,
} from "rxjs";
import { type Behavior } from "../Behavior";
import { type BaseMediaViewModel, createBaseMedia } from "./MediaViewModel";
import { type EncryptionSystem } from "../../e2ee/sharedKeyManagement";
import { type ObservableScope } from "../ObservableScope";
import { observeTrackReference$ } from "../observeTrackReference";
import { E2eeType } from "../../e2ee/e2eeType";
import { observeInboundRtpStreamStats$ } from "./observeRtpStreamStats";
// TODO: Encryption status is kinda broken and thus unused right now. Remove?
export enum EncryptionStatus {
  /** Still waiting for the participant or their media; status unknown. */
  Connecting,
  /** Media is being received and decrypted successfully. */
  Okay,
  /** A "MissingKey" error was reported for this participant. */
  KeyMissing,
  /** An "InvalidKey" error was reported (per-participant key mode). */
  KeyInvalid,
  /** An "InvalidKey" error was reported (shared-key mode), suggesting a wrong password. */
  PasswordInvalid,
}
/**
* Media belonging to an active member of the RTC session.
*/
export interface MemberMediaViewModel extends BaseMediaViewModel {
/**
* The LiveKit video track for this media.
*/
video$: Behavior<TrackReference | undefined>;
/**
* The URL of the LiveKit focus on which this member should be publishing.
* Exposed for debugging.
*/
focusUrl$: Behavior<string | undefined>;
/**
* Whether there should be a warning that this media is unencrypted.
*/
unencryptedWarning$: Behavior<boolean>;
encryptionStatus$: Behavior<EncryptionStatus>;
}
export interface MemberMediaInputs extends BaseMediaViewModel {
participant$: Behavior<LocalParticipant | RemoteParticipant | null>;
livekitRoom$: Behavior<LivekitRoom | undefined>;
audioSource: AudioSource;
videoSource: VideoSource;
focusUrl$: Behavior<string | undefined>;
encryptionSystem: EncryptionSystem;
}
/**
 * Creates the media view model fields shared by all active session members:
 * track references, encryption status, and the unencrypted-media warning.
 * @param scope - Scope controlling the lifetime of created observables.
 */
export function createMemberMedia(
  scope: ObservableScope,
  {
    participant$,
    livekitRoom$,
    audioSource,
    videoSource,
    focusUrl$,
    encryptionSystem,
    ...inputs
  }: MemberMediaInputs,
): MemberMediaViewModel {
  // Follows the participant's track reference for a given source, emitting
  // undefined while no participant is connected
  const trackBehavior$ = (
    source: Track.Source,
  ): Behavior<TrackReference | undefined> =>
    scope.behavior(
      participant$.pipe(
        switchMap((p) =>
          !p ? of(undefined) : observeTrackReference$(p, source),
        ),
      ),
    );
  const audio$ = trackBehavior$(audioSource);
  const video$ = trackBehavior$(videoSource);
  return {
    ...createBaseMedia(inputs),
    video$,
    focusUrl$,
    // Warn when encryption is expected but either track is unencrypted
    unencryptedWarning$: scope.behavior(
      combineLatest(
        [audio$, video$],
        (a, v) =>
          encryptionSystem.kind !== E2eeType.NONE &&
          (a?.publication.isEncrypted === false ||
            v?.publication.isEncrypted === false),
      ),
    ),
    encryptionStatus$: scope.behavior(
      participant$.pipe(
        switchMap((participant): Observable<EncryptionStatus> => {
          if (!participant) {
            // No participant yet: still connecting
            return of(EncryptionStatus.Connecting);
          } else if (
            participant.isLocal ||
            encryptionSystem.kind === E2eeType.NONE
          ) {
            // Local media and unencrypted calls can't have decryption errors
            return of(EncryptionStatus.Okay);
          } else if (encryptionSystem.kind === E2eeType.PER_PARTICIPANT) {
            // Per-participant keys: distinguish missing vs invalid keys, and
            // treat successfully flowing media as implicit success
            return combineLatest([
              encryptionErrorObservable$(
                livekitRoom$,
                participant,
                encryptionSystem,
                "MissingKey",
              ),
              encryptionErrorObservable$(
                livekitRoom$,
                participant,
                encryptionSystem,
                "InvalidKey",
              ),
              observeRemoteTrackReceivingOkay$(participant, audioSource),
              observeRemoteTrackReceivingOkay$(participant, videoSource),
            ]).pipe(
              map(([keyMissing, keyInvalid, audioOkay, videoOkay]) => {
                if (keyMissing) return EncryptionStatus.KeyMissing;
                if (keyInvalid) return EncryptionStatus.KeyInvalid;
                if (audioOkay || videoOkay) return EncryptionStatus.Okay;
                return undefined; // no change
              }),
              filter((x) => !!x),
              startWith(EncryptionStatus.Connecting),
            );
          } else {
            // Shared-key mode: an invalid key means the password is wrong
            return combineLatest([
              encryptionErrorObservable$(
                livekitRoom$,
                participant,
                encryptionSystem,
                "InvalidKey",
              ),
              observeRemoteTrackReceivingOkay$(participant, audioSource),
              observeRemoteTrackReceivingOkay$(participant, videoSource),
            ]).pipe(
              map(
                ([keyInvalid, audioOkay, videoOkay]):
                  | EncryptionStatus
                  | undefined => {
                  if (keyInvalid) return EncryptionStatus.PasswordInvalid;
                  if (audioOkay || videoOkay) return EncryptionStatus.Okay;
                  return undefined; // no change
                },
              ),
              filter((x) => !!x),
              startWith(EncryptionStatus.Connecting),
            );
          }
        }),
      ),
    ),
  };
}
/**
 * Emits whether the room has reported an encryption error matching the given
 * participant and criteria string, starting with false.
 * @param criteria - Substring to look for in the error message, e.g.
 *   "MissingKey" or "InvalidKey".
 */
function encryptionErrorObservable$(
  room$: Behavior<LivekitRoom | undefined>,
  participant: Participant,
  encryptionSystem: EncryptionSystem,
  criteria: string,
): Observable<boolean> {
  return room$.pipe(
    switchMap((room) => {
      if (room === undefined) return of(false);
      return roomEventSelector(room, LivekitRoomEvent.EncryptionError).pipe(
        map((e) => {
          const [err] = e;
          if (encryptionSystem.kind === E2eeType.PER_PARTICIPANT) {
            return (
              // Ideally we would pull the participant identity from the field on the error.
              // However, it gets lost in the serialization process between workers.
              // So, instead we do a string match
              (err?.message.includes(participant.identity) &&
                err?.message.includes(criteria)) ??
              false
            );
          } else if (encryptionSystem.kind === E2eeType.SHARED_KEY) {
            // Shared key: any error matching the criteria applies to everyone
            return !!err?.message.includes(criteria);
          }
          return false;
        }),
      );
    }),
    distinctUntilChanged(),
    // NOTE(review): throttleTime drops trailing values by default, so a rapid
    // error -> recovery transition could leave a stale value here — confirm
    // this is intended
    throttleTime(1000), // Throttle to avoid spamming the UI
    startWith(false),
  );
}
/**
 * Watches whether we're successfully receiving and decoding the given
 * participant's track, by comparing frame counters between successive RTP
 * stats samples. Emits true/false only when the situation changes, starting
 * with undefined (unknown).
 */
function observeRemoteTrackReceivingOkay$(
  participant: Participant,
  source: Track.Source,
): Observable<boolean | undefined> {
  // The previous stats sample, used to compute deltas
  let lastStats: {
    framesDecoded: number | undefined;
    framesDropped: number | undefined;
    framesReceived: number | undefined;
  } = {
    framesDecoded: undefined,
    framesDropped: undefined,
    framesReceived: undefined,
  };
  return observeInboundRtpStreamStats$(participant, source).pipe(
    map((stats) => {
      if (!stats) return undefined;
      // Keep only the frame counters we care about
      const { framesDecoded, framesDropped, framesReceived } = stats;
      return {
        framesDecoded,
        framesDropped,
        framesReceived,
      };
    }),
    filter((newStats) => !!newStats),
    map((newStats): boolean | undefined => {
      const oldStats = lastStats;
      lastStats = newStats;
      // Deltas are only meaningful once we have two numeric samples
      if (
        typeof newStats.framesReceived === "number" &&
        typeof oldStats.framesReceived === "number" &&
        typeof newStats.framesDecoded === "number" &&
        typeof oldStats.framesDecoded === "number"
      ) {
        const framesReceivedDelta =
          newStats.framesReceived - oldStats.framesReceived;
        const framesDecodedDelta =
          newStats.framesDecoded - oldStats.framesDecoded;
        // if we received >0 frames and managed to decode >0 frames then we treat that as success
        if (framesReceivedDelta > 0) {
          return framesDecodedDelta > 0;
        }
      }
      // no change
      return undefined;
    }),
    filter((x) => typeof x === "boolean"),
    startWith(undefined),
  );
}

View File

@@ -0,0 +1,44 @@
/*
Copyright 2023, 2024 New Vector Ltd.
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { type RemoteParticipant } from "livekit-client";
import { map } from "rxjs";
import { type Behavior } from "../Behavior";
import {
type BaseScreenShareInputs,
type BaseScreenShareViewModel,
createBaseScreenShare,
} from "./ScreenShareViewModel";
import { type ObservableScope } from "../ObservableScope";
export interface RemoteScreenShareViewModel extends BaseScreenShareViewModel {
local: false;
/**
* Whether this screen share's video should be displayed.
*/
videoEnabled$: Behavior<boolean>;
}
export interface RemoteScreenShareInputs extends BaseScreenShareInputs {
participant$: Behavior<RemoteParticipant | null>;
pretendToBeDisconnected$: Behavior<boolean>;
}
/**
 * Creates the view model for a screen share published by a remote
 * participant.
 * @param scope - Scope controlling the lifetime of created observables.
 */
export function createRemoteScreenShare(
  scope: ObservableScope,
  { pretendToBeDisconnected$, ...inputs }: RemoteScreenShareInputs,
): RemoteScreenShareViewModel {
  const base = createBaseScreenShare(scope, inputs);
  // The video is hidden while we're pretending to be disconnected
  const videoEnabled$ = scope.behavior(
    pretendToBeDisconnected$.pipe(map((hidden) => !hidden)),
  );
  return { ...base, local: false, videoEnabled$ };
}

View File

@@ -0,0 +1,82 @@
/*
Copyright 2023, 2024 New Vector Ltd.
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { type RemoteParticipant } from "livekit-client";
import { combineLatest, map, of, switchMap } from "rxjs";
import { type Behavior } from "../Behavior";
import { createVolumeControls, type VolumeControls } from "../VolumeControls";
import {
type BaseUserMediaInputs,
type BaseUserMediaViewModel,
createBaseUserMedia,
} from "./UserMediaViewModel";
import { type ObservableScope } from "../ObservableScope";
export interface RemoteUserMediaViewModel
extends BaseUserMediaViewModel, VolumeControls {
local: false;
/**
* Whether we are waiting for this user's LiveKit participant to exist. This
* could be because either we or the remote party are still connecting.
*/
waitingForMedia$: Behavior<boolean>;
}
export interface RemoteUserMediaInputs extends Omit<
BaseUserMediaInputs,
"statsType"
> {
participant$: Behavior<RemoteParticipant | null>;
pretendToBeDisconnected$: Behavior<boolean>;
}
/**
 * Creates the view model for a remote participant's user media, adding
 * volume controls and a connection-waiting state on top of the base media.
 * @param scope - Scope controlling the lifetime of created observables.
 */
export function createRemoteUserMedia(
  scope: ObservableScope,
  { pretendToBeDisconnected$, ...inputs }: RemoteUserMediaInputs,
): RemoteUserMediaViewModel {
  // Remote media reports inbound (receiver-side) RTP stats
  const baseUserMedia = createBaseUserMedia(scope, {
    ...inputs,
    statsType: "inbound-rtp",
  });
  return {
    ...baseUserMedia,
    ...createVolumeControls(scope, {
      pretendToBeDisconnected$,
      // Volume changes are applied to the LiveKit participant, when present
      sink$: scope.behavior(
        inputs.participant$.pipe(map((p) => (volume) => p?.setVolume(volume))),
      ),
    }),
    local: false,
    // While pretending to be disconnected, suppress the base speaking and
    // video-enabled state
    speaking$: scope.behavior(
      pretendToBeDisconnected$.pipe(
        switchMap((disconnected) =>
          disconnected ? of(false) : baseUserMedia.speaking$,
        ),
      ),
    ),
    videoEnabled$: scope.behavior(
      pretendToBeDisconnected$.pipe(
        switchMap((disconnected) =>
          disconnected ? of(false) : baseUserMedia.videoEnabled$,
        ),
      ),
    ),
    waitingForMedia$: scope.behavior(
      combineLatest(
        [inputs.livekitRoom$, inputs.participant$],
        (livekitRoom, participant) =>
          // If livekitRoom is undefined, the user is not attempting to publish on
          // any transport and so we shouldn't expect a participant. (They might
          // be a subscribe-only bot for example.)
          livekitRoom !== undefined && participant === null,
      ),
    ),
  };
}

View File

@@ -0,0 +1,51 @@
/*
Copyright 2023, 2024 New Vector Ltd.
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { Track } from "livekit-client";
import { type ObservableScope } from "../ObservableScope";
import { type LocalScreenShareViewModel } from "./LocalScreenShareViewModel";
import {
createMemberMedia,
type MemberMediaInputs,
type MemberMediaViewModel,
} from "./MemberMediaViewModel";
import { type RemoteScreenShareViewModel } from "./RemoteScreenShareViewModel";
/**
* A participant's screen share media.
*/
export type ScreenShareViewModel =
| LocalScreenShareViewModel
| RemoteScreenShareViewModel;
/**
* Properties which are common to all ScreenShareViewModels.
*/
export interface BaseScreenShareViewModel extends MemberMediaViewModel {
type: "screen share";
}
export type BaseScreenShareInputs = Omit<
MemberMediaInputs,
"audioSource" | "videoSource"
>;
/**
 * Creates the fields common to all screen share view models: member media
 * wired to the screen share track sources, plus the "screen share" type tag.
 * @param scope - Scope controlling the lifetime of created observables.
 */
export function createBaseScreenShare(
  scope: ObservableScope,
  inputs: BaseScreenShareInputs,
): BaseScreenShareViewModel {
  // Screen shares always read from the screen share audio/video sources
  const media = createMemberMedia(scope, {
    ...inputs,
    audioSource: Track.Source.ScreenShareAudio,
    videoSource: Track.Source.ScreenShare,
  });
  return { ...media, type: "screen share" };
}

View File

@@ -0,0 +1,162 @@
/*
Copyright 2023, 2024 New Vector Ltd.
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import {
BehaviorSubject,
combineLatest,
map,
type Observable,
of,
Subject,
switchMap,
} from "rxjs";
import {
observeParticipantEvents,
observeParticipantMedia,
} from "@livekit/components-core";
import { ParticipantEvent, Track } from "livekit-client";
import { type ReactionOption } from "../../reactions";
import { type Behavior } from "../Behavior";
import { type LocalUserMediaViewModel } from "./LocalUserMediaViewModel";
import {
createMemberMedia,
type MemberMediaInputs,
type MemberMediaViewModel,
} from "./MemberMediaViewModel";
import { type RemoteUserMediaViewModel } from "./RemoteUserMediaViewModel";
import { type ObservableScope } from "../ObservableScope";
import { showConnectionStats } from "../../settings/settings";
import { observeRtpStreamStats$ } from "./observeRtpStreamStats";
import { videoFit$, videoSizeFromParticipant$ } from "../../utils/videoFit.ts";
/**
* A participant's user media (i.e. their microphone and camera feed).
*/
export type UserMediaViewModel =
| LocalUserMediaViewModel
| RemoteUserMediaViewModel;
export interface BaseUserMediaViewModel extends MemberMediaViewModel {
type: "user";
speaking$: Behavior<boolean>;
audioEnabled$: Behavior<boolean>;
videoEnabled$: Behavior<boolean>;
videoFit$: Behavior<"cover" | "contain">;
toggleCropVideo: () => void;
/**
* The expected identity of the LiveKit participant. Exposed for debugging.
*/
rtcBackendIdentity: string;
handRaised$: Behavior<Date | null>;
reaction$: Behavior<ReactionOption | null>;
audioStreamStats$: Observable<
RTCInboundRtpStreamStats | RTCOutboundRtpStreamStats | undefined
>;
videoStreamStats$: Observable<
RTCInboundRtpStreamStats | RTCOutboundRtpStreamStats | undefined
>;
/**
* Set the actual dimensions of the HTML element.
* This can be used to determine the best video fit (fit to frame / keep ratio).
* @param width - The actual width of the HTML element displaying the video.
* @param height - The actual height of the HTML element displaying the video.
*/
setActualDimensions: (width: number, height: number) => void;
}
export interface BaseUserMediaInputs extends Omit<
MemberMediaInputs,
"audioSource" | "videoSource"
> {
rtcBackendIdentity: string;
handRaised$: Behavior<Date | null>;
reaction$: Behavior<ReactionOption | null>;
statsType: "inbound-rtp" | "outbound-rtp";
}
/**
 * Creates the view model fields common to local and remote user media:
 * speaking/mute state, video fit, reactions, and connection stats.
 * @param scope - Scope controlling the lifetime of created observables.
 */
export function createBaseUserMedia(
  scope: ObservableScope,
  {
    rtcBackendIdentity,
    handRaised$,
    reaction$,
    statsType,
    ...inputs
  }: BaseUserMediaInputs,
): BaseUserMediaViewModel {
  const { participant$ } = inputs;
  // The participant's current microphone/camera publications
  const media$ = scope.behavior(
    participant$.pipe(
      switchMap((p) => (p && observeParticipantMedia(p)) ?? of(undefined)),
    ),
  );
  // NOTE(review): this subject is only ever pushed to here — presumably
  // consumed elsewhere or vestigial; confirm
  const toggleCropVideo$ = new Subject<void>();
  // The rendered element's dimensions, reported via setActualDimensions and
  // used to pick the best video fit
  const actualSize$ = new BehaviorSubject<
    { width: number; height: number } | undefined
  >(undefined);
  return {
    ...createMemberMedia(scope, {
      ...inputs,
      audioSource: Track.Source.Microphone,
      videoSource: Track.Source.Camera,
    }),
    type: "user",
    speaking$: scope.behavior(
      participant$.pipe(
        switchMap((p) =>
          p
            ? observeParticipantEvents(
                p,
                ParticipantEvent.IsSpeakingChanged,
              ).pipe(map((p) => p.isSpeaking))
            : of(false),
        ),
      ),
    ),
    // Enabled means a track is published and not muted
    audioEnabled$: scope.behavior(
      media$.pipe(map((m) => m?.microphoneTrack?.isMuted === false)),
    ),
    videoEnabled$: scope.behavior(
      media$.pipe(map((m) => m?.cameraTrack?.isMuted === false)),
    ),
    videoFit$: videoFit$(
      scope,
      videoSizeFromParticipant$(participant$),
      actualSize$,
    ),
    toggleCropVideo: () => toggleCropVideo$.next(),
    rtcBackendIdentity,
    handRaised$,
    reaction$,
    audioStreamStats$: combineLatest([
      participant$,
      showConnectionStats.value$,
    ]).pipe(
      switchMap(([p, showConnectionStats]) => {
        // Only gather stats when the setting is on and a participant exists
        if (!p || !showConnectionStats) return of(undefined);
        return observeRtpStreamStats$(p, Track.Source.Microphone, statsType);
      }),
    ),
    videoStreamStats$: combineLatest([
      participant$,
      showConnectionStats.value$,
    ]).pipe(
      switchMap(([p, showConnectionStats]) => {
        if (!p || !showConnectionStats) return of(undefined);
        return observeRtpStreamStats$(p, Track.Source.Camera, statsType);
      }),
    ),
    setActualDimensions: (width: number, height: number): void => {
      actualSize$.next({ width, height });
    },
  };
}

View File

@@ -0,0 +1,78 @@
/*
Copyright 2023, 2024 New Vector Ltd.
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import {
LocalTrack,
type Participant,
RemoteTrack,
type Track,
} from "livekit-client";
import {
combineLatest,
interval,
type Observable,
startWith,
switchMap,
map,
} from "rxjs";
import { observeTrackReference$ } from "../observeTrackReference";
/**
 * Observes the RTP stream stats of the given participant's track for the
 * given source, re-reading the underlying RTCStatsReport once per second.
 *
 * Emits undefined initially, and whenever there is no usable track or the
 * report contains no entry of the requested type.
 */
export function observeRtpStreamStats$(
  participant: Participant,
  source: Track.Source,
  type: "inbound-rtp" | "outbound-rtp",
): Observable<
  RTCInboundRtpStreamStats | RTCOutboundRtpStreamStats | undefined
> {
  const trackRef$ = observeTrackReference$(participant, source);
  // Fires immediately, then every second, to trigger a fresh stats read.
  const poll$ = interval(1000).pipe(startWith(0));
  return combineLatest([trackRef$, poll$]).pipe(
    switchMap(async ([trackRef]) => {
      const track = trackRef?.publication?.track;
      // Only local and remote tracks expose getRTCStatsReport; instanceof
      // also rules out a missing track.
      if (!(track instanceof RemoteTrack) && !(track instanceof LocalTrack)) {
        return undefined;
      }
      const report = await track.getRTCStatsReport();
      if (!report) return undefined;
      // find() yields undefined when no entry matches the requested type.
      return [...report.values()].find((stats) => stats.type === type);
    }),
    startWith(undefined),
  );
}
/**
 * Per-second inbound RTP stats for the given participant and track source.
 * Emits undefined until a report is available.
 */
export function observeInboundRtpStreamStats$(
  participant: Participant,
  source: Track.Source,
): Observable<RTCInboundRtpStreamStats | undefined> {
  const stats$ = observeRtpStreamStats$(participant, source, "inbound-rtp");
  // Narrow the union: entries of type "inbound-rtp" are inbound stats.
  return stats$.pipe(map((stats) => stats as RTCInboundRtpStreamStats | undefined));
}
/**
 * Per-second outbound RTP stats for the given participant and track source.
 * Emits undefined until a report is available.
 */
export function observeOutboundRtpStreamStats$(
  participant: Participant,
  source: Track.Source,
): Observable<RTCOutboundRtpStreamStats | undefined> {
  const stats$ = observeRtpStreamStats$(participant, source, "outbound-rtp");
  // Narrow the union: entries of type "outbound-rtp" are outbound stats.
  return stats$.pipe(map((stats) => stats as RTCOutboundRtpStreamStats | undefined));
}

View File

@@ -0,0 +1,110 @@
/*
Copyright 2024 New Vector Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { describe, test } from "vitest";
import { withTestScheduler } from "../../utils/test";
import { observeSpeaker$ } from "./observeSpeaker";
// Marble value lookup: "y" emits true (speaking), "n" emits false (silent).
const yesNo = {
  y: true,
  n: false,
};
describe("observeSpeaker", () => {
  // Cases where the speaker flag must remain false throughout: speaking
  // never lasts a full uninterrupted second.
  describe("does not activate", () => {
    const expectedOutputMarbles = "n";
    test("starts correctly", () => {
      // should default to false when no input is given
      const speakingInputMarbles = "";
      withTestScheduler(({ hot, expectObservable }) => {
        expectObservable(
          observeSpeaker$(hot(speakingInputMarbles, yesNo)),
        ).toBe(expectedOutputMarbles, yesNo);
      });
    });
    test("after no speaking", () => {
      const speakingInputMarbles = "n";
      withTestScheduler(({ hot, expectObservable }) => {
        expectObservable(
          observeSpeaker$(hot(speakingInputMarbles, yesNo)),
        ).toBe(expectedOutputMarbles, yesNo);
      });
    });
    test("with speaking for 1ms", () => {
      const speakingInputMarbles = "y n";
      withTestScheduler(({ hot, expectObservable }) => {
        expectObservable(
          observeSpeaker$(hot(speakingInputMarbles, yesNo)),
        ).toBe(expectedOutputMarbles, yesNo);
      });
    });
    test("with speaking for 999ms", () => {
      // just under the 1s activation threshold
      const speakingInputMarbles = "y 999ms n";
      withTestScheduler(({ hot, expectObservable }) => {
        expectObservable(
          observeSpeaker$(hot(speakingInputMarbles, yesNo)),
        ).toBe(expectedOutputMarbles, yesNo);
      });
    });
    test("with speaking intermittently", () => {
      // repeated 200ms bursts of speech never span a continuous second
      const speakingInputMarbles =
        "y 199ms n 199ms y 199ms n 199ms y 199ms n 199ms y 199ms n 199ms y 199ms n 199ms y 199ms n 199ms y 199ms n 199ms y 199ms n 199ms y 199ms n";
      withTestScheduler(({ hot, expectObservable }) => {
        expectObservable(
          observeSpeaker$(hot(speakingInputMarbles, yesNo)),
        ).toBe(expectedOutputMarbles, yesNo);
      });
    });
    test("with consecutive speaking then stops speaking", () => {
      // duplicate true values arrive 1ms apart; speech still ends before 1s
      const speakingInputMarbles = "y y y y y y y y y y n";
      withTestScheduler(({ hot, expectObservable }) => {
        expectObservable(
          observeSpeaker$(hot(speakingInputMarbles, yesNo)),
        ).toBe(expectedOutputMarbles, yesNo);
      });
    });
  });
  // Cases where speech lasts at least 1s, so the flag turns on — and then
  // requires 60s of silence to turn off again.
  describe("activates", () => {
    test("after 1s", () => {
      // this will activate after 1s as no `n` follows it:
      const speakingInputMarbles = " y";
      const expectedOutputMarbles = "n 999ms y";
      withTestScheduler(({ hot, expectObservable }) => {
        expectObservable(
          observeSpeaker$(hot(speakingInputMarbles, yesNo)),
        ).toBe(expectedOutputMarbles, yesNo);
      });
    });
    test("speaking for 1001ms activates for 60s", () => {
      // silence starts at t=1001ms, so deactivation lands 60s after that
      const speakingInputMarbles = " y 1s n ";
      const expectedOutputMarbles = "n 999ms y 60s n";
      withTestScheduler(({ hot, expectObservable }) => {
        expectObservable(
          observeSpeaker$(hot(speakingInputMarbles, yesNo)),
        ).toBe(expectedOutputMarbles, yesNo);
      });
    });
    test("speaking for 5s activates for 64s", () => {
      // activation at t=1s; silence starts at t=5001ms, so the flag stays on
      // another 60s from there (i.e. 64s after activation)
      const speakingInputMarbles = " y 5s n ";
      const expectedOutputMarbles = "n 999ms y 64s n";
      withTestScheduler(({ hot, expectObservable }) => {
        expectObservable(
          observeSpeaker$(hot(speakingInputMarbles, yesNo)),
        ).toBe(expectedOutputMarbles, yesNo);
      });
    });
  });
});

View File

@@ -0,0 +1,36 @@
/*
Copyright 2024 New Vector Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import {
type Observable,
audit,
merge,
timer,
filter,
startWith,
distinctUntilChanged,
} from "rxjs";
/**
 * Debounced "is a speaker" signal: the source must report 1 second of
 * continuous speaking before this emits true, and 60 seconds of continuous
 * silence before it falls back to false. Starts with false.
 */
export function observeSpeaker$(
  isSpeakingObservable$: Observable<boolean>,
): Observable<boolean> {
  const changes$ = isSpeakingObservable$.pipe(distinctUntilChanged());
  return changes$.pipe(
    // Hold each change back until either its debounce timer fires, or the
    // source flips back to the previous value first — in which case audit
    // re-emits that unchanged value, which distinctUntilChanged below drops.
    audit((speaking) =>
      merge(
        timer(speaking ? 1000 : 60000),
        changes$.pipe(filter((next) => next !== speaking)),
      ),
    ),
    startWith(false),
    // Filter out the re-emissions caused by quick (<1s) toggles such as
    // false -> true -> false.
    distinctUntilChanged(),
  );
}