✨(front) add blurring support on Firefox
We cannot use track-processor-js for Firefox because it uses various approaches that are not compatible with Firefox. Firefox cannot run inference on the GPU, so we must use the CPU; nor could we use MediaStreamTrackProcessor or MediaStreamTrackGenerator. So I had to make a new implementation from the ground up, using canvas filters and CPU inference.
This commit is contained in:
@@ -5,11 +5,9 @@ import { Text, P, ToggleButton, Div, H } from '@/primitives'
|
||||
import { useTranslation } from 'react-i18next'
|
||||
import { HStack, styled, VStack } from '@/styled-system/jsx'
|
||||
import {
|
||||
BackgroundBlur,
|
||||
BackgroundOptions,
|
||||
ProcessorWrapper,
|
||||
BackgroundTransformer,
|
||||
} from '@livekit/track-processors'
|
||||
BackgroundBlurFactory,
|
||||
BackgroundBlurProcessorInterface,
|
||||
} from './blur/index'
|
||||
|
||||
const Information = styled('div', {
|
||||
base: {
|
||||
@@ -27,6 +25,8 @@ enum BlurRadius {
|
||||
NORMAL = 10,
|
||||
}
|
||||
|
||||
const isSupported = BackgroundBlurFactory.isSupported()
|
||||
|
||||
export const Effects = () => {
|
||||
const { t } = useTranslation('rooms', { keyPrefix: 'effects' })
|
||||
const { isCameraEnabled, cameraTrack, localParticipant } =
|
||||
@@ -37,15 +37,12 @@ export const Effects = () => {
|
||||
const localCameraTrack = cameraTrack?.track as LocalVideoTrack
|
||||
|
||||
const getProcessor = () => {
|
||||
return localCameraTrack?.getProcessor() as ProcessorWrapper<BackgroundOptions>
|
||||
return localCameraTrack?.getProcessor() as BackgroundBlurProcessorInterface
|
||||
}
|
||||
|
||||
const getBlurRadius = (): BlurRadius => {
|
||||
const processor = getProcessor()
|
||||
return (
|
||||
(processor?.transformer as BackgroundTransformer)?.options?.blurRadius ||
|
||||
BlurRadius.NONE
|
||||
)
|
||||
return processor?.options.blurRadius || BlurRadius.NONE
|
||||
}
|
||||
|
||||
const toggleBlur = async (blurRadius: number) => {
|
||||
@@ -61,9 +58,11 @@ export const Effects = () => {
|
||||
if (blurRadius == currentBlurRadius && processor) {
|
||||
await localCameraTrack.stopProcessor()
|
||||
} else if (!processor) {
|
||||
await localCameraTrack.setProcessor(BackgroundBlur(blurRadius))
|
||||
await localCameraTrack.setProcessor(
|
||||
BackgroundBlurFactory.getProcessor({ blurRadius })!
|
||||
)
|
||||
} else {
|
||||
await processor?.updateTransformerOptions({ blurRadius })
|
||||
processor?.update({ blurRadius })
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error applying blur:', error)
|
||||
@@ -147,7 +146,7 @@ export const Effects = () => {
|
||||
>
|
||||
{t('heading')}
|
||||
</H>
|
||||
{ProcessorWrapper.isSupported ? (
|
||||
{isSupported ? (
|
||||
<HStack>
|
||||
<ToggleButton
|
||||
size={'sm'}
|
||||
|
||||
@@ -0,0 +1,278 @@
|
||||
import { ProcessorOptions, Track } from 'livekit-client'
|
||||
import {
|
||||
FilesetResolver,
|
||||
ImageSegmenter,
|
||||
ImageSegmenterResult,
|
||||
} from '@mediapipe/tasks-vision'
|
||||
import {
|
||||
CLEAR_TIMEOUT,
|
||||
SET_TIMEOUT,
|
||||
TIMEOUT_TICK,
|
||||
timerWorkerScript,
|
||||
} from './TimerWorker'
|
||||
import { BackgroundBlurProcessorInterface, BackgroundOptions } from '.'
|
||||
|
||||
// Resolution at which the segmentation inference runs. Kept small because the
// inference is executed on the CPU; the mask is scaled back up when compositing.
const PROCESSING_WIDTH = 256
const PROCESSING_HEIGHT = 144

// DOM ids of the working canvases, so they can be found and reused across
// processor restarts instead of being recreated.
const SEGMENTATION_MASK_CANVAS_ID = 'background-blur-local-segmentation'
const BLUR_CANVAS_ID = 'background-blur-local'

// Fallback blur radius (CSS px) used when options.blurRadius is not set.
const DEFAULT_BLUR = '10'
||||
|
||||
/**
|
||||
* This implementation of video blurring is made to be run on CPU for browser that are
|
||||
* not compatible with track-processor-js.
|
||||
*
|
||||
* It also make possible to run blurring on browser that does not implement MediaStreamTrackGenerator and
|
||||
* MediaStreamTrackProcessor.
|
||||
*/
|
||||
export class BackgroundBlurCustomProcessor
|
||||
implements BackgroundBlurProcessorInterface
|
||||
{
|
||||
options: BackgroundOptions
|
||||
name: string
|
||||
processedTrack?: MediaStreamTrack | undefined
|
||||
|
||||
source?: MediaStreamTrack
|
||||
sourceSettings?: MediaTrackSettings
|
||||
videoElement?: HTMLVideoElement
|
||||
videoElementLoaded?: boolean
|
||||
|
||||
// Canvas containg the video processing result, of which we extract as stream.
|
||||
outputCanvas?: HTMLCanvasElement
|
||||
outputCanvasCtx?: CanvasRenderingContext2D
|
||||
|
||||
imageSegmenter?: ImageSegmenter
|
||||
imageSegmenterResult?: ImageSegmenterResult
|
||||
|
||||
// Canvas used for resizing video source and projecting mask.
|
||||
segmentationMaskCanvas?: HTMLCanvasElement
|
||||
segmentationMaskCanvasCtx?: CanvasRenderingContext2D
|
||||
|
||||
// Mask containg the inference result.
|
||||
segmentationMask?: ImageData
|
||||
|
||||
// The resized image of the video source.
|
||||
sourceImageData?: ImageData
|
||||
|
||||
timerWorker?: Worker
|
||||
|
||||
constructor(opts: BackgroundOptions) {
|
||||
this.name = 'blur'
|
||||
this.options = opts
|
||||
}
|
||||
|
||||
static get isSupported() {
|
||||
return navigator.userAgent.toLowerCase().includes('firefox')
|
||||
}
|
||||
|
||||
async init(opts: ProcessorOptions<Track.Kind>) {
|
||||
if (!opts.element) {
|
||||
throw new Error('Element is required for processing')
|
||||
}
|
||||
|
||||
this.source = opts.track as MediaStreamTrack
|
||||
this.sourceSettings = this.source!.getSettings()
|
||||
this.videoElement = opts.element as HTMLVideoElement
|
||||
|
||||
this._createMainCanvas()
|
||||
this._createMaskCanvas()
|
||||
|
||||
const stream = this.outputCanvas!.captureStream()
|
||||
const tracks = stream.getVideoTracks()
|
||||
if (tracks.length == 0) {
|
||||
throw new Error('No tracks found for processing')
|
||||
}
|
||||
this.processedTrack = tracks[0]
|
||||
|
||||
this.segmentationMask = new ImageData(PROCESSING_WIDTH, PROCESSING_HEIGHT)
|
||||
await this.initSegmenter()
|
||||
this._initWorker()
|
||||
}
|
||||
|
||||
update(opts: BackgroundOptions): void {
|
||||
this.options = opts
|
||||
}
|
||||
|
||||
_initWorker() {
|
||||
this.timerWorker = new Worker(timerWorkerScript, {
|
||||
name: 'Blurring',
|
||||
})
|
||||
this.timerWorker.onmessage = (data) => this.onTimerMessage(data)
|
||||
// When hiding camera then showing it again, the onloadeddata callback is not fired again.
|
||||
if (this.videoElementLoaded) {
|
||||
this.timerWorker!.postMessage({
|
||||
id: SET_TIMEOUT,
|
||||
timeMs: 1000 / 30,
|
||||
})
|
||||
} else {
|
||||
this.videoElement!.onloadeddata = () => {
|
||||
this.videoElementLoaded = true
|
||||
this.timerWorker!.postMessage({
|
||||
id: SET_TIMEOUT,
|
||||
timeMs: 1000 / 30,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
onTimerMessage(response: { data: { id: number } }) {
|
||||
if (response.data.id === TIMEOUT_TICK) {
|
||||
this.process()
|
||||
}
|
||||
}
|
||||
|
||||
async initSegmenter() {
|
||||
const vision = await FilesetResolver.forVisionTasks(
|
||||
'https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision/wasm'
|
||||
)
|
||||
this.imageSegmenter = await ImageSegmenter.createFromOptions(vision, {
|
||||
baseOptions: {
|
||||
modelAssetPath:
|
||||
'https://storage.googleapis.com/mediapipe-models/image_segmenter/selfie_segmenter_landscape/float16/latest/selfie_segmenter_landscape.tflite',
|
||||
delegate: 'CPU', // Use CPU for Firefox.
|
||||
},
|
||||
runningMode: 'VIDEO',
|
||||
outputCategoryMask: true,
|
||||
outputConfidenceMasks: false,
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Resize the source video to the processing resolution.
|
||||
*/
|
||||
async sizeSource() {
|
||||
this.segmentationMaskCanvasCtx?.drawImage(
|
||||
this.videoElement!,
|
||||
0,
|
||||
0,
|
||||
this.videoElement!.videoWidth,
|
||||
this.videoElement!.videoHeight,
|
||||
0,
|
||||
0,
|
||||
PROCESSING_WIDTH,
|
||||
PROCESSING_HEIGHT
|
||||
)
|
||||
|
||||
this.sourceImageData = this.segmentationMaskCanvasCtx?.getImageData(
|
||||
0,
|
||||
0,
|
||||
PROCESSING_WIDTH,
|
||||
PROCESSING_WIDTH
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Run the segmentation.
|
||||
*/
|
||||
async segment() {
|
||||
const startTimeMs = performance.now()
|
||||
return new Promise<void>((resolve) => {
|
||||
this.imageSegmenter!.segmentForVideo(
|
||||
this.sourceImageData!,
|
||||
startTimeMs,
|
||||
(result: ImageSegmenterResult) => {
|
||||
this.imageSegmenterResult = result
|
||||
resolve()
|
||||
}
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* TODO: future improvement with WebGL.
|
||||
*/
|
||||
async blur() {
|
||||
const mask = this.imageSegmenterResult!.categoryMask!.getAsUint8Array()
|
||||
for (let i = 0; i < mask.length; ++i) {
|
||||
this.segmentationMask!.data[i * 4 + 3] = 255 - mask[i]
|
||||
}
|
||||
|
||||
this.segmentationMaskCanvasCtx!.putImageData(this.segmentationMask!, 0, 0)
|
||||
|
||||
this.outputCanvasCtx!.globalCompositeOperation = 'copy'
|
||||
this.outputCanvasCtx!.filter = 'blur(8px)'
|
||||
|
||||
this.outputCanvasCtx!.drawImage(
|
||||
this.segmentationMaskCanvas!,
|
||||
0,
|
||||
0,
|
||||
PROCESSING_WIDTH,
|
||||
PROCESSING_HEIGHT,
|
||||
0,
|
||||
0,
|
||||
this.videoElement!.videoWidth,
|
||||
this.videoElement!.videoHeight
|
||||
)
|
||||
|
||||
this.outputCanvasCtx!.globalCompositeOperation = 'source-in'
|
||||
this.outputCanvasCtx!.filter = 'none'
|
||||
this.outputCanvasCtx!.drawImage(this.videoElement!, 0, 0)
|
||||
|
||||
this.outputCanvasCtx!.globalCompositeOperation = 'destination-over'
|
||||
this.outputCanvasCtx!.filter = `blur(${this.options.blurRadius ?? DEFAULT_BLUR}px)`
|
||||
this.outputCanvasCtx!.drawImage(this.videoElement!, 0, 0)
|
||||
}
|
||||
|
||||
async process() {
|
||||
await this.sizeSource()
|
||||
await this.segment()
|
||||
await this.blur()
|
||||
this.timerWorker!.postMessage({
|
||||
id: SET_TIMEOUT,
|
||||
timeMs: 1000 / 30,
|
||||
})
|
||||
}
|
||||
|
||||
_createMainCanvas() {
|
||||
this.outputCanvas = document.querySelector(
|
||||
'canvas#background-blur-local'
|
||||
) as HTMLCanvasElement
|
||||
if (!this.outputCanvas) {
|
||||
this.outputCanvas = this._createCanvas(
|
||||
BLUR_CANVAS_ID,
|
||||
this.sourceSettings!.width!,
|
||||
this.sourceSettings!.height!
|
||||
)
|
||||
}
|
||||
this.outputCanvasCtx = this.outputCanvas.getContext('2d')!
|
||||
}
|
||||
|
||||
_createMaskCanvas() {
|
||||
this.segmentationMaskCanvas = document.querySelector(
|
||||
`#${SEGMENTATION_MASK_CANVAS_ID}`
|
||||
) as HTMLCanvasElement
|
||||
if (!this.segmentationMaskCanvas) {
|
||||
this.segmentationMaskCanvas = this._createCanvas(
|
||||
SEGMENTATION_MASK_CANVAS_ID,
|
||||
PROCESSING_WIDTH,
|
||||
PROCESSING_HEIGHT
|
||||
)
|
||||
}
|
||||
this.segmentationMaskCanvasCtx =
|
||||
this.segmentationMaskCanvas.getContext('2d')!
|
||||
}
|
||||
|
||||
_createCanvas(id: string, width: number, height: number) {
|
||||
const element = document.createElement('canvas')
|
||||
element.setAttribute('id', id)
|
||||
element.setAttribute('width', '' + width)
|
||||
element.setAttribute('height', '' + height)
|
||||
return element
|
||||
}
|
||||
|
||||
async restart(opts: ProcessorOptions<Track.Kind>) {
|
||||
await this.destroy()
|
||||
return this.init(opts)
|
||||
}
|
||||
|
||||
async destroy() {
|
||||
this.timerWorker?.postMessage({
|
||||
id: CLEAR_TIMEOUT,
|
||||
})
|
||||
|
||||
this.timerWorker?.terminate()
|
||||
this.imageSegmenter?.close()
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
import {
|
||||
BackgroundBlur,
|
||||
BackgroundTransformer,
|
||||
ProcessorWrapper,
|
||||
} from '@livekit/track-processors'
|
||||
import { ProcessorOptions, Track } from 'livekit-client'
|
||||
import { BackgroundBlurProcessorInterface, BackgroundOptions } from '.'
|
||||
|
||||
/**
|
||||
* This is simply a wrapper around track-processor-js Processor
|
||||
* in order to be compatible with a common interface BackgroundBlurProcessorInterface
|
||||
* used accross the project.
|
||||
*/
|
||||
export class BackgroundBlurTrackProcessorJsWrapper
|
||||
implements BackgroundBlurProcessorInterface
|
||||
{
|
||||
name: string = 'Blur'
|
||||
|
||||
processor: ProcessorWrapper<BackgroundOptions>
|
||||
|
||||
constructor(opts: BackgroundOptions) {
|
||||
this.processor = BackgroundBlur(opts.blurRadius)
|
||||
}
|
||||
|
||||
async init(opts: ProcessorOptions<Track.Kind>) {
|
||||
return this.processor.init(opts)
|
||||
}
|
||||
|
||||
async restart(opts: ProcessorOptions<Track.Kind>) {
|
||||
return this.processor.restart(opts)
|
||||
}
|
||||
|
||||
async destroy() {
|
||||
return this.processor.destroy()
|
||||
}
|
||||
|
||||
update(opts: BackgroundOptions): void {
|
||||
this.processor.updateTransformerOptions(opts)
|
||||
}
|
||||
|
||||
get processedTrack() {
|
||||
return this.processor.processedTrack
|
||||
}
|
||||
|
||||
get options() {
|
||||
return (this.processor.transformer as BackgroundTransformer).options
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,71 @@
|
||||
/**
 * From https://github.com/jitsi/jitsi-meet
 */

/**
 * SET_TIMEOUT constant schedules a single timer tick; it is set in
 * the id property of the request.data property. The timeMs property must
 * also be set. The consumer re-posts SET_TIMEOUT on each tick to form a loop.
 *
 * ```
 * //Request.data example:
 * {
 *   id: SET_TIMEOUT,
 *   timeMs: 33
 * }
 * ```
 */
export const SET_TIMEOUT = 1

/**
 * CLEAR_TIMEOUT constant is used to cancel the pending tick and it is set in
 * the id property of the request.data property.
 *
 * ```
 * {
 *   id: CLEAR_TIMEOUT
 * }
 * ```
 */
export const CLEAR_TIMEOUT = 2

/**
 * TIMEOUT_TICK constant is used as response and it is set in the id property.
 *
 * ```
 * {
 *   id: TIMEOUT_TICK
 * }
 * ```
 */
export const TIMEOUT_TICK = 3

/**
 * The following code is needed as string to create a URL from a Blob.
 * The URL is then passed to a WebWorker. Reason for this is to enable
 * use of a timer that is not throttled when the tab is inactive
 * (window timers in background tabs are, worker timers are not).
 */
const code = `
  var timer;

  onmessage = function(request) {
    switch (request.data.id) {
      case ${SET_TIMEOUT}: {
        timer = setTimeout(() => {
          postMessage({ id: ${TIMEOUT_TICK} });
        }, request.data.timeMs);
        break;
      }
      case ${CLEAR_TIMEOUT}: {
        if (timer) {
          clearTimeout(timer);
        }
        break;
      }
    }
  };
`

export const timerWorkerScript = URL.createObjectURL(
  new Blob([code], { type: 'application/javascript' })
)
|
||||
@@ -0,0 +1,32 @@
|
||||
import { ProcessorWrapper } from '@livekit/track-processors'
|
||||
import { Track, TrackProcessor } from 'livekit-client'
|
||||
import { BackgroundBlurTrackProcessorJsWrapper } from './BackgroundBlurTrackProcessorJsWrapper'
|
||||
import { BackgroundBlurCustomProcessor } from './BackgroundBlurCustomProcessor'
|
||||
|
||||
// Options shared by every background-blur processor implementation.
export type BackgroundOptions = {
  // Blur radius in CSS pixels; each implementation picks its own default
  // when omitted.
  blurRadius?: number
}

// Common contract implemented both by the track-processor-js wrapper and by
// the custom CPU (Firefox) implementation, so callers are implementation-agnostic.
export interface BackgroundBlurProcessorInterface
  extends TrackProcessor<Track.Kind> {
  // Apply new options; takes effect without restarting the processor.
  update(opts: BackgroundOptions): void
  options: BackgroundOptions
}
|
||||
|
||||
export class BackgroundBlurFactory {
|
||||
static isSupported() {
|
||||
return (
|
||||
ProcessorWrapper.isSupported || BackgroundBlurCustomProcessor.isSupported
|
||||
)
|
||||
}
|
||||
|
||||
static getProcessor(opts: BackgroundOptions) {
|
||||
if (ProcessorWrapper.isSupported) {
|
||||
return new BackgroundBlurTrackProcessorJsWrapper(opts)
|
||||
}
|
||||
if (BackgroundBlurCustomProcessor.isSupported) {
|
||||
return new BackgroundBlurCustomProcessor(opts)
|
||||
}
|
||||
return null
|
||||
}
|
||||
}
|
||||
@@ -87,7 +87,7 @@
|
||||
},
|
||||
"effects": {
|
||||
"activateCamera": "Your camera is disabled. Choose an option to enable it.",
|
||||
"notAvailable": "The blur effect will be available soon on your browser. We're working on it! In the meantime, you can use Google Chrome :(",
|
||||
"notAvailable": "The blur effect will be available soon on your browser. We're working on it! In the meantime, you can use Google Chrome for best performance or Firefox :(",
|
||||
"heading": "Blur",
|
||||
"blur": {
|
||||
"light": "Light blur",
|
||||
|
||||
@@ -87,7 +87,7 @@
|
||||
},
|
||||
"effects": {
|
||||
"activateCamera": "Votre camera est désactivée. Choisissez une option pour l'activer.",
|
||||
"notAvailable": "L'effet de flou sera bientôt disponible sur votre navigateur. Nous y travaillons ! En attendant, vous pouvez utiliser Google Chrome :(",
|
||||
"notAvailable": "L'effet de flou sera bientôt disponible sur votre navigateur. Nous y travaillons ! En attendant, vous pouvez utiliser Google Chrome pour une meilleure performance ou Firefox :(",
|
||||
"heading": "Flou",
|
||||
"blur": {
|
||||
"light": "Léger flou",
|
||||
|
||||
Reference in New Issue
Block a user