Replace deprecated processor with a worklet
parent 6f794cca9b
commit 7d9562137e
@@ -129,4 +129,31 @@ declare global {
         // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Error/columnNumber
         columnNumber?: number;
     }
+
+    // https://github.com/microsoft/TypeScript/issues/28308#issuecomment-650802278
+    interface AudioWorkletProcessor {
+        readonly port: MessagePort;
+        process(
+            inputs: Float32Array[][],
+            outputs: Float32Array[][],
+            parameters: Record<string, Float32Array>
+        ): boolean;
+
+    }
+
+    // https://github.com/microsoft/TypeScript/issues/28308#issuecomment-650802278
+    const AudioWorkletProcessor: {
+        prototype: AudioWorkletProcessor;
+        new (options?: AudioWorkletNodeOptions): AudioWorkletProcessor;
+    };
+
+    // https://github.com/microsoft/TypeScript/issues/28308#issuecomment-650802278
+    function registerProcessor(
+        name: string,
+        processorCtor: (new (
+            options?: AudioWorkletNodeOptions
+        ) => AudioWorkletProcessor) & {
+            parameterDescriptors?: AudioParamDescriptor[];
+        }
+    );
 }
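
TypeScript's bundled DOM typings do not describe the AudioWorkletGlobalScope, so the declarations above provide just enough surface (AudioWorkletProcessor, its constructor, and registerProcessor) for worklet code to compile. The parameterDescriptors member in the registerProcessor signature is not exercised anywhere in this change; a minimal sketch of a processor that would use it, with a hypothetical GainProcessor name and "gain" parameter, looks like this:

// Illustrative only (not part of this commit): exercises the optional
// parameterDescriptors static allowed by the registerProcessor declaration.
class GainProcessor extends AudioWorkletProcessor {
    static get parameterDescriptors(): AudioParamDescriptor[] {
        return [{name: "gain", defaultValue: 1, minValue: 0, maxValue: 1}];
    }

    process(inputs: Float32Array[][], outputs: Float32Array[][], parameters: Record<string, Float32Array>): boolean {
        const input = inputs[0];
        const output = outputs[0];
        const gain = parameters["gain"];
        for (let channel = 0; channel < input.length; channel++) {
            for (let i = 0; i < input[channel].length; i++) {
                // gain has one value per frame when automated (a-rate), else a single value
                output[channel][i] = input[channel][i] * (gain.length > 1 ? gain[i] : gain[0]);
            }
        }
        return true; // keep the processor alive
    }
}

registerProcessor("gain-processor", GainProcessor);
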
@@ -0,0 +1,37 @@
+/*
+Copyright 2021 The Matrix.org Foundation C.I.C.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+import {ITimingPayload, PayloadEvent, WORKLET_NAME} from "./consts";
+
+// from AudioWorkletGlobalScope: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletGlobalScope
+declare const currentTime: number;
+declare const currentFrame: number;
+declare const sampleRate: number;
+
+class MxVoiceWorklet extends AudioWorkletProcessor {
+    constructor() {
+        super();
+    }
+
+    process(inputs, outputs, parameters) {
+        this.port.postMessage(<ITimingPayload>{ev: PayloadEvent.Timekeep, timeSeconds: currentTime});
+        return true;
+    }
+}
+
+registerProcessor(WORKLET_NAME, MxVoiceWorklet);
+
+export default null; // to appease module loaders (we never use the export)
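
The browser calls process() once per render quantum (currently 128 sample frames), so at 48kHz this worklet posts a Timekeep message roughly every 2.7ms and returns true to stay alive. If that cadence ever proved too chatty for the main thread, the worklet could batch by frame count. This is only a rough sketch, not part of this commit; the ThrottledVoiceWorklet name and the 1024-frame threshold are made up:

import {ITimingPayload, PayloadEvent} from "./consts";

// from AudioWorkletGlobalScope, as in RecorderWorklet.ts above
declare const currentTime: number;

// Hypothetical variant: only post a Timekeep message about every 1024 frames,
// mirroring the old ScriptProcessorNode cadence instead of every render quantum.
class ThrottledVoiceWorklet extends AudioWorkletProcessor {
    private framesSincePost = 0;

    process(inputs: Float32Array[][], outputs: Float32Array[][], parameters: Record<string, Float32Array>): boolean {
        // inputs[0][0] is the first channel of the first input for this render quantum
        this.framesSincePost += inputs[0]?.[0]?.length ?? 128;
        if (this.framesSincePost >= 1024) {
            this.framesSincePost = 0;
            this.port.postMessage(<ITimingPayload>{ev: PayloadEvent.Timekeep, timeSeconds: currentTime});
        }
        return true; // returning true keeps the processor alive
    }
}
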
@@ -23,6 +23,7 @@ import {clamp} from "../utils/numbers";
 import EventEmitter from "events";
 import {IDestroyable} from "../utils/IDestroyable";
 import {Singleflight} from "../utils/Singleflight";
+import {PayloadEvent, WORKLET_NAME} from "./consts";
 
 const CHANNELS = 1; // stereo isn't important
 const SAMPLE_RATE = 48000; // 48khz is what WebRTC uses. 12khz is where we lose quality.
@@ -49,7 +50,7 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
     private recorderSource: MediaStreamAudioSourceNode;
     private recorderStream: MediaStream;
     private recorderFFT: AnalyserNode;
-    private recorderProcessor: ScriptProcessorNode;
+    private recorderWorklet: AudioWorkletNode;
     private buffer = new Uint8Array(0);
     private mxc: string;
     private recording = false;
@@ -93,18 +94,28 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
         // it makes the time domain less than helpful.
         this.recorderFFT.fftSize = 64;
 
-        // We use an audio processor to get accurate timing information.
-        // The size of the audio buffer largely decides how quickly we push timing/waveform data
-        // out of this class. Smaller buffers mean we update more frequently as we can't hold as
-        // many bytes. Larger buffers mean slower updates. For scale, 1024 gives us about 30Hz of
-        // updates and 2048 gives us about 20Hz. We use 1024 to get as close to perceived realtime
-        // as possible. Must be a power of 2.
-        this.recorderProcessor = this.recorderContext.createScriptProcessor(1024, CHANNELS, CHANNELS);
+        // Set up our worklet. We use this for timing information and waveform analysis: the
+        // web audio API prefers this be done async to avoid holding the main thread with math.
+        const mxRecorderWorkletPath = document.body.dataset.vectorRecorderWorkletScript;
+        if (!mxRecorderWorkletPath) {
+            throw new Error("Unable to create recorder: no worklet script registered");
+        }
+        await this.recorderContext.audioWorklet.addModule(mxRecorderWorkletPath);
+        this.recorderWorklet = new AudioWorkletNode(this.recorderContext, WORKLET_NAME);
 
         // Connect our inputs and outputs
         this.recorderSource.connect(this.recorderFFT);
-        this.recorderSource.connect(this.recorderProcessor);
-        this.recorderProcessor.connect(this.recorderContext.destination);
+        this.recorderSource.connect(this.recorderWorklet);
+        this.recorderWorklet.connect(this.recorderContext.destination);
+
+        // Dev note: we can't use `addEventListener` for some reason. It just doesn't work.
+        this.recorderWorklet.port.onmessage = (ev) => {
+            switch(ev.data['ev']) {
+                case PayloadEvent.Timekeep:
+                    this.processAudioUpdate(ev.data['timeSeconds']);
+                    break;
+            }
+        };
 
         this.recorder = new Recorder({
             encoderPath, // magic from webpack
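
The worklet script has to be loaded as its own module through audioWorklet.addModule(), so it cannot simply ride along in the main bundle; the hosting app is expected to publish the compiled script's URL via the data-vector-recorder-worklet-script attribute on <body>, which the DOM exposes here as dataset.vectorRecorderWorkletScript. How the host sets that attribute is outside this diff; a minimal sketch, with a made-up bundle path:

// Hypothetical host-side wiring (not part of this diff): expose the compiled
// worklet bundle's URL before any VoiceRecording is constructed.
document.body.dataset.vectorRecorderWorkletScript = "/bundles/recorder-worklet.js";
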
@@ -151,7 +162,7 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
         return this.mxc;
     }
 
-    private processAudioUpdate = (ev: AudioProcessingEvent) => {
+    private processAudioUpdate = (timeSeconds: number) => {
         if (!this.recording) return;
 
         // The time domain is the input to the FFT, which means we use an array of the same
@@ -175,12 +186,12 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
 
         this.observable.update({
             waveform: translatedData,
-            timeSeconds: ev.playbackTime,
+            timeSeconds: timeSeconds,
         });
 
         // Now that we've updated the data/waveform, let's do a time check. We don't want to
         // go horribly over the limit. We also emit a warning state if needed.
-        const secondsLeft = TARGET_MAX_LENGTH - ev.playbackTime;
+        const secondsLeft = TARGET_MAX_LENGTH - timeSeconds;
         if (secondsLeft <= 0) {
             // noinspection JSIgnoredPromiseFromCall - we aren't concerned with it overlapping
             this.stop();
@@ -204,7 +215,6 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
         }
         this.observable = new SimpleObservable<IRecordingUpdate>();
         await this.makeRecorder();
-        this.recorderProcessor.addEventListener("audioprocess", this.processAudioUpdate);
         await this.recorder.start();
         this.recording = true;
         this.emit(RecordingState.Started);
@@ -218,6 +228,7 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
 
         // Disconnect the source early to start shutting down resources
         this.recorderSource.disconnect();
+        this.recorderWorklet.disconnect();
         await this.recorder.stop();
 
         // close the context after the recorder so the recorder doesn't try to
@@ -229,7 +240,6 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
 
         // Finally do our post-processing and clean up
         this.recording = false;
-        this.recorderProcessor.removeEventListener("audioprocess", this.processAudioUpdate);
         await this.recorder.close();
         this.emit(RecordingState.Ended);
 
@@ -0,0 +1,29 @@
+/*
+Copyright 2021 The Matrix.org Foundation C.I.C.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+export const WORKLET_NAME = "mx-voice-worklet";
+
+export enum PayloadEvent {
+    Timekeep = "timekeep",
+}
+
+export interface IPayload {
+    ev: PayloadEvent;
+}
+
+export interface ITimingPayload extends IPayload {
+    timeSeconds: number;
+}
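
These types describe the messages the worklet posts over its MessagePort. Structured cloning strips the static types, which is why VoiceRecording switches on ev.data['ev'] by hand; a small type guard could recover the typing on the receiving side. Illustrative only; the isTimingPayload helper is not part of this commit:

import {IPayload, ITimingPayload, PayloadEvent} from "./consts";

// Narrow an incoming worklet message to a timing payload before touching timeSeconds.
function isTimingPayload(payload: IPayload): payload is ITimingPayload {
    return payload.ev === PayloadEvent.Timekeep;
}

// Usage sketch against an AudioWorkletNode's port:
// node.port.onmessage = (ev: MessageEvent) => {
//     if (isTimingPayload(ev.data)) {
//         console.log("recorded for", ev.data.timeSeconds, "seconds");
//     }
// };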