Improve error recovery when starting a recording

This helps return microphone access to the user.
pull/21833/head
Travis Ralston 2021-05-05 22:30:22 -06:00
parent b5c25498c8
commit b61fe2f8e6
2 changed files with 89 additions and 68 deletions
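
The core of the change is the release pattern sketched below: if any setup step after getUserMedia() throws, every acquired MediaStreamTrack must be stopped explicitly, because the browser only releases the microphone (and turns off the recording indicator) once all tracks are stopped. A minimal sketch of that pattern in isolation, away from the worklet and encoder wiring in the real diff (acquireMic and the setup placeholder are illustrative, not part of this commit):

    async function acquireMic(): Promise<MediaStream> {
        const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
        try {
            // ... follow-up setup that may throw (AudioContext, worklet, encoder) ...
            return stream;
        } catch (e) {
            // Dropping the reference is not enough; each track must be stopped
            // before the browser releases the device.
            stream.getTracks().forEach(t => t.stop());
            throw e; // let the caller surface the failure
        }
    }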


@@ -73,7 +73,9 @@ class ConsoleLogger {
         // Convert objects and errors to helpful things
         args = args.map((arg) => {
-            if (arg instanceof Error) {
+            if (arg instanceof DOMException) {
+                return arg.message + ` (${arg.name} | ${arg.code}) ` + (arg.stack ? `\n${arg.stack}` : '');
+            } else if (arg instanceof Error) {
                 return arg.message + (arg.stack ? `\n${arg.stack}` : '');
             } else if (typeof (arg) === 'object') {
                 try {
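
The rageshake hunk above exists because DOMExceptions stringify poorly: they often arrive with an empty or generic message and no stack, so the name field (plus the legacy numeric code, usually 0 for modern error names) is the only useful signal. A quick illustration of what the new branch logs (the constructed error is a stand-in for a real getUserMedia() failure):

    const err = new DOMException("Permission denied", "NotAllowedError");
    // The plain Error branch would log only "Permission denied" - ambiguous in a rageshake.
    // The DOMException branch keeps the name: "Permission denied (NotAllowedError | 0) "
    console.log(err.message + ` (${err.name} | ${err.code}) `);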


@@ -90,78 +90,97 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
     }

     private async makeRecorder() {
-        this.recorderStream = await navigator.mediaDevices.getUserMedia({
-            audio: {
-                channelCount: CHANNELS,
-                noiseSuppression: true, // browsers ignore constraints they can't honour
-                deviceId: CallMediaHandler.getAudioInput(),
-            },
-        });
-        this.recorderContext = new AudioContext({
-            // latencyHint: "interactive", // we don't want a latency hint (this causes data smoothing)
-        });
-        this.recorderSource = this.recorderContext.createMediaStreamSource(this.recorderStream);
-        this.recorderFFT = this.recorderContext.createAnalyser();
-
-        // Bring the FFT time domain down a bit. The default is 2048, and this must be a power
-        // of two. We use 64 points because we happen to know down the line we need less than
-        // that, but 32 would be too few. Large numbers are not helpful here and do not add
-        // precision: they introduce higher precision outputs of the FFT (frequency data), but
-        // it makes the time domain less than helpful.
-        this.recorderFFT.fftSize = 64;
-
-        // Set up our worklet. We use this for timing information and waveform analysis: the
-        // web audio API prefers this be done async to avoid holding the main thread with math.
-        const mxRecorderWorkletPath = document.body.dataset.vectorRecorderWorkletScript;
-        if (!mxRecorderWorkletPath) {
-            throw new Error("Unable to create recorder: no worklet script registered");
-        }
-        await this.recorderContext.audioWorklet.addModule(mxRecorderWorkletPath);
-        this.recorderWorklet = new AudioWorkletNode(this.recorderContext, WORKLET_NAME);
-
-        // Connect our inputs and outputs
-        this.recorderSource.connect(this.recorderFFT);
-        this.recorderSource.connect(this.recorderWorklet);
-        this.recorderWorklet.connect(this.recorderContext.destination);
-
-        // Dev note: we can't use `addEventListener` for some reason. It just doesn't work.
-        this.recorderWorklet.port.onmessage = (ev) => {
-            switch (ev.data['ev']) {
-                case PayloadEvent.Timekeep:
-                    this.processAudioUpdate(ev.data['timeSeconds']);
-                    break;
-                case PayloadEvent.AmplitudeMark:
-                    // Sanity check to make sure we're adding about one sample per second
-                    if (ev.data['forSecond'] === this.amplitudes.length) {
-                        this.amplitudes.push(ev.data['amplitude']);
-                    }
-                    break;
-            }
-        };
-
-        this.recorder = new Recorder({
-            encoderPath, // magic from webpack
-            encoderSampleRate: SAMPLE_RATE,
-            encoderApplication: 2048, // voice (default is "audio")
-            streamPages: true, // this speeds up the encoding process by using CPU over time
-            encoderFrameSize: 20, // ms, arbitrary frame size we send to the encoder
-            numberOfChannels: CHANNELS,
-            sourceNode: this.recorderSource,
-            encoderBitRate: BITRATE,
-
-            // We use low values for the following to ease CPU usage - the resulting waveform
-            // is indistinguishable for a voice message. Note that the underlying library will
-            // pick defaults which prefer the highest possible quality, CPU be damned.
-            encoderComplexity: 3, // 0-10, 10 is slow and high quality.
-            resampleQuality: 3, // 0-10, 10 is slow and high quality
-        });
-        this.recorder.ondataavailable = (a: ArrayBuffer) => {
-            const buf = new Uint8Array(a);
-            const newBuf = new Uint8Array(this.buffer.length + buf.length);
-            newBuf.set(this.buffer, 0);
-            newBuf.set(buf, this.buffer.length);
-            this.buffer = newBuf;
-        };
+        try {
+            this.recorderStream = await navigator.mediaDevices.getUserMedia({
+                audio: {
+                    channelCount: CHANNELS,
+                    noiseSuppression: true, // browsers ignore constraints they can't honour
+                    deviceId: CallMediaHandler.getAudioInput(),
+                },
+            });
+            this.recorderContext = new AudioContext({
+                // latencyHint: "interactive", // we don't want a latency hint (this causes data smoothing)
+            });
+            this.recorderSource = this.recorderContext.createMediaStreamSource(this.recorderStream);
+            this.recorderFFT = this.recorderContext.createAnalyser();
+
+            // Bring the FFT time domain down a bit. The default is 2048, and this must be a power
+            // of two. We use 64 points because we happen to know down the line we need less than
+            // that, but 32 would be too few. Large numbers are not helpful here and do not add
+            // precision: they introduce higher precision outputs of the FFT (frequency data), but
+            // it makes the time domain less than helpful.
+            this.recorderFFT.fftSize = 64;
+
+            // Set up our worklet. We use this for timing information and waveform analysis: the
+            // web audio API prefers this be done async to avoid holding the main thread with math.
+            const mxRecorderWorkletPath = document.body.dataset.vectorRecorderWorkletScript;
+            if (!mxRecorderWorkletPath) {
+                // noinspection ExceptionCaughtLocallyJS
+                throw new Error("Unable to create recorder: no worklet script registered");
+            }
+            await this.recorderContext.audioWorklet.addModule(mxRecorderWorkletPath);
+            this.recorderWorklet = new AudioWorkletNode(this.recorderContext, WORKLET_NAME);
+
+            // Connect our inputs and outputs
+            this.recorderSource.connect(this.recorderFFT);
+            this.recorderSource.connect(this.recorderWorklet);
+            this.recorderWorklet.connect(this.recorderContext.destination);
+
+            // Dev note: we can't use `addEventListener` for some reason. It just doesn't work.
+            this.recorderWorklet.port.onmessage = (ev) => {
+                switch (ev.data['ev']) {
+                    case PayloadEvent.Timekeep:
+                        this.processAudioUpdate(ev.data['timeSeconds']);
+                        break;
+                    case PayloadEvent.AmplitudeMark:
+                        // Sanity check to make sure we're adding about one sample per second
+                        if (ev.data['forSecond'] === this.amplitudes.length) {
+                            this.amplitudes.push(ev.data['amplitude']);
+                        }
+                        break;
+                }
+            };
+
+            this.recorder = new Recorder({
+                encoderPath, // magic from webpack
+                encoderSampleRate: SAMPLE_RATE,
+                encoderApplication: 2048, // voice (default is "audio")
+                streamPages: true, // this speeds up the encoding process by using CPU over time
+                encoderFrameSize: 20, // ms, arbitrary frame size we send to the encoder
+                numberOfChannels: CHANNELS,
+                sourceNode: this.recorderSource,
+                encoderBitRate: BITRATE,
+
+                // We use low values for the following to ease CPU usage - the resulting waveform
+                // is indistinguishable for a voice message. Note that the underlying library will
+                // pick defaults which prefer the highest possible quality, CPU be damned.
+                encoderComplexity: 3, // 0-10, 10 is slow and high quality.
+                resampleQuality: 3, // 0-10, 10 is slow and high quality
+            });
+            this.recorder.ondataavailable = (a: ArrayBuffer) => {
+                const buf = new Uint8Array(a);
+                const newBuf = new Uint8Array(this.buffer.length + buf.length);
+                newBuf.set(this.buffer, 0);
+                newBuf.set(buf, this.buffer.length);
+                this.buffer = newBuf;
+            };
+        } catch (e) {
+            console.error("Error starting recording: ", e);
+            if (e instanceof DOMException) { // Unhelpful DOMExceptions are common - parse them sanely
+                console.error(`${e.name} (${e.code}): ${e.message}`);
+            }
+
+            // Clean up as best as possible
+            if (this.recorderStream) this.recorderStream.getTracks().forEach(t => t.stop());
+            if (this.recorderSource) this.recorderSource.disconnect();
+            if (this.recorder) this.recorder.close();
+            if (this.recorderContext) {
+                // noinspection ES6MissingAwait - not important that we wait
+                this.recorderContext.close();
+            }

+            throw e; // rethrow so upstream can handle it
+        }
     }

     private get audioBuffer(): Uint8Array {
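
Because makeRecorder() now cleans up and rethrows, a caller can report the failure without worrying about a dangling microphone grab. A hypothetical caller, assuming the class's public start() path runs makeRecorder() (startVoiceMessage and showToast are illustrative, not part of this diff):

    async function startVoiceMessage(recording: VoiceRecording) {
        try {
            await recording.start(); // runs makeRecorder() internally
        } catch (e) {
            // Permission denial is worth a distinct message; the mic is already released.
            if (e instanceof DOMException && e.name === "NotAllowedError") {
                showToast("Microphone permission was denied");
            } else {
                showToast("Unable to start recording");
            }
        }
    }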