From 1419ac6b69ee24cb0a59526c5f7b0e14f3f48aa0 Mon Sep 17 00:00:00 2001
From: Travis Ralston
Date: Thu, 25 Mar 2021 17:12:26 -0600
Subject: [PATCH] Hook up a clock and implement proper design

---
 .../views/rooms/_VoiceRecordComposerTile.scss | 40 +++++++++
 res/css/views/voice_messages/_Waveform.scss   | 20 +++--
 .../legacy-light/css/_legacy-light.scss       |  3 +
 res/themes/light/css/_light.scss              |  3 +
 .../views/rooms/VoiceRecordComposerTile.tsx   | 19 ++++-
 src/components/views/voice_messages/Clock.tsx | 42 ++++++++++
 .../voice_messages/LiveRecordingClock.tsx     | 55 +++++++++++++
 .../voice_messages/LiveRecordingWaveform.tsx  |  4 +-
 src/voice/VoiceRecorder.ts                    | 82 +++++++++++--------
 9 files changed, 222 insertions(+), 46 deletions(-)
 create mode 100644 src/components/views/voice_messages/Clock.tsx
 create mode 100644 src/components/views/voice_messages/LiveRecordingClock.tsx

diff --git a/res/css/views/rooms/_VoiceRecordComposerTile.scss b/res/css/views/rooms/_VoiceRecordComposerTile.scss
index bb36991b4f..2fb112a38c 100644
--- a/res/css/views/rooms/_VoiceRecordComposerTile.scss
+++ b/res/css/views/rooms/_VoiceRecordComposerTile.scss
@@ -34,3 +34,43 @@ limitations under the License.
         background-color: $voice-record-stop-symbol-color;
     }
 }
+
+.mx_VoiceRecordComposerTile_waveformContainer {
+    padding: 5px;
+    padding-right: 4px; // there's 1px from the waveform itself, so account for that
+    padding-left: 15px; // +10px for the live circle, +5px for regular padding
+    background-color: $voice-record-waveform-bg-color;
+    border-radius: 12px;
+    margin-right: 12px; // isolate from stop button
+
+    // Cheat at alignment a bit
+    display: flex;
+    align-items: center;
+
+    position: relative; // important for the live circle
+
+    color: $voice-record-waveform-fg-color;
+    font-size: $font-14px;
+
+    &::before {
+        // TODO: @@ TravisR: Animate
+        content: '';
+        background-color: $voice-record-live-circle-color;
+        width: 10px;
+        height: 10px;
+        position: absolute;
+        left: 8px;
+        top: 16px; // vertically center
+        border-radius: 10px;
+    }
+
+    .mx_Waveform_bar {
+        background-color: $voice-record-waveform-fg-color;
+    }
+
+    .mx_Clock {
+        padding-right: 8px; // isolate from waveform
+        padding-left: 10px; // isolate from live circle
+        width: 42px; // we're not using a monospace font, so fake it
+    }
+}
diff --git a/res/css/views/voice_messages/_Waveform.scss b/res/css/views/voice_messages/_Waveform.scss
index 23eedf2dbd..cf03c84601 100644
--- a/res/css/views/voice_messages/_Waveform.scss
+++ b/res/css/views/voice_messages/_Waveform.scss
@@ -17,18 +17,24 @@ limitations under the License.
 .mx_Waveform {
     position: relative;
     height: 30px; // tallest bar can only be 30px
+    top: 1px; // because of our border trick (see below), we're off by 1px of alignment
     display: flex;
     align-items: center; // so the bars grow from the middle
+    overflow: hidden; // this is cheaper than a `max-height: calc(100% - 4px)` in the bar's CSS.
+
+    // A bar is meant to be a 2x2 circle when at zero height, and otherwise a 2px wide line
+    // with rounded caps.
     .mx_Waveform_bar {
-        width: 2px;
-        margin-left: 1px;
+        width: 0; // 0px width means we'll end up using the border as our width
+        border: 1px solid transparent; // transparent means we'll use the background colour
+        border-radius: 2px; // rounded end caps, based on the border
+        min-height: 0; // like the width, we'll rely on the border to give us height
+        max-height: 100%; // this makes the `height: 42%` work on the element
+        margin-left: 1px; // we want 2px between each bar, so 1px on either side for balance
         margin-right: 1px;
-        background-color: $muted-fg-color;
-        display: inline-block;
-        min-height: 2px;
-        max-height: 100%;
-        border-radius: 2px; // give them soft endcaps
+
+        // background color is handled by the parent components
     }
 }
diff --git a/res/themes/legacy-light/css/_legacy-light.scss b/res/themes/legacy-light/css/_legacy-light.scss
index d7ee496d80..c22a8fa2ff 100644
--- a/res/themes/legacy-light/css/_legacy-light.scss
+++ b/res/themes/legacy-light/css/_legacy-light.scss
@@ -191,6 +191,9 @@ $space-button-outline-color: #E3E8F0;
 
 $voice-record-stop-border-color: #E3E8F0;
 $voice-record-stop-symbol-color: $warning-color;
+$voice-record-waveform-bg-color: #E3E8F0;
+$voice-record-waveform-fg-color: $muted-fg-color;
+$voice-record-live-circle-color: $warning-color;
 
 $roomtile-preview-color: #9e9e9e;
 $roomtile-default-badge-bg-color: #61708b;
diff --git a/res/themes/light/css/_light.scss b/res/themes/light/css/_light.scss
index 577204ef0c..c778420094 100644
--- a/res/themes/light/css/_light.scss
+++ b/res/themes/light/css/_light.scss
@@ -182,6 +182,9 @@ $space-button-outline-color: #E3E8F0;
 
 $voice-record-stop-border-color: #E3E8F0;
 $voice-record-stop-symbol-color: $warning-color;
+$voice-record-waveform-bg-color: #E3E8F0;
+$voice-record-waveform-fg-color: $muted-fg-color;
+$voice-record-live-circle-color: $warning-color;
 
 $roomtile-preview-color: $secondary-fg-color;
 $roomtile-default-badge-bg-color: #61708b;
diff --git a/src/components/views/rooms/VoiceRecordComposerTile.tsx b/src/components/views/rooms/VoiceRecordComposerTile.tsx
index 061daab915..b4999ac0df 100644
--- a/src/components/views/rooms/VoiceRecordComposerTile.tsx
+++ b/src/components/views/rooms/VoiceRecordComposerTile.tsx
@@ -22,6 +22,8 @@ import {Room} from "matrix-js-sdk/src/models/room";
 import {MatrixClientPeg} from "../../../MatrixClientPeg";
 import classNames from "classnames";
 import LiveRecordingWaveform from "../voice_messages/LiveRecordingWaveform";
+import {replaceableComponent} from "../../../utils/replaceableComponent";
+import LiveRecordingClock from "../voice_messages/LiveRecordingClock";
 interface IProps {
     room: Room;
 }
@@ -32,6 +34,10 @@ interface IState {
     recorder?: VoiceRecorder;
 }
 
+/**
+ * Container tile for rendering the voice message recorder in the composer.
+ */
+@replaceableComponent("views.rooms.VoiceRecordComposerTile")
 export default class VoiceRecordComposerTile extends React.PureComponent<IProps, IState> {
     public constructor(props) {
         super(props);
@@ -61,6 +67,15 @@ export default class VoiceRecordComposerTile extends React.PureComponent<IProps, IState> {
+    private renderWaveformArea() {
+        if (!this.state.recorder) return null;
+
+        return <div className="mx_VoiceRecordComposerTile_waveformContainer">
+            <LiveRecordingClock recorder={this.state.recorder} />
+            <LiveRecordingWaveform recorder={this.state.recorder} />
+        </div>;
+    }
+
     public render() {
         const classes = classNames({
             'mx_MessageComposer_button': !this.state.recorder,
@@ -68,16 +83,14 @@ export default class VoiceRecordComposerTile extends React.PureComponent<IProps, IState> {
-            waveform = <LiveRecordingWaveform recorder={this.state.recorder} />;
         }
 
         return (<>
-            {waveform}
+            {this.renderWaveformArea()}
diff --git a/src/components/views/voice_messages/Clock.tsx b/src/components/views/voice_messages/Clock.tsx
new file mode 100644
--- /dev/null
+++ b/src/components/views/voice_messages/Clock.tsx
@@ -0,0 +1,42 @@
+/*
+Copyright 2021 The Matrix.org Foundation C.I.C.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+import React from "react";
+import {replaceableComponent} from "../../../utils/replaceableComponent";
+
+interface IProps {
+    seconds: number;
+}
+
+interface IState {
+}
+
+/**
+ * Simply converts seconds into minutes and seconds.
+ */
+@replaceableComponent("views.voice_messages.Clock")
+export default class Clock extends React.PureComponent<IProps, IState> {
+    public constructor(props) {
+        super(props);
+    }
+
+    public render() {
+        const minutes = Math.floor(this.props.seconds / 60).toFixed(0).padStart(2, '0');
+        const seconds = Math.floor(this.props.seconds % 60).toFixed(0).padStart(2, '0'); // hide millis
+        return <span className="mx_Clock">{minutes}:{seconds}</span>;
+    }
+}
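
The arithmetic in Clock's render() is easiest to check in isolation: minutes come from flooring seconds/60, the remainder is floored so the seconds column stays within 00-59, and padStart() keeps both columns two digits wide. A standalone sketch of the same maths (the formatSeconds helper below is illustrative only, not part of the patch):

// Hypothetical helper mirroring Clock's render() maths.
function formatSeconds(totalSeconds: number): string {
    const minutes = Math.floor(totalSeconds / 60).toFixed(0).padStart(2, '0');
    // Flooring rather than rounding keeps the seconds column below 60;
    // Math.round(119.6 % 60) would yield 60 and display as "01:60".
    const seconds = Math.floor(totalSeconds % 60).toFixed(0).padStart(2, '0');
    return `${minutes}:${seconds}`;
}

formatSeconds(0);     // "00:00"
formatSeconds(65.4);  // "01:05"
formatSeconds(119.6); // "01:59"
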
diff --git a/src/components/views/voice_messages/LiveRecordingClock.tsx b/src/components/views/voice_messages/LiveRecordingClock.tsx
new file mode 100644
index 0000000000..08b50e42c1
--- /dev/null
+++ b/src/components/views/voice_messages/LiveRecordingClock.tsx
@@ -0,0 +1,55 @@
+/*
+Copyright 2021 The Matrix.org Foundation C.I.C.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+import React from "react";
+import {IRecordingUpdate, VoiceRecorder} from "../../../voice/VoiceRecorder";
+import {replaceableComponent} from "../../../utils/replaceableComponent";
+import Clock from "./Clock";
+
+interface IProps {
+    recorder: VoiceRecorder;
+}
+
+interface IState {
+    seconds: number;
+}
+
+/**
+ * A clock for a live recording.
+ */
+@replaceableComponent("views.voice_messages.LiveRecordingClock")
+export default class LiveRecordingClock extends React.PureComponent<IProps, IState> {
+    public constructor(props) {
+        super(props);
+
+        this.state = {seconds: 0};
+        this.props.recorder.liveData.onUpdate(this.onRecordingUpdate);
+    }
+
+    shouldComponentUpdate(nextProps: Readonly<IProps>, nextState: Readonly<IState>, nextContext: any): boolean {
+        const currentFloor = Math.floor(this.state.seconds);
+        const nextFloor = Math.floor(nextState.seconds);
+        return currentFloor !== nextFloor;
+    }
+
+    private onRecordingUpdate = (update: IRecordingUpdate) => {
+        this.setState({seconds: update.timeSeconds});
+    };
+
+    public render() {
+        return <Clock seconds={this.state.seconds} />;
+    }
+}
diff --git a/src/components/views/voice_messages/LiveRecordingWaveform.tsx b/src/components/views/voice_messages/LiveRecordingWaveform.tsx
index 506532744a..8a2a5ae089 100644
--- a/src/components/views/voice_messages/LiveRecordingWaveform.tsx
+++ b/src/components/views/voice_messages/LiveRecordingWaveform.tsx
@@ -49,12 +49,12 @@ export default class LiveRecordingWaveform extends React.PureComponent<IProps, IState> {
-            heights: bars.map(b => percentageOf(b, 0, 0.40) * 100),
+            heights: bars.map(b => percentageOf(b, 0, 0.35) * 100),
         });
     };
diff --git a/src/voice/VoiceRecorder.ts b/src/voice/VoiceRecorder.ts
index a85c3acad3..dec8017b8b 100644
--- a/src/voice/VoiceRecorder.ts
+++ b/src/voice/VoiceRecorder.ts
@@ -23,12 +23,10 @@ import {SimpleObservable} from "matrix-widget-api";
 const CHANNELS = 1; // stereo isn't important
 const SAMPLE_RATE = 48000; // 48khz is what WebRTC uses. 12khz is where we lose quality.
 const BITRATE = 24000; // 24kbps is pretty high quality for our use case in opus.
-const FREQ_SAMPLE_RATE = 10; // Target rate of frequency data (samples / sec). We don't need this super often.
 
 export interface IRecordingUpdate {
     waveform: number[]; // floating points between 0 (low) and 1 (high).
-
-    // TODO: @@ TravisR: Generalize this for a timing package?
+    timeSeconds: number; // float
 }
 
 export class VoiceRecorder {
@@ -37,11 +35,11 @@ export class VoiceRecorder {
     private recorderSource: MediaStreamAudioSourceNode;
     private recorderStream: MediaStream;
     private recorderFFT: AnalyserNode;
+    private recorderProcessor: ScriptProcessorNode;
     private buffer = new Uint8Array(0);
     private mxc: string;
     private recording = false;
     private observable: SimpleObservable<IRecordingUpdate>;
-    private freqTimerId: number;
 
     public constructor(private client: MatrixClient) {
     }
@@ -71,7 +69,20 @@
         // it makes the time domain less than helpful.
         this.recorderFFT.fftSize = 64;
 
+        // We use an audio processor to get accurate timing information.
+        // The size of the audio buffer largely decides how quickly we push timing/waveform data
+        // out of this class. Smaller buffers mean we update more frequently as we can't hold as
+        // many bytes. Larger buffers mean slower updates. For scale, 1024 gives us about 30Hz of
+        // updates and 2048 gives us about 20Hz. We use 2048 because it updates frequently enough
+        // to feel realtime (~20fps, which is what humans perceive as "realtime"). Must be a power
+        // of 2.
+        this.recorderProcessor = this.recorderContext.createScriptProcessor(2048, CHANNELS, CHANNELS);
+
+        // Connect our inputs and outputs
         this.recorderSource.connect(this.recorderFFT);
+        this.recorderSource.connect(this.recorderProcessor);
+        this.recorderProcessor.connect(this.recorderContext.destination);
+
         this.recorder = new Recorder({
             encoderPath, // magic from webpack
             encoderSampleRate: SAMPLE_RATE,
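
The buffer-size comment above boils down to one division: a ScriptProcessorNode fires a single audioprocess event each time its buffer fills, so the update rate is the AudioContext's sample rate divided by the buffer size. A rough standalone sketch of that arithmetic (the 48kHz figure is the target rate used in this file; the browser's actual context rate may differ, which shifts the exact numbers):

// One "audioprocess" event per filled buffer:
//     updates per second = sampleRate / bufferSize
const assumedSampleRate = 48000; // target rate from this file; real contexts may vary
const bufferSize = 2048;         // the size passed to createScriptProcessor above
const updatesPerSecond = assumedSampleRate / bufferSize;
console.log(updatesPerSecond);   // ~23 updates/sec, i.e. the "about 20Hz" ballpark described above
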
@@ -117,6 +128,37 @@
         return this.mxc;
     }
 
+    private tryUpdateLiveData = (ev: AudioProcessingEvent) => {
+        if (!this.recording) return;
+
+        // The time domain is the input to the FFT, which means we use an array of the same
+        // size. The time domain is also known as the audio waveform. We're ignoring the
+        // output of the FFT here (frequency data) because we're not interested in it.
+        //
+        // We use bytes out of the analyser because floats have weird precision problems
+        // and are slightly more difficult to work with. The bytes are easy to work with,
+        // which is why we pick them (they're also more precise, but we care less about that).
+        const data = new Uint8Array(this.recorderFFT.fftSize);
+        this.recorderFFT.getByteTimeDomainData(data);
+
+        // Because we're dealing with a uint array we need to do math a bit differently.
+        // If we just `Array.from()` the uint array, we end up with 1s and 0s, which aren't
+        // what we're after. Instead, we have to use a bit of manual looping to correctly end
+        // up with the right values
+        const translatedData: number[] = [];
+        for (let i = 0; i < data.length; i++) {
+            // All we're doing here is inverting the amplitude and putting the metric somewhere
+            // between zero and one. Without the inversion, lower values are "louder", which is
+            // not super helpful.
+            translatedData.push(1 - (data[i] / 128.0));
+        }
+
+        this.observable.update({
+            waveform: translatedData,
+            timeSeconds: ev.playbackTime,
+        });
+    };
+
     public async start(): Promise<void> {
         if (this.mxc || this.hasRecording) {
             throw new Error("Recording already prepared");
         }
         this.observable = new SimpleObservable<IRecordingUpdate>();
         await this.makeRecorder();
-        this.freqTimerId = setInterval(() => {
-            if (!this.recording) return;
-
-            // The time domain is the input to the FFT, which means we use an array of the same
-            // size. The time domain is also known as the audio waveform. We're ignoring the
-            // output of the FFT here (frequency data) because we're not interested in it.
-            //
-            // We use bytes out of the analyser because floats have weird precision problems
-            // and are slightly more difficult to work with. The bytes are easy to work with,
-            // which is why we pick them (they're also more precise, but we care less about that).
-            const data = new Uint8Array(this.recorderFFT.fftSize);
-            this.recorderFFT.getByteTimeDomainData(data);
-
-            // Because we're dealing with a uint array we need to do math a bit differently.
-            // If we just `Array.from()` the uint array, we end up with 1s and 0s, which aren't
-            // what we're after. Instead, we have to use a bit of manual looping to correctly end
-            // up with the right values
-            const translatedData: number[] = [];
-            for (let i = 0; i < data.length; i++) {
-                // All we're doing here is inverting the amplitude and putting the metric somewhere
-                // between zero and one. Without the inversion, lower values are "louder", which is
-                // not super helpful.
-                translatedData.push(1 - (data[i] / 128.0));
-            }
-
-            this.observable.update({
-                waveform: translatedData,
-            });
-        }, 1000 / FREQ_SAMPLE_RATE) as any as number; // XXX: Linter doesn't understand timer environment
+        this.recorderProcessor.addEventListener("audioprocess", this.tryUpdateLiveData);
         await this.recorder.start();
         this.recording = true;
     }
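
For reference, getByteTimeDomainData() fills the array with unsigned bytes where 128 represents silence, which is why the loop above divides by 128 and inverts. A standalone sketch of the same mapping (illustrative only, not part of the patch):

// Mirrors the translation in tryUpdateLiveData: invert the amplitude and scale by 128
// so that silence (a byte value of 128) maps to 0 and a fully "low" sample maps to 1.
const translateByte = (byte: number): number => 1 - (byte / 128.0);

translateByte(128); // 0   (silence)
translateByte(64);  // 0.5
translateByte(0);   // 1   (loudest, on this inverted scale)
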
         this.recorderStream.getTracks().forEach(t => t.stop());
 
         // Finally do our post-processing and clean up
-        clearInterval(this.freqTimerId);
         this.recording = false;
+        this.recorderProcessor.removeEventListener("audioprocess", this.tryUpdateLiveData);
         await this.recorder.close();
         return this.buffer;
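
With these changes the recorder publishes both the waveform and the elapsed time through its liveData observable, which is what the new clock consumes. A minimal consumer sketch built only on the APIs visible in this patch (the import paths and surrounding function are illustrative, not code from the repository):

import {VoiceRecorder, IRecordingUpdate} from "../../voice/VoiceRecorder"; // path is illustrative
import {MatrixClientPeg} from "../../MatrixClientPeg";                     // path is illustrative

async function recordWithLiveUpdates() {
    const recorder = new VoiceRecorder(MatrixClientPeg.get());
    await recorder.start();

    // Fires once per filled ScriptProcessorNode buffer (roughly 20 times a second, see above),
    // carrying the 0..1 waveform points and the elapsed time in seconds.
    recorder.liveData.onUpdate((update: IRecordingUpdate) => {
        console.log(update.timeSeconds.toFixed(1), update.waveform.length);
    });

    // ...stop later, e.g. when the user presses the stop button.
    await recorder.stop();
}
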