AudioWorkletで録音します。
RecorderNode.js
// Records audio by accumulating the Float32Array chunks that the
// 'recorder-processor' AudioWorkletProcessor posts over the message port.
export default class RecorderNode extends AudioWorkletNode {
  /**
   * @param {BaseAudioContext} audioCtx - context whose worklet has already
   *   loaded 'recorder-processor' via audioWorklet.addModule().
   */
  constructor(audioCtx) {
    super(audioCtx, 'recorder-processor');
    this.buffers = []; // Float32Array chunks received from the processor
    this.port.onmessage = (e) => this.buffers.push(e.data);
  }

  /**
   * Concatenate an array of Float32Arrays into one new Float32Array.
   * @param {Float32Array[]} buffers
   * @returns {Float32Array} the merged samples (empty array for no input)
   */
  mergeBuffers(buffers) {
    const sampleLength = buffers.reduce((acc, buf) => acc + buf.length, 0);
    // BUG FIX: the original allocated `array` but wrote into an undeclared
    // `samples` variable — a ReferenceError in module (strict) code.
    const samples = new Float32Array(sampleLength);
    let offset = 0;
    for (const buf of buffers) {
      samples.set(buf, offset); // bulk copy instead of a per-sample loop
      offset += buf.length;
    }
    return samples;
  }

  // Return everything recorded so far as one Float32Array and reset the store.
  getData() {
    const samples = this.mergeBuffers(this.buffers);
    this.buffers = [];
    return samples;
  }
}
RecorderProcessor.js
// Worklet-side recorder: relays each render quantum of input 0 / channel 0
// to the main thread over the message port.
class RecorderProcessor extends AudioWorkletProcessor {
  process(inputs) {
    const channelData = inputs[0][0];
    // Skip quanta where no channel data is available (e.g. nothing connected).
    if (!(channelData instanceof Float32Array)) {
      return true;
    }
    // postMessage structured-clones the view, so the main thread receives
    // its own copy of the samples.
    this.port.postMessage(channelData.subarray());
    return true; // keep the processor alive for the lifetime of the node
  }
}
registerProcessor('recorder-processor', RecorderProcessor);
index.html
<html>
<script type="module">
// BUG FIX: browser module specifiers must start with './', '../' or '/';
// a bare 'RecorderNode.js' fails to resolve.
import RecorderNode from './RecorderNode.js';
window.onload = async () => {
  // NOTE(review): autoplay policies may leave this context suspended until a
  // user gesture — confirm, or create/resume it inside a click handler.
  const audioCtx = new AudioContext();
  await audioCtx.audioWorklet.addModule('RecorderProcessor.js');
  // Capture microphone input.
  const stream = await navigator.mediaDevices.getUserMedia({ video: false, audio: true });
  const streamNode = audioCtx.createMediaStreamSource(stream);
  const recorderNode = new RecorderNode(audioCtx);
  // Recording starts as soon as the microphone is connected.
  streamNode.connect(recorderNode).connect(audioCtx.destination);
  // After 10 seconds, play the recording back with an AudioBufferSourceNode.
  setTimeout(() => {
    streamNode.disconnect();
    recorderNode.disconnect();
    const samples = recorderNode.getData(); // recorded data (Float32Array)
    if (samples.length === 0) return; // createBuffer throws on a 0-length buffer
    const audioBuffer = audioCtx.createBuffer(1, samples.length, audioCtx.sampleRate);
    audioBuffer.copyToChannel(samples, 0);
    const source = audioCtx.createBufferSource();
    source.buffer = audioBuffer;
    // BUG FIX: the source was never connected, so playback was silent.
    source.connect(audioCtx.destination);
    source.start();
  }, 10000);
};
</script>
</html>