我正在使用 getUserMedia API 在浏览器中录制音频，然后将此音频发送到 WebSocket 服务器。此外，为了测试录音，我在 Mac 上使用 Soundflower 作为输入设备，这样我可以直接播放波形文件，而不必对着麦克风说话。
客户端(JavaScript)
// Normalize vendor-prefixed browser APIs onto the standard names so the
// rest of the script can stay prefix-free.
window.AudioContext = window.AudioContext || window.webkitAudioContext;
navigator.getUserMedia =
    navigator.getUserMedia ||
    navigator.webkitGetUserMedia ||
    navigator.mozGetUserMedia;

// One audio graph and one socket are shared by the whole recording session.
var audioContext = new AudioContext();
var wsClient = new WebSocket(`ws://${WEBSOCKET_URL}:${WEBSOCKET_PORT}`);
/**
 * Wire the capture pipeline: microphone stream -> ScriptProcessorNode ->
 * recorderProcess (which streams samples over the websocket).
 * @param {MediaStream} stream - live audio stream granted by the user.
 */
function handleStream(stream) {
    var input = audioContext.createMediaStreamSource(stream);
    // NOTE(review): createScriptProcessor is deprecated in favour of
    // AudioWorklet, but it is kept to preserve the original design.
    var recordNode = audioContext.createScriptProcessor(4096);
    recordNode.onaudioprocess = recorderProcess;
    input.connect(recordNode);
    // Some browsers (notably Chrome) never fire onaudioprocess unless the
    // processor node is connected to a destination.
    recordNode.connect(audioContext.destination);
}

/** Report failure to obtain a live audio input device. */
function handleError(e) {
    console.error("No live audio input: " + e);
}

// Prefer the standard promise-based API; the callback-style
// navigator.getUserMedia is deprecated and only kept as a fallback
// for older browsers (the prefixed aliases are set up above).
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    navigator.mediaDevices.getUserMedia({audio: true})
        .then(handleStream)
        .catch(handleError);
} else {
    navigator.getUserMedia({audio: true}, handleStream, handleError);
}
/**
 * ScriptProcessor callback: forwards each captured audio block to the
 * websocket server as raw 32-bit float samples.
 *
 * Fixes two defects in the original:
 *  - WebSocket.send() throws an InvalidStateError when called while the
 *    socket is still CONNECTING (or after it has closed); guard on
 *    readyState so early/late audio callbacks are dropped instead.
 *  - The Float32Array returned by getChannelData() may be backed by
 *    memory the audio engine reuses for the next render quantum; send a
 *    copy so frames still queued in the socket are not overwritten.
 *
 * @param {AudioProcessingEvent} e - event carrying the captured samples.
 */
function recorderProcess(e) {
    // Only the first (left/mono) channel is streamed.
    var samples = e.inputBuffer.getChannelData(0);
    if (wsClient.readyState === WebSocket.OPEN) {
        // Copy detaches the payload from the engine-owned backing store.
        wsClient.send(new Float32Array(samples));
    }
}
Run Code Online (Sandbox Code Playground)
服务器端(python)
在服务器端,我只是在文件中写入块:
def onMessage(self, msg, …Run Code Online (Sandbox Code Playgroud)