Streaming audio from cordova-plugin-audioinput to the Google Speech API


For a cross-platform application built with the Meteor framework, I want to record microphone input and transcribe the speech using the Google Speech API.

Following the Google documentation, I am specifically trying to build an audio stream to feed the Google Speech client.

On the client side, a record button triggers the following startCapture function (based on cordova-plugin-audioinput):

export var startCapture = function () {
  try {
    if (window.audioinput && !audioinput.isCapturing()) {

      // Automatically stop the capture after 20 seconds
      setTimeout(stopCapture, 20000);

      var captureCfg = {
        sampleRate: 16000,
        bufferSize: 2048,
      };
      audioinput.start(captureCfg);
    }
  }
  catch (e) {
    console.error(e);
  }
};
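
stopCapture is not shown above; I assume it is just a thin wrapper around the plugin's stop call, along these lines:

export var stopCapture = function () {
  // Stop the plugin's capture if it is currently running
  if (window.audioinput && audioinput.isCapturing()) {
    audioinput.stop();
  }
};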

The audioinput event lets me retrieve the recorded audio data in chunks:

window.addEventListener('audioinput', onAudioInputCapture, false);

var audioDataQueue = [];
function onAudioInputCapture(evt) {
  try {
    if (evt && evt.data) {
      // Push the data to the audio queue (array)
      audioDataQueue.push(evt.data);

      // Here should probably be a call to a Meteor server method?
    }
  }
  catch (e) {
    console.error(e);
  }
}
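
The commented line is where I imagine a Meteor method call would go. A rough sketch of what I have in mind (feedGoogleSpeech is the server method defined further down; whether to send the raw sample array or convert it first is part of my question):

// Inside onAudioInputCapture: forward each chunk to the server as it arrives
Meteor.call('feedGoogleSpeech', evt.data, function (err) {
  if (err) console.error(err);
});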

Where I am struggling is turning the recorded audio data into some kind of ReadableStream and piping it into the Google Speech API client on the server side.

const speech = require('@google-cloud/speech');

const client = new speech.SpeechClient();
const request = {
  config: {
    encoding: 'LINEAR16',
    sampleRateHertz: 16000,
    languageCode: 'en-US',
  },
  interimResults: true,
};

export const recognizeStream = client
  .streamingRecognize(request)
  .on('error', console.error)
  .on('data', data =>
    console.log(data.results)
  );
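
Since the config declares LINEAR16, my understanding is that whatever gets written to recognizeStream must be raw 16-bit PCM. Assuming the plugin delivers normalized float samples (its default), a conversion helper could look roughly like this (toPcm16Buffer is a name I made up for illustration):

// Hypothetical helper: turn an array of normalized float samples (-1..1)
// into a Buffer of 16-bit little-endian PCM, as expected by LINEAR16
function toPcm16Buffer(samples) {
  const buf = Buffer.alloc(samples.length * 2);
  for (let i = 0; i < samples.length; i++) {
    const s = Math.max(-1, Math.min(1, samples[i])); // clamp
    buf.writeInt16LE(Math.round(s * 32767), i * 2);  // scale to signed 16-bit
  }
  return buf;
}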

I tried the following approach, but it does not feel right:

const Stream = require('stream');

var serverAudioDataQueue = [];
const readable = new Stream.Readable({
  objectMode: true,
});
readable._read = function (n) {
  // Drain whatever has accumulated in the server-side queue
  this.push(serverAudioDataQueue.splice(0, serverAudioDataQueue.length));
};
readable.pipe(recognizeStream);

Meteor.methods({
  'feedGoogleSpeech': function (data) {
    data.forEach(item => serverAudioDataQueue.push(item));
  },
...
});
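
A simpler direction I have considered is to drop the intermediate Readable and write each converted chunk straight into recognizeStream from the Meteor method (sketch only, reusing the hypothetical toPcm16Buffer helper from above):

Meteor.methods({
  'feedGoogleSpeech': function (data) {
    // Convert the incoming sample array and push it directly
    // into the streaming recognition request
    recognizeStream.write(toPcm16Buffer(data));
  },
});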

Any insights?