I have a navigation app that uses AVSpeechUtterance. I have already set the volume to 1 like this: speechUtteranceInstance.volume = 1, but the speech is still very quiet compared to music or podcasts coming from the iPhone, especially when the sound goes over Bluetooth or a wired connection (for example, connected to a car via Bluetooth).
Is there any way to boost the volume? (I know this has been asked on SO before, but so far I have not found a solution that works for me.)
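For context, a minimal sketch of the baseline approach described above, assuming speechUtteranceInstance is an ordinary AVSpeechUtterance spoken by an AVSpeechSynthesizer (the phrase here is a placeholder):

import AVFoundation

let synthesizer = AVSpeechSynthesizer()
let speechUtteranceInstance = AVSpeechUtterance(string: "Turn left in 200 meters.") // placeholder phrase
speechUtteranceInstance.volume = 1.0 // 1.0 is already the maximum value AVSpeechUtterance accepts
synthesizer.speak(speechUtteranceInstance)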
After some more research and experimenting, I found a good workaround.
First of all, I think this is an iOS bug. When all of the conditions below are true, I found that the spoken instruction itself is also ducked (or at least sounds ducked), with the result that the instruction plays at the same volume as the ducked music, and is therefore too quiet to hear well:
- background music or a podcast is playing,
- the audio session is configured with the .duckOthers category option, which ducks that background audio, and
- the output goes over Bluetooth or a wired connection (for example, a car stereo).
The workaround I found is to feed the speechUtterance to an AVAudioEngine. This can only be done on iOS 13 or above, since that version adds the .write method to AVSpeechSynthesizer.
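Note that .write(_:toBufferCallback:) only exists on iOS 13 and later, so an app that still supports older iOS versions needs an availability check around it. A minimal sketch, assuming the pre-iOS 13 path simply falls back to plain speak(_:) without the gain boost:

import AVFoundation

let synthesizer = AVSpeechSynthesizer()
let utterance = AVSpeechUtterance(string: "This is a test.") // placeholder phrase

if #available(iOS 13.0, *) {
    // Render the speech into buffers instead of playing it directly.
    synthesizer.write(utterance) { buffer in
        // Convert and schedule each buffer on an AVAudioPlayerNode (see the full code below).
    }
} else {
    // Assumed fallback: speak normally, without the extra gain boost.
    synthesizer.speak(utterance)
}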
In short: I use an AVAudioEngine, an AVAudioUnitEQ and an AVAudioPlayerNode, and set the globalGain property of the AVAudioUnitEQ to about 10 dB. This has a few quirks of its own, but they can be worked around (see the code comments).
Here is the full code:
import UIKit
import AVFoundation
import MediaPlayer

class ViewController: UIViewController {

    // MARK: AVAudio properties
    var engine = AVAudioEngine()
    var player = AVAudioPlayerNode()
    var eqEffect = AVAudioUnitEQ()
    var converter = AVAudioConverter(from: AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatInt16, sampleRate: 22050, channels: 1, interleaved: false)!, to: AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatFloat32, sampleRate: 22050, channels: 1, interleaved: false)!)
    let synthesizer = AVSpeechSynthesizer()
    var bufferCounter: Int = 0

    let audioSession = AVAudioSession.sharedInstance()

    override func viewDidLoad() {
        super.viewDidLoad()

        let outputFormat = AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatFloat32, sampleRate: 22050, channels: 1, interleaved: false)!
        setupAudio(format: outputFormat, globalGain: 0)
    }

    func activateAudioSession() {
        do {
            try audioSession.setCategory(.playback, mode: .voicePrompt, options: [.mixWithOthers, .duckOthers])
            try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
        } catch {
            print("An error has occurred while setting the AVAudioSession.")
        }
    }

    @IBAction func tappedPlayButton(_ sender: Any) {
        eqEffect.globalGain = 0
        play()
    }

    @IBAction func tappedPlayLoudButton(_ sender: Any) {
        eqEffect.globalGain = 10
        play()
    }

    func play() {
        let path = Bundle.main.path(forResource: "voiceStart", ofType: "wav")!
        let file = try! AVAudioFile(forReading: URL(fileURLWithPath: path))
        self.player.scheduleFile(file, at: nil, completionHandler: nil)

        let utterance = AVSpeechUtterance(string: "This is to test if iOS is able to boost the voice output above the 100% limit.")

        synthesizer.write(utterance) { buffer in
            guard let pcmBuffer = buffer as? AVAudioPCMBuffer, pcmBuffer.frameLength > 0 else {
                print("could not create buffer or buffer empty")
                return
            }

            // QUIRK: Need to convert the buffer to a different format because AVAudioEngine does not support the format returned from AVSpeechSynthesizer
            let convertedBuffer = AVAudioPCMBuffer(pcmFormat: AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatFloat32, sampleRate: pcmBuffer.format.sampleRate, channels: pcmBuffer.format.channelCount, interleaved: false)!, frameCapacity: pcmBuffer.frameCapacity)!

            do {
                try self.converter!.convert(to: convertedBuffer, from: pcmBuffer)
                self.bufferCounter += 1
                self.player.scheduleBuffer(convertedBuffer, completionCallbackType: .dataPlayedBack, completionHandler: { (type) -> Void in
                    DispatchQueue.main.async {
                        self.bufferCounter -= 1
                        print(self.bufferCounter)
                        if self.bufferCounter == 0 {
                            self.player.stop()
                            self.engine.stop()
                            try! self.audioSession.setActive(false, options: [])
                        }
                    }
                })
                self.converter!.reset()
                //self.player.prepare(withFrameCount: convertedBuffer.frameLength)
            } catch let error {
                print(error.localizedDescription)
            }
        }

        activateAudioSession()

        if !self.engine.isRunning {
            try! self.engine.start()
        }
        if !self.player.isPlaying {
            self.player.play()
        }
    }

    func setupAudio(format: AVAudioFormat, globalGain: Float) {
        // QUIRK: Connecting the equalizer to the engine somehow starts the shared audioSession, and if that audio session is not configured with .mixWithOthers and is not deactivated afterwards, this will stop any background music that was already playing. So first configure the audio session, then set up the engine, and then deactivate the session again.
        try? self.audioSession.setCategory(.playback, options: .mixWithOthers)

        eqEffect.globalGain = globalGain
        engine.attach(player)
        engine.attach(eqEffect)
        engine.connect(player, to: eqEffect, format: format)
        engine.connect(eqEffect, to: engine.mainMixerNode, format: format)
        engine.prepare()

        try? self.audioSession.setActive(false)
    }
}
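One more note on the gain value: AVAudioUnitEQ.globalGain is documented to accept roughly -96 dB to +24 dB, so the boost can be pushed above the 10 dB used here if the output is still too quiet, at the risk of clipping. A small sketch of clamping a requested boost to that range (the setBoost helper is hypothetical, not part of the code above):

import AVFoundation

// Hypothetical helper: clamp the requested boost to the range AVAudioUnitEQ accepts.
func setBoost(_ gainInDecibels: Float, on eq: AVAudioUnitEQ) {
    eq.globalGain = max(-96, min(24, gainInDecibels))
}

// Example usage: setBoost(15, on: eqEffect) before calling play()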