启动音频队列失败???

Chr*_*rat 1 speech-recognition avfoundation ios swift sfspeechrecognizer

我正在尝试创建一个应用程序:它在每一关显示一段不同的文本,目标是让用户读出屏幕上显示的句子;当用户不会读时,可以让应用朗读出来(是的,它是为孩子们设计的):

\n\n
@IBAction func dontknow(_ sender: Any) {\n    let utterance = AVSpeechUtterance(string: textLabel.text)\n    utterance.voice = AVSpeechSynthesisVoice(language: "fr-FR")\n    utterance.rate = 0.4\n\n    let synthesizer = AVSpeechSynthesizer()\n    synthesizer.speak(utterance)\n\n}\n
Run Code Online (Sandbox Code Playground)

该应用程序的流程如下:如果用户不会读,他可以点击按钮让应用朗读文本;如果他读对了,就进入下一关。

\n\n

当他第一次点击朗读按钮时,应用会正常朗读;但在用户尝试读出文本并进入下一关之后,再点击朗读按钮就没有任何反应了,只是抛出这个错误:Failure starting audio queue ≥˚˛ˇ

\n\n

完整代码:

\n\n
import UIKit
import AVFoundation
import Speech

/// A reading game for children: each level shows a French sentence the player
/// must read aloud. Speech recognition checks the attempt; a "don't know"
/// button speaks the sentence with text-to-speech instead.
class ReadViewController: UIViewController, SFSpeechRecognizerDelegate {

    // MARK: - Game data

    /// Pool of sentences; one is picked at random for every level.
    var texts = ["Je mange des pâtes", "Bonjour Jean comment vas-tu", "Qui est-ce", "J'en ai marre", "Je ne te trouve pas gentil", "Pourquoi tu ne veux pas", "Tu es si gentil", "Tu es beau", "Dans combien de temps", "Tu as fait de beaux rêves", "Cette application est une révolution"]
    /// Sentence of the current level.
    var text = ""
    /// Most recent transcript produced by the recognizer.
    var transcriptedText = ""
    /// Blur effect captured from the storyboard so it can be animated in/out.
    var effect: UIVisualEffect!

    // MARK: - Outlets

    @IBOutlet weak var dontknowButton: UIButton!
    @IBOutlet weak var listenButton: UIButton!
    @IBOutlet weak var visualEffectView: UIVisualEffectView!
    @IBOutlet var alertView: UIView!
    @IBOutlet weak var regameButton: UIButton!
    @IBOutlet weak var textView: UILabel!
    @IBOutlet weak var textLabel: UILabel!
    @IBOutlet weak var microphoneButton: UIButton!
    @IBOutlet weak var transci: UILabel!

    // MARK: - Speech recognition / synthesis

    private let speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: "fr-FR"))
    private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
    private var recognitionTask: SFSpeechRecognitionTask?
    private let audioEngine = AVAudioEngine()
    // One synthesizer for the whole screen: a synthesizer created inside an
    // @IBAction is deallocated when the action returns, which can cut speech
    // off or drop it silently.
    private let speechSynthesizer = AVSpeechSynthesizer()
    var recordingSession: AVAudioSession!
    var player: AVAudioPlayer!

    // MARK: - Lifecycle

    override func viewDidLoad() {
        super.viewDidLoad()
        listenButton.layer.cornerRadius = 10
        dontknowButton.layer.cornerRadius = dontknowButton.frame.width / 2
        self.restart()

        effect = visualEffectView.effect
        visualEffectView.layer.opacity = 0
        visualEffectView.effect = nil
        regameButton.layer.cornerRadius = 10
        microphoneButton.layer.cornerRadius = 10

        // Disabled until the speech-recognition permission status is known.
        microphoneButton.isEnabled = false
        speechRecognizer?.delegate = self

        SFSpeechRecognizer.requestAuthorization { (authStatus) in
            var isButtonEnabled = false
            switch authStatus {
            case .authorized:
                isButtonEnabled = true
            case .denied, .restricted:
                isButtonEnabled = false
                self.alert()
            case .notDetermined:
                isButtonEnabled = false
                print("Speech recognition not yet authorized")
            }
            // The authorization callback may run off the main thread.
            OperationQueue.main.addOperation {
                self.microphoneButton.isEnabled = isButtonEnabled
            }
        }
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    // MARK: - Alerts

    /// Tells the user speech recognition is disabled and offers to open Settings.
    func alert() {
        let alertController = UIAlertController(title: "Désolé", message: "Pour le bon fonctionnement de l'application, vous devez activer la reconnaissance vocale dans les réglages.", preferredStyle: .alert)

        let settingsAction = UIAlertAction(title: "Réglages", style: .default) { (_) -> Void in
            guard let settingsUrl = URL(string: UIApplicationOpenSettingsURLString) else {
                return
            }
            if UIApplication.shared.canOpenURL(settingsUrl) {
                UIApplication.shared.open(settingsUrl, completionHandler: { (success) in
                    print("Settings opened: \(success)")
                })
            }
        }
        alertController.addAction(settingsAction)
        present(alertController, animated: true, completion: nil)
    }

    // MARK: - Game flow

    /// Picks a random sentence and shows it as the next level.
    func restart() {
        let randomNumber = random(0..<texts.count)
        text = texts[randomNumber]
        textLabel.text = text
    }

    /// Returns a uniformly random Int in `range`.
    func random(_ range: Range<Int>) -> Int {
        return range.lowerBound + Int(arc4random_uniform(UInt32(range.upperBound - range.lowerBound)))
    }

    // MARK: - Text-to-speech

    /// Switches the shared audio session back to a playback-capable category.
    ///
    /// `startRecording()` puts the session into `.record` / `.measurement`;
    /// left that way, `AVSpeechSynthesizer` cannot open its output and fails
    /// with "Failure starting audio queue" — the bug that silenced the speak
    /// button after the first recording.
    private func activatePlaybackSession() {
        do {
            try AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayback, with: .duckOthers)
            try AVAudioSession.sharedInstance().setActive(true)
        } catch {
            print("Could not switch audio session to playback: \(error)")
        }
    }

    /// Speaks `phrase` with the French text-to-speech voice.
    private func speak(_ phrase: String) {
        activatePlaybackSession()
        let utterance = AVSpeechUtterance(string: phrase)
        utterance.voice = AVSpeechSynthesisVoice(language: "fr-FR")
        utterance.rate = 0.4
        speechSynthesizer.speak(utterance)
    }

    // MARK: - Recording

    /// Configures the audio session for recording and starts a live
    /// speech-recognition task fed by microphone buffers.
    func startRecording() {
        // Cancel any task left over from a previous round.
        if recognitionTask != nil {
            recognitionTask?.cancel()
            recognitionTask = nil
        }

        let audioSession = AVAudioSession.sharedInstance()
        do {
            try audioSession.setCategory(AVAudioSessionCategoryRecord)
            try audioSession.setMode(AVAudioSessionModeMeasurement)
            try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
        } catch {
            print("audioSession properties weren't set because of an error.")
        }

        recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
        let inputNode = audioEngine.inputNode

        guard let recognitionRequest = recognitionRequest else {
            fatalError("Unable to create an SFSpeechAudioBufferRecognitionRequest object")
        }
        // Stream partial transcripts so the label updates while the child speaks.
        recognitionRequest.shouldReportPartialResults = true

        recognitionTask = speechRecognizer?.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in
            var isFinal = false

            if let result = result {
                self.transcriptedText = result.bestTranscription.formattedString
                self.transci.text = self.transcriptedText
                isFinal = result.isFinal
            }

            if error != nil || isFinal {
                // Tear everything down so the next round starts cleanly.
                self.audioEngine.stop()
                inputNode.removeTap(onBus: 0)
                self.recognitionRequest = nil
                self.recognitionTask = nil
                self.transci.text = ""
                self.microphoneButton.isEnabled = true
            }
        })

        let recordingFormat = inputNode.outputFormat(forBus: 0)
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
            self.recognitionRequest?.append(buffer)
        }

        audioEngine.prepare()
        do {
            try audioEngine.start()
        } catch {
            print("audioEngine couldn't start because of an error.")
        }
    }

    // MARK: - SFSpeechRecognizerDelegate

    /// Mirrors recognizer availability onto the microphone button.
    func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) {
        microphoneButton.isEnabled = available
    }

    // MARK: - Actions

    /// Toggles recording. When stopping, compares the transcript with the
    /// expected sentence: advance on success, show the retry alert otherwise.
    @IBAction func start(_ sender: Any) {
        if audioEngine.isRunning {
            if transcriptedText == text {
                self.restart()      // correct: next level
            } else {
                animateIn()         // wrong: show the retry alert
            }
            audioEngine.stop()
            recognitionRequest?.endAudio()
            microphoneButton.isEnabled = false
            microphoneButton.setTitle("Commencer", for: .normal)
        } else {
            startRecording()
            microphoneButton.setTitle("Arrêter", for: .normal)
        }
    }

    @IBAction func listen(_ sender: Any) {
        speak("wesh")
    }

    @IBAction func reGameOn(_ sender: Any) {
        animateOut()
    }

    /// Speaks the sentence currently on screen.
    @IBAction func dontknow(_ sender: Any) {
        speak(textLabel.text ?? text)
    }

    // MARK: - Alert animation

    /// Blurs the background and pops the retry alert in.
    func animateIn() {
        self.view.addSubview(alertView)
        alertView.center = self.view.center
        alertView.transform = CGAffineTransform.init(scaleX: 1.3, y: 1.3)
        alertView.alpha = 0

        UIView.animate(withDuration: 0.4) {
            self.alertView.layer.cornerRadius = 10
            self.visualEffectView.layer.opacity = 1
            self.visualEffectView.effect = self.effect
            self.alertView.alpha = 1
            self.alertView.transform = CGAffineTransform.identity
        }
    }

    /// Fades the retry alert out and removes the blur.
    func animateOut() {
        UIView.animate(withDuration: 0.3, animations: {
            self.alertView.layer.cornerRadius = 0
            self.alertView.transform = CGAffineTransform.init(scaleX: 1.3, y: 1.3)
            self.alertView.alpha = 0
            self.visualEffectView.effect = nil
            self.visualEffectView.layer.opacity = 0
        }) { (success: Bool) in
            self.alertView.removeFromSuperview()
        }
    }
}
Run Code Online (Sandbox Code Playground)

小智 5

在 @IBAction func dontknow 的末尾添加以下代码,这应该可以解决问题:

// Restore a playback-capable audio session so speech synthesis can start again.
do {
    try AVAudioSession.sharedInstance()
        .setCategory(AVAudioSessionCategoryPlayback, with: .duckOthers)
} catch {
    print(error)
}
Run Code Online (Sandbox Code Playground)