No speech detected in SwiftUI iOS - swift

I am trying to implement a speech-to-text feature in my app but I am facing this error:
Recognition error: Error Domain=kAFAssistantErrorDomain Code=1110 "No speech detected"
I am testing it on a real device.
Relevant code:
@State private var isRecording = false {
    didSet {
        if !isRecording { recognitionTask = nil }
    }
}
@State private var recognitionTask: SFSpeechRecognitionTask?

private func startRecording() {
    isRecording = true
    let speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: "en-US"))!
    // Create an SFSpeechAudioBufferRecognitionRequest instance
    let request = SFSpeechAudioBufferRecognitionRequest()
    let audioEngine = AVAudioEngine()
    let inputNode = audioEngine.inputNode
    // Append audio samples to the request object
    inputNode.installTap(onBus: 0, bufferSize: 1024, format: inputNode.outputFormat(forBus: 0)) { (buffer, time) in
        request.append(buffer)
    }
    // Start the audio engine and start recording
    audioEngine.prepare()
    do {
        try audioEngine.start()
    } catch {
        print("Audio engine error:", error)
        return
    }
    // Start recognizing speech from the audio samples
    recognitionTask = speechRecognizer.recognitionTask(with: request) { (result, error) in
        // Check for errors
        if let error {
            print("Recognition error:", error)
            return
        }
        guard let result else { return }
        // Update the task variable with the transcription result
        self.task = result.bestTranscription.formattedString
    }
}

private func stopRecording() {
    recognitionTask?.cancel()
    isRecording = false
}
I then have two buttons that indicate whether recording is in progress and trigger the stopRecording() and startRecording() functions.
Here's the snippet:
if isRecording {
    Button {
        stopRecording()
    } label: {
        Image(systemName: "mic.circle.fill")
            .resizable()
            .frame(width: 25, height: 25)
            .foregroundColor(.red)
    }
} else {
    Button {
        startRecording()
    } label: {
        Image(systemName: "mic.circle.fill")
            .resizable()
            .frame(width: 25, height: 25)
    }
}
Why do I get this error?
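For context on where Code=1110 tends to come from: audioEngine is a local constant here, so the engine (and its tap) can be deallocated as soon as startRecording() returns, and the recognizer then times out having received no audio; the audio session is also never configured for recording. Below is a minimal sketch of keeping the engine, request, and task alive in a helper object; the names SpeechController and transcript are illustrative, not from the original post:
import AVFoundation
import Combine
import Speech

// Sketch only: the engine, request, and task must outlive startRecording().
final class SpeechController: ObservableObject {
    @Published var transcript = ""
    private let audioEngine = AVAudioEngine()   // a property, not a local
    private var request: SFSpeechAudioBufferRecognitionRequest?
    private var recognitionTask: SFSpeechRecognitionTask?
    private let recognizer = SFSpeechRecognizer(locale: Locale(identifier: "en-US"))!

    func startRecording() throws {
        // Configure the shared audio session before starting the engine.
        let session = AVAudioSession.sharedInstance()
        try session.setCategory(.record, mode: .measurement, options: .duckOthers)
        try session.setActive(true, options: .notifyOthersOnDeactivation)

        let request = SFSpeechAudioBufferRecognitionRequest()
        request.shouldReportPartialResults = true
        self.request = request

        let inputNode = audioEngine.inputNode
        inputNode.removeTap(onBus: 0)
        inputNode.installTap(onBus: 0, bufferSize: 1024,
                             format: inputNode.outputFormat(forBus: 0)) { buffer, _ in
            request.append(buffer)
        }
        audioEngine.prepare()
        try audioEngine.start()

        recognitionTask = recognizer.recognitionTask(with: request) { [weak self] result, error in
            if let result {
                self?.transcript = result.bestTranscription.formattedString
            }
        }
    }

    func stopRecording() {
        audioEngine.stop()
        audioEngine.inputNode.removeTap(onBus: 0)
        request?.endAudio()
        recognitionTask?.cancel()
        recognitionTask = nil
    }
}
A view could then hold a SpeechController as an @StateObject and display transcript, instead of creating the engine inside startRecording().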

Related

Issue using custom sign in with Apple | SwiftUI

Essentially, the goal is to create a custom Sign in with Apple button. The following is what I have so far. The code compiles fine and the login prompt shows up as expected, but after logging in appleIDCredential does not return any values: name, email, etc. are all nil. What am I doing wrong here?
import SwiftUI
import AuthenticationServices
class AppleSignInHandler: NSObject, ASAuthorizationControllerDelegate, ASAuthorizationControllerPresentationContextProviding {
    func presentationAnchor(for controller: ASAuthorizationController) -> ASPresentationAnchor {
        // Return the first window in the current window scene as the presentation anchor for the ASAuthorizationController
        return UIApplication.shared.connectedScenes
            .first { $0.activationState == .foregroundActive }
            .map { $0 as? UIWindowScene }
            .flatMap { $0?.windows.first } ?? UIApplication.shared.windows.first!
    }

    func authorizationController(controller: ASAuthorizationController, didCompleteWithAuthorization authorization: ASAuthorization) {
        if let appleIDCredential = authorization.credential as? ASAuthorizationAppleIDCredential {
            // Apple Sign In was successful.
            // You can now use the `appleIDCredential` to authenticate the user in your app.
            let userFullName = appleIDCredential.fullName
            print("Apple Sign In was successful. User's full name is: \(String(describing: userFullName))")
        }
    }

    func authorizationController(controller: ASAuthorizationController, didCompleteWithError error: Error) {
        // Apple Sign In failed.
        // You can handle the error here.
        print("Apple Sign In failed with error: \(error.localizedDescription)")
    }
}

struct CustomAppleSignInButton: View {
    let appleSignInHandler = AppleSignInHandler()
    var body: some View {
        Button(action: {
            // Perform the Apple Sign In process here
            let appleIDProvider = ASAuthorizationAppleIDProvider()
            let request = appleIDProvider.createRequest()
            request.requestedScopes = [.fullName, .email]
            let authorizationController = ASAuthorizationController(authorizationRequests: [request])
            authorizationController.delegate = appleSignInHandler
            authorizationController.presentationContextProvider = appleSignInHandler
            authorizationController.performRequests()
        }) {
            HStack {
                Image(systemName: "apple.logo")
                    .resizable()
                    .frame(width: 26, height: 32)
                    .foregroundColor(Color("black"))
                Text("Continue With Apple")
                    .foregroundColor(Color("black"))
            }
            .padding()
            .frame(width: 340, height: 55)
            .background(Color(.label))
            .cornerRadius(28)
        }
    }
}

struct TestView: View {
    var body: some View {
        CustomAppleSignInButton()
    }
}
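Worth noting when debugging the nil values: Apple returns fullName and email only on the very first authorization of the app for a given Apple ID; on subsequent sign-ins both come back nil (revoking the app's Sign in with Apple access in the Apple ID settings resets this). A small sketch of reading and noting the one-time fields inside the success delegate:
func authorizationController(controller: ASAuthorizationController, didCompleteWithAuthorization authorization: ASAuthorization) {
    if let appleIDCredential = authorization.credential as? ASAuthorizationAppleIDCredential {
        // `user` is a stable identifier returned on every sign-in.
        let userID = appleIDCredential.user
        // fullName/email are non-nil only on the first authorization; persist them then.
        let fullName = appleIDCredential.fullName   // PersonNameComponents?
        let email = appleIDCredential.email         // String?
        print("user: \(userID), name: \(String(describing: fullName)), email: \(String(describing: email))")
    }
}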

SwiftUI: stop speech recognition recording session upon navigating in TabView

A speech recognition recording session starts when the user opens or navigates to a tab in the TabView, and it needs to keep recording until either 'back' or 'next' is recognised, after which it can stop. If neither word is recognised and the user navigates within the TabView, the session isn't stopped correctly and throws an error, which then prevents a new recording session from being started.
Error thrown:
[Utility] +[AFAggregator logDictationFailedWithError:] Error Domain=kAFAssistantErrorDomain Code=203 "Corrupt" UserInfo={NSLocalizedDescription=Corrupt, NSUnderlyingError=0x281281aa0 {Error Domain=SiriSpeechErrorDomain Code=102 "(null)"}}
I have tried implementing stopRecording() in the TabView's selection set method, right before a session is started (startRecording()), but that does not seem to work. Is there a way to stop the recording session upon navigating through the TabView? I want to eventually be able to navigate through the tabs using voice.
Content view:
struct ContentView: View {
    @State private var selectedTab = 1
    static let voiceRecogniser = VoiceRecogniser()

    var body: some View {
        VStack {
            TabView(
                selection: Binding(
                    get: { selectedTab },
                    set: {
                        selectedTab = $0
                        ContentView.voiceRecogniser.startRecording()
                    })
            ) {
                Text("Tab 1")
                    .tag(1)
                Text("Tab 2")
                    .tag(2)
                Text("Tab 3")
                    .tag(3)
            }
            .tabViewStyle(PageTabViewStyle())
        }
        .onAppear {
            ContentView.voiceRecogniser.startRecording()
        }
    }
}
VoiceRecogniser class:
class VoiceRecogniser {
    private let speechRecogniser = SFSpeechRecognizer(locale: Locale(identifier: "en-GB"))!
    private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
    private var recognitionTask: SFSpeechRecognitionTask?
    private let audioEngine = AVAudioEngine()

    func startRecording() {
        guard speechRecogniser.isAvailable else {
            return
        }
        guard SFSpeechRecognizer.authorizationStatus() == .authorized else {
            SFSpeechRecognizer.requestAuthorization({ (status) in
            })
            return
        }
        recognitionTask?.cancel()
        self.recognitionTask = nil

        let audioSession = AVAudioSession.sharedInstance()
        do {
            try audioSession.setCategory(AVAudioSession.Category.playAndRecord, mode: .measurement, options: .duckOthers) // Ensure session can play audio as well as record
            try audioSession.setMode(AVAudioSession.Mode.measurement)
            try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
        } catch {
            print("Error with audio session")
        }

        let inputNode = audioEngine.inputNode
        recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
        guard let recognitionRequest = recognitionRequest else { fatalError("Unable to create a SFSpeechAudioBufferRecognitionRequest object") }
        recognitionRequest.shouldReportPartialResults = true

        self.recognitionTask = speechRecogniser.recognitionTask(with: recognitionRequest) { result, error in
            var isFinal = false
            if let result = result {
                let spokenText = result.bestTranscription.formattedString
                let voiceCommands = ["Next", "Back"]
                let string = spokenText.lowercased()
                for command in voiceCommands {
                    if string.contains(command.lowercased()) {
                        switch command {
                        case "Next":
                            print("Go next")
                        case "Back":
                            print("Go back")
                        default:
                            print("Default")
                        }
                        isFinal = true // stop listening once a voice command was recognised
                    }
                }
                //isFinal = true // stop listening after saying anything
            }
            if error != nil || isFinal {
                self.stopRecording()
            }
        }

        let recordingFormat = inputNode.outputFormat(forBus: 0)
        inputNode.removeTap(onBus: 0)
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
            self.recognitionRequest?.append(buffer)
        }
        audioEngine.prepare()
        do {
            try audioEngine.start()
        } catch {
            print("Error with starting audio engine")
        }
    }

    func stopRecording() {
        self.audioEngine.stop()
        self.recognitionRequest?.endAudio()
        // Cancel before clearing the reference; cancel() on an already-nil task is a no-op.
        self.recognitionTask?.cancel()
        self.recognitionTask = nil
        self.recognitionRequest = nil
    }
}
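Given the corrected teardown order in stopRecording() above (the original set recognitionTask to nil before calling cancel(), so the cancel never ran), one way to avoid the Code=203 "Corrupt" failure is to end the previous session cleanly in the selection setter before each new one starts. A minimal sketch, assuming the ContentView shown earlier:
TabView(
    selection: Binding(
        get: { selectedTab },
        set: { newTab in
            selectedTab = newTab
            // Tear down the old session fully before starting a new one.
            ContentView.voiceRecogniser.stopRecording()
            ContentView.voiceRecogniser.startRecording()
        })
) {
    // tabs as before
}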

AudioKit Conflict between Midi Instrument and Mic behavior

I am trying to make my app produce MIDI notes while simultaneously listening to input from the mic:
var engine = AudioEngine()
var initialDevice: Device!
var mic: AudioEngine.InputNode!
var tappableNodeA: Fader!
var tappableNodeB: Fader!
var tappableNodeC: Fader!
var silence: Fader!
var tracker: PitchTap!
private var instrument = MIDISampler(name: "Instrument 1")

func noteOn(note: MIDINoteNumber) {
    instrument.play(noteNumber: note, velocity: 90, channel: 0)
}

func noteOff(note: MIDINoteNumber) {
    instrument.stop(noteNumber: note, channel: 0)
}

override func viewDidLoad() {
    super.viewDidLoad()
    print("init started")
    guard let input = engine.input else { fatalError() }
    guard let device = engine.inputDevice else { fatalError() }
    print("input selected")
    initialDevice = device
    engine.output = instrument
    mic = input
    tappableNodeA = Fader(mic)
    tappableNodeB = Fader(tappableNodeA)
    tappableNodeC = Fader(tappableNodeB)
    silence = Fader(tappableNodeC, gain: 0)
    engine.output = silence
    print("objects init")
    tracker = PitchTap(mic) { pitch, amp in
        DispatchQueue.main.async {
            self.update(pitch[0], amp[0])
        }
    }
    start()
    // other init that are not related
}
The start function is written below:
func start() {
    do {
        if let fileURL = Bundle.main.url(forResource: "Sounds/Sampler Instruments/sawPiano1", withExtension: "exs") {
            try instrument.loadInstrument(url: fileURL)
        } else {
            Log("Could not find file")
        }
    } catch {
        Log("Could not load instrument")
    }
    do {
        try engine.start()
        tracker.start()
    } catch let err {
        print("caught error at start")
        Log(err)
    }
}
As soon as I make the first try call to set up the instrument, I get the following error:
*** Terminating app due to uncaught exception 'com.apple.coreaudio.avfaudio', reason: 'required condition is false: _engine != nil
Why would the condition be false?
OK, so the solution was to separate the calls into two functions and place the first call before the tap-node configuration:
var engine = AudioEngine()
var initialDevice: Device!
var mic: AudioEngine.InputNode!
var tappableNodeA: Fader!
var tappableNodeB: Fader!
var tappableNodeC: Fader!
var silence: Fader!
var tracker: PitchTap!
private var instrument = MIDISampler(name: "Instrument 1")

func noteOn(note: MIDINoteNumber) {
    instrument.play(noteNumber: note, velocity: 90, channel: 0)
}

func noteOff(note: MIDINoteNumber) {
    instrument.stop(noteNumber: note, channel: 0)
}

override func viewDidLoad() {
    super.viewDidLoad()
    print("init started")
    guard let input = engine.input else { fatalError() }
    guard let device = engine.inputDevice else { fatalError() }
    print("input selected")
    initialDevice = device
    engine.output = instrument
    start1()
    mic = input
    tappableNodeA = Fader(mic)
    tappableNodeB = Fader(tappableNodeA)
    tappableNodeC = Fader(tappableNodeB)
    silence = Fader(tappableNodeC, gain: 0)
    engine.output = silence
    print("objects init")
    tracker = PitchTap(mic) { pitch, amp in
        DispatchQueue.main.async {
            self.update(pitch[0], amp[0])
        }
    }
    start()
    // other init that are not related
}

func start1() {
    do {
        if let fileURL = Bundle.main.url(forResource: "Sounds/Sampler Instruments/sawPiano1", withExtension: "exs") {
            try instrument.loadInstrument(url: fileURL)
        } else {
            Log("Could not find file")
        }
    } catch let err {
        Log("Could not load instrument")
        Log(err)
    }
}

func start() {
    do {
        try engine.start()
        tracker.start()
    } catch let err {
        print("caught error at start")
        Log(err)
    }
}
Although the exception is now gone, there is still no sound being played for some reason.
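A plausible reason for the remaining silence: the later engine.output = silence assignment replaces engine.output = instrument, so the instrument is no longer connected to the output at all. A hedged sketch (not from the original post) that mixes both chains into a single output using AudioKit's Mixer:
// Route both the instrument and the silenced mic chain into one output,
// so the second assignment no longer disconnects the instrument.
let mixer = Mixer(instrument, silence)
engine.output = mixer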

Error with Apple's SFSpeechRecognition when app in Background Mode (phone locked)

I am attempting to use Apple's SFSpeechRecognition (Xcode 9, iOS 11, Swift 4). The following code uses a timer to start a new request for speech recognition. I have the background capability on, microphone usage granted, and speech recognition granted. If the phone is unlocked, everything works as expected. But when the phone is locked, I receive the following error:
2018-02-20 22:24:47.522562-0500 Speech-Recognition-Demo[3505:1234188] [Utility] +[AFAggregator logDictationFailedWithError:] Error Domain=kAFAssistantErrorDomain Code=1700 "(null)"
According to this link, speech recognition doesn't seem to work when the app is in background mode, but that information is old. I'm hoping someone has solved this or found a workaround.
Has anyone solved this problem, or can anyone suggest something to try? My alternative is to require the Apple Watch for my app, and I'd REALLY like to avoid that...
import UIKit
import Speech
class SpeechDetectionViewController: UIViewController,
SFSpeechRecognizerDelegate {
@IBOutlet weak var detectedTextLabel: UILabel!
@IBOutlet weak var colorView: UIView!
@IBOutlet weak var startButton: UIButton!
let audioEngine = AVAudioEngine()
let speechRecognizer: SFSpeechRecognizer? = SFSpeechRecognizer()
var request: SFSpeechAudioBufferRecognitionRequest?
var recognitionTask: SFSpeechRecognitionTask?
var isRecording = false
// timers
var timer = Timer()
let timerInterval = 5.0
var secondsElapsed = 0
// toggle for taking commands
var takeCommands = true
override func viewDidLoad() {
super.viewDidLoad()
self.requestSpeechAuthorization()
timer = Timer.scheduledTimer(timeInterval: timerInterval,target: self,selector: #selector(timerAction(timer:)) ,userInfo: nil,repeats: true)
}
@objc func timerAction(timer: Timer) {
/* if takeCommands {
startRecording()
} else {
stopRecording()
}
takeCommands = !takeCommands
print("takeCommands: \(takeCommands)")
*/
startRecording()
}
//MARK: IBActions and Cancel
@IBAction func startButtonTapped(_ sender: UIButton) {
startRecording()
}
func startRecording(){
if isRecording {
print("STOP talking.")
request?.endAudio() // Added line to mark end of recording
request = nil
//audioEngine.stop()
if let node = audioEngine.inputNode {
node.removeTap(onBus: 0)
}
recognitionTask?.cancel()
isRecording = false
startButton.backgroundColor = UIColor.gray
} else {
print("START talking.")
self.recordAndRecognizeSpeech()
isRecording = true
startButton.backgroundColor = UIColor.red
}
}
func stopRecording() {
//audioEngine.stop()
if let node = audioEngine.inputNode {
node.removeTap(onBus: 0)
}
recognitionTask?.cancel()
isRecording = false
startButton.backgroundColor = UIColor.gray
}
//MARK: - Recognize Speech
func recordAndRecognizeSpeech() {
request = SFSpeechAudioBufferRecognitionRequest()
guard let node = audioEngine.inputNode else { return }
let recordingFormat = node.outputFormat(forBus: 0)
node.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { buffer, _ in
self.request?.append(buffer)
}
if !audioEngine.isRunning {
audioEngine.prepare()
do {
try audioEngine.start()
} catch {
self.sendAlert(message: "There has been an audio engine error.")
return print(error)
}
}
guard let myRecognizer = SFSpeechRecognizer() else {
self.sendAlert(message: "Speech recognition is not supported for your current locale.")
return
}
if !myRecognizer.isAvailable {
self.sendAlert(message: "Speech recognition is not currently available. Check back at a later time.")
// Recognizer is not available right now
return
}
recognitionTask = speechRecognizer?.recognitionTask(with: request!, resultHandler: { result, error in
if result != nil { // check to see if result is empty (i.e. no speech found)
if let result = result {
let bestString = result.bestTranscription.formattedString
self.detectedTextLabel.text = bestString
var lastString: String = ""
for segment in result.bestTranscription.segments {
let indexTo = bestString.index(bestString.startIndex, offsetBy: segment.substringRange.location)
lastString = bestString.substring(from: indexTo)
}
self.checkForColorsSaid(resultString: lastString)
} else if let error = error {
self.sendAlert(message: "There has been a speech recognition error")
print(error)
}
}
})
}
//MARK: - Check Authorization Status
func requestSpeechAuthorization() {
SFSpeechRecognizer.requestAuthorization { authStatus in
OperationQueue.main.addOperation {
switch authStatus {
case .authorized:
self.startButton.isEnabled = true
case .denied:
self.startButton.isEnabled = false
self.detectedTextLabel.text = "User denied access to speech recognition"
case .restricted:
self.startButton.isEnabled = false
self.detectedTextLabel.text = "Speech recognition restricted on this device"
case .notDetermined:
self.startButton.isEnabled = false
self.detectedTextLabel.text = "Speech recognition not yet authorized"
}
}
}
}
//MARK: - UI / Set view color.
func checkForColorsSaid(resultString: String) {
switch resultString {
case "red":
colorView.backgroundColor = UIColor.red
case "orange":
colorView.backgroundColor = UIColor.orange
case "yellow":
colorView.backgroundColor = UIColor.yellow
case "green":
colorView.backgroundColor = UIColor.green
case "blue":
colorView.backgroundColor = UIColor.blue
case "purple":
colorView.backgroundColor = UIColor.purple
case "black":
colorView.backgroundColor = UIColor.black
case "white":
colorView.backgroundColor = UIColor.white
case "gray":
colorView.backgroundColor = UIColor.gray
default: break
}
}
//MARK: - Alert
func sendAlert(message: String) {
let alert = UIAlertController(title: "Speech Recognizer Error", message: message, preferredStyle: UIAlertControllerStyle.alert)
alert.addAction(UIAlertAction(title: "OK", style: UIAlertActionStyle.default, handler: nil))
self.present(alert, animated: true, completion: nil)
}
}
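One prerequisite worth verifying before chasing the Code=1700 error further: capturing mic audio while locked generally requires the audio entry in UIBackgroundModes (enabling "Audio, AirPlay, and Picture in Picture" under the target's Background Modes adds it). Whether mic-driven SFSpeechRecognizer keeps working with the phone locked has varied across iOS releases, so treat this as a necessary condition rather than a guaranteed fix. The Info.plist entry looks like:
<key>UIBackgroundModes</key>
<array>
    <string>audio</string>
</array>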

Streaming Audio stops after SFSpeechRecognitionTask starts

I'm streaming an audio file, which works perfectly, and I'm also running a speech recognition task, which also works perfectly. But when the scenario below occurs, the audio file stops streaming:
I start speech recognition, and it detects speech perfectly.
Now I start streaming the audio file; no audio plays after that.
Below is my code in ViewController.swift:
//
// ViewController.swift
// RecordAndPlay
//
// Created by Obaid on 8/15/17.
// Copyright © 2017 test. All rights reserved.
//
import UIKit
import AVFoundation
import AudioToolbox
import Speech
class ViewController: UIViewController,SFSpeechRecognizerDelegate {
@IBOutlet var imgBackground: UIImageView!
@IBOutlet var lblTextValue: UILabel!
@IBOutlet var btnPlay: UIButton!
@IBOutlet var btnRepeatIt: UIButton!
@IBOutlet var microphoneButton: UIButton!
var stringValue: String!
var indexValue=0
var playButton:UIButton?
var player: AVPlayer! = nil
var playerItem:AVPlayerItem?
private let speechRecognizer = SFSpeechRecognizer(locale: Locale.init(identifier: "en-US"))!
var recordButton: UIButton!
var recordingSession: AVAudioSession!
var audioRecorder: AVAudioRecorder!
private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
private var recognitionTask: SFSpeechRecognitionTask?
private let audioEngine = AVAudioEngine()
var arrayOfStrings: [String] = ["good morning", "good evening", "good night", "good afternoon", "well developed"]
var arrayOfStringsURL: [String] = ["https://storage.googleapis.com/abdul-sample-bucket/test/sampl1.mp3", "http://dictionary.cambridge.org/media/english/us_pron/u/usc/uscld/uscld00580.mp3", "http://dictionary.cambridge.org/media/english/us_pron/u/usc/uscld/uscld00588.mp3", "http://dictionary.cambridge.org/media/english/us_pron/u/usc/uscld/uscld00579.mp3", "http://dictionary.cambridge.org/media/english/uk_pron/u/ukw/ukwel/ukwelde019.mp3"]
override func viewDidLoad() {
super.viewDidLoad()
initializeSpeechRecognition()
// Do any additional setup after loading the view, typically from a nib.
//createLabels()
check_record_permission()
lblTextValue.text=arrayOfStrings[0]
imgBackground.backgroundColor=UIColor.yellow
imgBackground.frame = CGRect(x: 0, y: 140, width: self.view.frame.width, height: self.imgBackground.frame.height)
btnPlay.setImage(UIImage(named: "play.png"), for: .normal)
let xOrigin=self.view.frame.width - 70
btnPlay.frame=CGRect(x:xOrigin, y: self.btnPlay.frame.origin.y, width:self.btnPlay.frame.width, height: self.btnPlay.frame.height)
btnRepeatIt.isHidden=true
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
var btnClicked=0
@IBAction func start_play(_ sender: UIButton) {
let url = URL(string: arrayOfStringsURL[indexValue])
print(arrayOfStringsURL[indexValue])
let playerItem:AVPlayerItem = AVPlayerItem(url: url!)
player=nil
player = AVPlayer(playerItem: playerItem)
player!.play()
imgBackground.backgroundColor=UIColor.green
lblTextValue.textColor=UIColor.white
lblTextValue.text="Repeat it!"
btnClicked=1
microphoneButton.isHidden=false
microphoneButton.frame=btnPlay.frame
microphoneButton.setImage(UIImage(named: "record.png"), for: .normal)
btnPlay.isHidden=true
btnRepeatIt.isHidden=false
}
@IBAction func repeatTheSound(_ sender: UIButton) {
print("repeat the sound")
let url = URL(string: arrayOfStringsURL[1])
let playerItem:AVPlayerItem = AVPlayerItem(url: url!)
player=nil
player = AVPlayer(playerItem: playerItem)
player!.play()
}
func initializeSpeechRecognition(){
microphoneButton.isHidden=true
speechRecognizer.delegate = self
SFSpeechRecognizer.requestAuthorization { (authStatus) in
var isButtonEnabled = false
switch authStatus {
case .authorized:
isButtonEnabled = true
case .denied:
isButtonEnabled = false
print("User denied access to speech recognition")
case .restricted:
isButtonEnabled = false
print("Speech recognition restricted on this device")
case .notDetermined:
isButtonEnabled = false
print("Speech recognition not yet authorized")
}
OperationQueue.main.addOperation() {
if(self.btnClicked==1){
self.btnPlay.isEnabled = isButtonEnabled
}
}
}
}
var isAudioRecordingGranted: Bool!
func check_record_permission()
{
switch AVAudioSession.sharedInstance().recordPermission() {
case AVAudioSessionRecordPermission.granted:
isAudioRecordingGranted = true
break
case AVAudioSessionRecordPermission.denied:
isAudioRecordingGranted = false
break
case AVAudioSessionRecordPermission.undetermined:
AVAudioSession.sharedInstance().requestRecordPermission() { [unowned self] allowed in
DispatchQueue.main.async {
if allowed {
self.isAudioRecordingGranted = true
} else {
self.isAudioRecordingGranted = false
}
}
}
break
default:
break
}
}
//Speech Recognition
@IBAction func microphoneTapped(_ sender: AnyObject) {
if audioEngine.isRunning {
audioEngine.stop()
recognitionRequest?.endAudio()
} else {
startRecording()
}
}
func startRecording() {
if recognitionTask != nil { //1
recognitionTask?.cancel()
recognitionTask = nil
}
let audioSession = AVAudioSession.sharedInstance() //2
do {
try audioSession.setCategory(AVAudioSessionCategoryRecord)
try audioSession.setMode(AVAudioSessionModeMeasurement)
try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
} catch {
print("audioSession properties weren't set because of an error.")
}
recognitionRequest = SFSpeechAudioBufferRecognitionRequest() //3
guard let inputNode = audioEngine.inputNode else {
fatalError("Audio engine has no input node")
} //4
guard let recognitionRequest = recognitionRequest else {
fatalError("Unable to create an SFSpeechAudioBufferRecognitionRequest object")
} //5
recognitionRequest.shouldReportPartialResults = true //6
recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in //7
var isFinal = false //8
if result != nil {
self.stringValue = result?.bestTranscription.formattedString //9
isFinal = (result?.isFinal)!
if(isFinal){
self.stringValue = self.stringValue.lowercased()
print(self.stringValue)
if(self.stringValue.contains(self.arrayOfStrings[0])){
self.imgBackground.backgroundColor=UIColor.green
self.lblTextValue.text="Well Done"
self.stringValue=""
self.updatePage()
}
else{
self.imgBackground.backgroundColor=UIColor.red
self.lblTextValue.text="Wrong! Try Again"
self.stringValue=""
}
}
self.stopRecording()
}
if error != nil || isFinal { //10
self.audioEngine.stop()
inputNode.removeTap(onBus: 0)
self.recognitionRequest = nil
self.recognitionTask = nil
}
})
let recordingFormat = inputNode.outputFormat(forBus: 0) //11
inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
self.recognitionRequest?.append(buffer)
}
audioEngine.prepare() //12
do {
try audioEngine.start()
} catch {
print("audioEngine couldn't start because of an error.")
}
}
func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) {
if available {
microphoneButton.isEnabled = true
} else {
microphoneButton.isEnabled = false
}
}
func updatePage(){
print(indexValue)
if(indexValue<self.arrayOfStrings.count){
indexValue += 1
self.imgBackground.backgroundColor=UIColor.yellow
self.lblTextValue.textColor=UIColor.black
self.lblTextValue.text=self.arrayOfStrings[indexValue]
self.btnPlay.isHidden=false
self.microphoneButton.isHidden=true
}
else{
indexValue=0
}
}
func stopRecording(){
if audioEngine.isRunning {
audioEngine.stop()
recognitionRequest?.endAudio()
self.recognitionTask = nil
}
}
}
The startRecording() method is where I start speech recognition, and the start_play() method is where I play the audio.
Something is going on in the background that needs to be stopped after I start speech recognition.
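A likely culprit is the audio session: startRecording() switches the shared session to AVAudioSessionCategoryRecord, which silences AVPlayer output until the category changes back. A minimal sketch using a play-and-record category instead (Swift 4-era API, matching the rest of the post):
let audioSession = AVAudioSession.sharedInstance()
do {
    // .playAndRecord keeps AVPlayer audible while the mic feeds recognition.
    try audioSession.setCategory(AVAudioSessionCategoryPlayAndRecord, with: [.defaultToSpeaker])
    try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
} catch {
    print("audioSession properties weren't set because of an error.")
}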