I have the following code:
let speechRecognizer = SFSpeechRecognizer()!
let audioEngine = AVAudioEngine()
var recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
var recognitionTask = SFSpeechRecognitionTask()
var audioPlayer : AVAudioPlayer!
override func viewDidLoad() {
super.viewDidLoad()
playSound(sound: "oops")
speechRecognizer.delegate = self
requestSpeechAuth()
}
func requestSpeechAuth(){
SFSpeechRecognizer.requestAuthorization { (authStatus) in
OperationQueue.main.addOperation({
switch authStatus {
case.authorized:
print("authorized")
case.denied:
print("denied")
case.restricted:
print("restricted")
case.notDetermined:
print("not determined")
}
})
}
}
// Function called when I press on my record button
func SpeechButtonDown() {
print("Start recording")
if audioEngine.isRunning {
endRecording()
} else {
do {
let audioSession = AVAudioSession.sharedInstance()
try audioSession.setCategory(AVAudioSessionCategoryRecord)
try audioSession.setMode(AVAudioSessionModeMeasurement)
try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
if let inputNode = audioEngine.inputNode {
recognitionRequest.shouldReportPartialResults = true
recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in
print("1")
if let result = result {
self.instructionLabel.text = result.bestTranscription.formattedString
print("2")
if result.isFinal {
self.audioEngine.stop()
inputNode.removeTap(onBus: 0)
if self.instructionLabel.text != "" {
self.compareWordwithVoice()
}
}
}
})
let recognitionFormat = inputNode.outputFormat(forBus: 0)
inputNode.installTap(onBus: 0, bufferSize: 1024, format: recognitionFormat, block: { (buffer, when) in
self.recognitionRequest.append(buffer)
})
audioEngine.prepare()
try audioEngine.start()
}
} catch {
}
}
}
// Function called when I release the record button
func EndRecording() {
endRecording()
print("Stop recording")
}
func endRecording() {
audioEngine.stop()
recognitionRequest.endAudio()
audioEngine.inputNode?.removeTap(onBus: 0)
}
func playSound(sound: String) {
if let url = Bundle.main.url(forResource: sound, withExtension: "wav") {
do {
audioPlayer = try AVAudioPlayer(contentsOf: url)
guard let player = audioPlayer else { return }
player.prepareToPlay()
player.play()
print("tutu")
} catch let error {
print(error.localizedDescription)
}
}
}
func compareWordwithVoice() {
let StringToLearn = setWordToLearn()
print("StringToLearn : \(StringToLearn)")
if let StringRecordedFull = instructionLabel.text{
let StringRecorded = (StringRecordedFull as NSString).replacingOccurrences(of: " ", with: "").lowercased()
print("StringRecorded : \(StringRecorded)")
if StringRecorded == "appuyezsurleboutonendessousetprenoncezl’expression" {
print("not yet")
} else {
if StringToLearn == StringRecorded {
playSound(sound: "success")
print("success")
// update UI
} else {
playSound(sound: "oops")
print("oops")
// update UI
}
}
}
}
func setWordToLearn() -> String {
if let wordToLearnFull = expr?.expression {
print(wordToLearnFull)
var wordToLearn = (wordToLearnFull as NSString).replacingOccurrences(of: " ", with: "").lowercased()
wordToLearn = (wordToLearn as NSString).replacingOccurrences(of: ".", with: "")
wordToLearn = (wordToLearn as NSString).replacingOccurrences(of: "!", with: "")
wordToLearn = (wordToLearn as NSString).replacingOccurrences(of: "?", with: "")
wordToLearn = (wordToLearn as NSString).replacingOccurrences(of: ",", with: "")
wordToLearn = (wordToLearn as NSString).replacingOccurrences(of: "/", with: "")
print(wordToLearn)
return wordToLearn
}
print("no wordToLearn")
return ""
}
The problem is that playSound works perfectly when it is called in viewDidLoad, but not when it is called by the compareWordwithVoice() function, even though "tutu" is printed in both cases, so the playSound function does run every time.
Could the problem be that AVAudioPlayer and AVAudioEngine cannot work at the same time?
Thanks
I've experienced the same thing with my code, and from searching online it seems there is an undocumented bug "when using AVAudioPlayer and Engine separately".
I got the information from the following link; I did not find anything else online that explains why this bug happens.
https://swiftios8dev.wordpress.com/2015/03/05/sound-effects-using-avaudioengine/
The suggestion was to use AVAudioEngine for everything.
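As a rough illustration of that suggestion (a minimal sketch, assuming a bundled "oops.wav"; the playEffect helper and effectNode name are mine, not from the original code), the effect could be scheduled on an AVAudioPlayerNode attached to the same engine that feeds the recognizer, instead of going through a separate AVAudioPlayer:
import AVFoundation

let effectNode = AVAudioPlayerNode()

func playEffect(named name: String, on engine: AVAudioEngine) {
    guard let url = Bundle.main.url(forResource: name, withExtension: "wav"),
          let file = try? AVAudioFile(forReading: url) else { return }
    // Attach and connect the player node to the engine's mixer the first time it is used
    if effectNode.engine == nil {
        engine.attach(effectNode)
        engine.connect(effectNode, to: engine.mainMixerNode, format: file.processingFormat)
    }
    if !engine.isRunning {
        engine.prepare()
        try? engine.start()
    }
    effectNode.scheduleFile(file, at: nil, completionHandler: nil)
    effectNode.play()
}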
I think "compareThings" always plays "oops" sound and this sound is not good (too quiet or broken).
Please try to play "oops" sound from "viewDidLoad" func to make sure sound is okay.
If it is okay (I don't think so) - set breakpoint in "playSound" func to see what is going on (sound name, does it exists etc).
Video not playing after an app rebuild (file path URL saved into Core Data) using AVCaptureSession.
The file path is not changing before and after the rebuild:
file:///private/var/mobile/Containers/Data/Application/3DA93FBC-9A20-40B4-A017-B3D5C7768301/tmp/63F6CEED-3202-4F5F-999B-5F138D73635D.mp4
I have tried everything, but nothing works.
Here is my code for recording the video:
func setupPreview() {
// Configure previewLayer
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer?.frame = shapeLayer.bounds
previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
shapeLayer.layer.addSublayer(previewLayer!)
}
func setupSession() -> Bool {
captureSession.sessionPreset = AVCaptureSession.Preset.high
// Setup Camera
let camera = AVCaptureDevice.default(for: AVMediaType.video)!
do {
let input = try AVCaptureDeviceInput(device: camera)
if captureSession.canAddInput(input) {
captureSession.addInput(input)
activeInput = input
}
} catch {
print("Error setting device video input: \(error)")
return false
}
// Setup Microphone
let microphone = AVCaptureDevice.default(for: AVMediaType.audio)!
do {
let micInput = try AVCaptureDeviceInput(device: microphone)
if captureSession.canAddInput(micInput) {
captureSession.addInput(micInput)
}
} catch {
print("Error setting device audio input: \(error)")
return false
}
// Movie output
if captureSession.canAddOutput(movieOutput) {
captureSession.addOutput(movieOutput)
}
return true
}
func startSession() {
if !captureSession.isRunning {
videoQueue().async {
self.captureSession.startRunning()
}
}
}
func stopSession() {
if captureSession.isRunning {
videoQueue().async {
self.captureSession.stopRunning()
}
}
}
func videoQueue() -> DispatchQueue {
return DispatchQueue.main
}
func currentVideoOrientation() -> AVCaptureVideoOrientation {
var orientation: AVCaptureVideoOrientation
switch UIDevice.current.orientation {
case .portrait:
orientation = AVCaptureVideoOrientation.portrait
case .landscapeRight:
orientation = AVCaptureVideoOrientation.landscapeLeft
case .portraitUpsideDown:
orientation = AVCaptureVideoOrientation.portraitUpsideDown
default:
orientation = AVCaptureVideoOrientation.landscapeRight
}
return orientation
}
func startRecording() {
if movieOutput.isRecording == false {
save.setTitle("stop", for: UIControl.State.normal)
let connection = movieOutput.connection(with: AVMediaType.video)
if (connection?.isVideoOrientationSupported)! {
connection?.videoOrientation = currentVideoOrientation()
}
if (connection?.isVideoStabilizationSupported)! {
connection?.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.auto
}
let device = activeInput.device
if (device.isSmoothAutoFocusSupported) {
do {
try device.lockForConfiguration()
device.isSmoothAutoFocusEnabled = false
device.unlockForConfiguration()
} catch {
print("Error setting configuration: \(error)")
}
}
//EDIT2: And I forgot this
outputURL = tempURL()
movieOutput.startRecording(to: outputURL, recordingDelegate: self)
}
else {
stopRecording()
}
}
func tempURL() -> URL? {
let directory = NSTemporaryDirectory() as NSString
let path = directory.appendingPathComponent(NSUUID().uuidString + ".mp4")
path22 = path
let directoryURL: URL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
let folderPath: URL = directoryURL.appendingPathComponent("Downloads", isDirectory: true)
let fileURL: URL = folderPath.appendingPathComponent(path)
return URL(fileURLWithPath: path)
}
func stopRecording() {
if movieOutput.isRecording == true {
movieOutput.stopRecording()
}
}
Here is the code that saves into Core Data:
let managedObject = self.managedObjectContext
entity = NSEntityDescription.entity(forEntityName: "MediaData", in: managedObject!)
let personMO = NSManagedObject(entity: entity, insertInto: managedObject)
personMO.setValue("\(self.videoURL!)", forKey: "videosS")
personMO.setValue(dataImage, forKey: "thumbnails")
print(personMO)
do
{
try managedObject?.save()
print("video saved")
}
catch
{
print("Catch Erroe : Failed To
}
let appdel = UIApplication.shared.delegate as! AppDelegate
appdel.avplayer = AVPlayer(url: videoURL!)
print(videoURL!)
let playerLayer = AVPlayerLayer(player: appdel.avplayer)
playerLayer.frame = self.view.bounds
self.view.layer.addSublayer(playerLayer)
appdel.avplayer?.play()
You must never save a full file path into Core Data or anywhere else. File paths are not persistent: your app is sandboxed, and the sandbox path can change at any time, especially between launches and installations.
Instead, save the file name and reconstruct the path each time you need it. Just as you are calling FileManager.default.urls(for: .documentDirectory...) to construct the file path initially, so you must call it every time you want to access this file.
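A minimal sketch of that approach (the fileName and documentsURL names are illustrative, not taken from the question): store only the file name in the Core Data attribute and rebuild the URL from the Documents directory whenever the file is needed. This also assumes the recording is written into the Documents directory rather than tmp.
// When saving: keep just the file name, e.g. in the "videosS" attribute
let fileName = NSUUID().uuidString + ".mp4"
personMO.setValue(fileName, forKey: "videosS")

// When playing back later: rebuild the full URL from the current sandbox path
let documentsURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
let videoURL = documentsURL.appendingPathComponent(fileName)
appdel.avplayer = AVPlayer(url: videoURL)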
I am trying to use the AVAudioEngine to play a button sound. But unfortunately the sound file is played only once.
The Idea is, that the user taps on the button, a sound plays and the recording starts. After the user taps on the button again, a second sound should be playing indicating, that the recording session has been ended.
So far the first sound appears, and the recording starts.
Unfortunately the second sound (the ending sound) won't be played.
I have also found that when I use the same AudioEngine as the recording function, the sound won't be played at all.
As I am completely new to the AVFoundation framework, I am not sure what the issue here is.
Thanks in advance.
var StartSoundEngineScene1 = AVAudioEngine()
var StartSoundNodeScene1 = AVAudioPlayerNode()
func SetupAudio(AudioEngine: AVAudioEngine, SoundNode: AVAudioPlayerNode, FileURL: URL) {
guard let AudioFile = try? AVAudioFile(forReading: FileURL) else{ return }
let AudioSession = AVAudioSession.sharedInstance()
AudioEngine.attach(SoundNode)
AudioEngine.connect(SoundNode, to: AudioEngine.mainMixerNode, format: AudioFile.processingFormat)
AudioEngine.prepare()
}
override func viewDidLoad() {
super.viewDidLoad()
SetupAudio(AudioEngine: StartSoundEngineScene1, SoundNode: StartSoundNodeScene1, FileURL: StartRecSound)
}
func ButtonSound (AudioEngine: AVAudioEngine, SoundNode: AVAudioPlayerNode, FileURL: URL){
try? AudioEngine.start()
guard let audioFile = try? AVAudioFile(forReading: FileURL) else{ return }
SoundNode.scheduleFile(audioFile, at: nil, completionHandler: nil)
SoundNode.volume = 0.16
SoundNode.play()
}
func StartRecording(){
ButtonSound(AudioEngine: StartSoundEngineScene1, SoundNode: StartSoundNodeScene1, FileURL: StartRecSound)
Timer.scheduledTimer(withTimeInterval: 0.7, repeats: false) { timer in
if audioEngine.isRunning {
audioEngine.stop()
recognitionRequest?.endAudio()
} else {
print("Rercording Started")
if let recognitionTask = self.recognitionTask {
recognitionTask.cancel()
self.recognitionTask = nil
}
self.recordedMessage = ""
let audioSession = AVAudioSession.sharedInstance()
do {
try audioSession.setCategory(AVAudioSession.Category.record)
try audioSession.setMode(AVAudioSession.Mode.measurement)
}catch {
print(error)
}
recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
guard let recognitionRequest = self.recognitionRequest else {
fatalError("Unable to create a speech audio buffer")
}
recognitionRequest.shouldReportPartialResults = true
recognitionRequest.requiresOnDeviceRecognition = true
recognitionTask = speechRecognizer?.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in
var isFinal = false
if let result = result {
let sentence = result.bestTranscription.formattedString
self.recordedMessage = sentence
print (self.recordedMessage)
isFinal = result.isFinal
}
if error != nil || isFinal {
self.audioEngine.stop()
self.audioEngine.inputNode.removeTap(onBus: 0)
self.recognitionRequest = nil
self.recognitionTask = nil
self.RecordBtn.isEnabled = true
}
})
let recordingFormat = audioEngine.inputNode.outputFormat(forBus: 0)
audioEngine.inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
self.recognitionRequest?.append(buffer)
}
audioEngine.prepare()
do{
try audioEngine.start()
}catch {
print(error)
}
}
}
}
func StopRecording(){
if audioEngine.isRunning{
audioEngine.stop()
ButtonSound(AudioEngine: StartSoundEngineScene1, SoundNode: StartSoundNodeScene1, FileURL: StopRecSound)
recognitionRequest?.endAudio()
audioEngine.inputNode.removeTap(onBus: 0)
}
}
You set the AVAudioSession category to record:
try audioSession.setCategory(AVAudioSession.Category.record)
If you want to play and record concurrently, you should set the category to playAndRecord instead.
Also, if you change the AVAudioSession during playing or recording, AVAudioEngine's configuration changes and it fires the AVAudioEngineConfigurationChange notification.
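A hedged sketch of both points, reusing the engine and node names from the question (the observer body is only an outline):
let audioSession = AVAudioSession.sharedInstance()
do {
    // .playAndRecord lets the recognition tap and the player node coexist
    try audioSession.setCategory(.playAndRecord, mode: .measurement, options: [.defaultToSpeaker])
    try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
} catch {
    print(error)
}

// If the session is still changed while the engine runs, react to the configuration change
let configObserver = NotificationCenter.default.addObserver(
    forName: .AVAudioEngineConfigurationChange,
    object: StartSoundEngineScene1,
    queue: .main) { _ in
    // e.g. re-run SetupAudio(...) and restart StartSoundEngineScene1 if needed
}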
I'm using SFSpeechRecognizer in my app to let the end user dictate a comment into a UITextView via a dedicated button (Start Speech Recognition), and that part works fine.
But if the user types some text manually first and then starts speech recognition, the previously entered text is erased. The same happens if the user runs speech recognition twice on the same UITextView (dictates a first part of the text, stops recording, then starts recording again): the previous text is erased.
Hence, I would like to know how I can append the text recognized by SFSpeechRecognizer to the existing text.
Here is my code:
func recordAndRecognizeSpeech(){
if recognitionTask != nil {
recognitionTask?.cancel()
recognitionTask = nil
}
let audioSession = AVAudioSession.sharedInstance()
do {
try audioSession.setCategory(AVAudioSessionCategoryRecord)
try audioSession.setMode(AVAudioSessionModeMeasurement)
try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
} catch {
print("audioSession properties weren't set because of an error.")
}
self.recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
guard let inputNode = audioEngine.inputNode else {
fatalError("Audio engine has no input node")
}
let recognitionRequest = self.recognitionRequest
recognitionRequest.shouldReportPartialResults = true
recognitionTask = speechRecognizer?.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in
var isFinal = false
self.decaration.text = (result?.bestTranscription.formattedString)!
isFinal = (result?.isFinal)!
let bottom = NSMakeRange(self.decaration.text.characters.count - 1, 1)
self.decaration.scrollRangeToVisible(bottom)
if error != nil || isFinal {
self.audioEngine.stop()
inputNode.removeTap(onBus: 0)
self.recognitionTask = nil
self.recognitionRequest.endAudio()
self.oBtSpeech.isEnabled = true
}
})
let recordingFormat = inputNode.outputFormat(forBus: 0)
inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
self.recognitionRequest.append(buffer)
}
audioEngine.prepare()
do {
try audioEngine.start()
} catch {
print("audioEngine couldn't start because of an error.")
}
}
I tried to update
self.decaration.text = (result?.bestTranscription.formattedString)!
by
self.decaration.text += (result?.bestTranscription.formattedString)!
but that duplicates each recognized sentence.
Any idea how I can do that?
Try saving the text before starting the recognition system.
func recordAndRecognizeSpeech(){
// one change here
let defaultText = self.decaration.text
if recognitionTask != nil {
recognitionTask?.cancel()
recognitionTask = nil
}
let audioSession = AVAudioSession.sharedInstance()
do {
try audioSession.setCategory(AVAudioSessionCategoryRecord)
try audioSession.setMode(AVAudioSessionModeMeasurement)
try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
} catch {
print("audioSession properties weren't set because of an error.")
}
self.recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
guard let inputNode = audioEngine.inputNode else {
fatalError("Audio engine has no input node")
}
let recognitionRequest = self.recognitionRequest
recognitionRequest.shouldReportPartialResults = true
recognitionTask = speechRecognizer?.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in
var isFinal = false
// one change here
self.decaration.text = defaultText + " " + (result?.bestTranscription.formattedString)!
isFinal = (result?.isFinal)!
let bottom = NSMakeRange(self.decaration.text.characters.count - 1, 1)
self.decaration.scrollRangeToVisible(bottom)
if error != nil || isFinal {
self.audioEngine.stop()
inputNode.removeTap(onBus: 0)
self.recognitionTask = nil
self.recognitionRequest.endAudio()
self.oBtSpeech.isEnabled = true
}
})
let recordingFormat = inputNode.outputFormat(forBus: 0)
inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
self.recognitionRequest.append(buffer)
}
audioEngine.prepare()
do {
try audioEngine.start()
} catch {
print("audioEngine couldn't start because of an error.")
}
}
result?.bestTranscription.formattedString returns the entire phrase recognised so far, which is why you should reset self.decaration.text each time you get a response from SFSpeechRecognizer.
I am working on an application that requires recording video. I found a helper class written by AppCoda (here is a link to the GitHub repo: https://github.com/appcoda/FullScreenCamera), but whenever I run it I get an error in the console saying noCamerasAvailable, followed by captureSessionIsMissing. I am committed to improving the code, but I just cannot figure out why this happens. Here is the helper class:
class CameraHelper: NSObject {
var captureSession: AVCaptureSession?
var currentCameraPosition: CameraPosition?
var frontCamera: AVCaptureDevice?
var frontCameraInput: AVCaptureDeviceInput?
var photoOutput: AVCapturePhotoOutput?
var rearCamera: AVCaptureDevice?
var rearCameraInput: AVCaptureDeviceInput?
var previewLayer: AVCaptureVideoPreviewLayer?
var flashMode = AVCaptureDevice.FlashMode.off
var photoCaptureCompletionBlock: ((UIImage?, Error?) -> Void)?
}
extension CameraHelper {
func prepare(completionHandler: @escaping (Error?) -> Void) {
func createCaptureSession() {
self.captureSession = AVCaptureSession()
}
func configureCaptureDevices() throws {
let session = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: AVMediaType.video, position: .unspecified)
let cameras = session.devices.compactMap { $0 }
guard !cameras.isEmpty else { throw CameraHelperError.noCamerasAvailable }
for camera in cameras {
if camera.position == .front {
self.frontCamera = camera
}
if camera.position == .back {
self.rearCamera = camera
try camera.lockForConfiguration()
camera.focusMode = .continuousAutoFocus
camera.unlockForConfiguration()
}
}
}
func configureDeviceInputs() throws {
guard let captureSession = self.captureSession else { throw CameraHelperError.captureSessionIsMissing }
if let rearCamera = self.rearCamera {
self.rearCameraInput = try AVCaptureDeviceInput(device: rearCamera)
if captureSession.canAddInput(self.rearCameraInput!) { captureSession.addInput(self.rearCameraInput!) }
self.currentCameraPosition = .rear
}
else if let frontCamera = self.frontCamera {
self.frontCameraInput = try AVCaptureDeviceInput(device: frontCamera)
if captureSession.canAddInput(self.frontCameraInput!) { captureSession.addInput(self.frontCameraInput!) }
else { throw CameraHelperError.inputsAreInvalid }
self.currentCameraPosition = .front
}
else { throw CameraHelperError.noCamerasAvailable }
}
func configurePhotoOutput() throws {
guard let captureSession = self.captureSession else { throw CameraHelperError.captureSessionIsMissing }
self.photoOutput = AVCapturePhotoOutput()
self.photoOutput!.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey : AVVideoCodecType.jpeg])], completionHandler: nil)
if captureSession.canAddOutput(self.photoOutput!) { captureSession.addOutput(self.photoOutput!) }
captureSession.startRunning()
}
DispatchQueue(label: "prepare").async {
do {
createCaptureSession()
try configureCaptureDevices()
try configureDeviceInputs()
try configurePhotoOutput()
}
catch {
DispatchQueue.main.async {
completionHandler(error)
}
return
}
DispatchQueue.main.async {
completionHandler(nil)
}
}
}
func displayPreview(on view: UIView) throws {
guard let captureSession = self.captureSession, captureSession.isRunning else { throw CameraHelperError.captureSessionIsMissing }
self.previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.previewLayer?.connection?.videoOrientation = .portrait
view.layer.insertSublayer(self.previewLayer!, at: 0)
self.previewLayer?.frame = view.frame
}
func switchCameras() throws {
guard let currentCameraPosition = currentCameraPosition, let captureSession = self.captureSession, captureSession.isRunning else { throw CameraHelperError.captureSessionIsMissing }
captureSession.beginConfiguration()
func switchToFrontCamera() throws {
guard let rearCameraInput = self.rearCameraInput, captureSession.inputs.contains(rearCameraInput),
let frontCamera = self.frontCamera else { throw CameraHelperError.invalidOperation }
self.frontCameraInput = try AVCaptureDeviceInput(device: frontCamera)
captureSession.removeInput(rearCameraInput)
if captureSession.canAddInput(self.frontCameraInput!) {
captureSession.addInput(self.frontCameraInput!)
self.currentCameraPosition = .front
}
else {
throw CameraHelperError.invalidOperation
}
}
func switchToRearCamera() throws {
guard let frontCameraInput = self.frontCameraInput, captureSession.inputs.contains(frontCameraInput),
let rearCamera = self.rearCamera else { throw CameraHelperError.invalidOperation }
self.rearCameraInput = try AVCaptureDeviceInput(device: rearCamera)
captureSession.removeInput(frontCameraInput)
if captureSession.canAddInput(self.rearCameraInput!) {
captureSession.addInput(self.rearCameraInput!)
self.currentCameraPosition = .rear
}
else { throw CameraHelperError.invalidOperation }
}
switch currentCameraPosition {
case .front:
try switchToRearCamera()
case .rear:
try switchToFrontCamera()
}
captureSession.commitConfiguration()
}
func captureImage(completion: @escaping (UIImage?, Error?) -> Void) {
guard let captureSession = captureSession, captureSession.isRunning else { completion(nil, CameraHelperError.captureSessionIsMissing); return }
let settings = AVCapturePhotoSettings()
settings.flashMode = self.flashMode
self.photoOutput?.capturePhoto(with: settings, delegate: self)
self.photoCaptureCompletionBlock = completion
}
}
extension CameraHelper: AVCapturePhotoCaptureDelegate {
public func photoOutput(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?,
resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Swift.Error?) {
if let error = error { self.photoCaptureCompletionBlock?(nil, error) }
else if let buffer = photoSampleBuffer, let data = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: buffer, previewPhotoSampleBuffer: nil),
let image = UIImage(data: data) {
self.photoCaptureCompletionBlock?(image, nil)
}
else {
self.photoCaptureCompletionBlock?(nil, CameraHelperError.unknown)
}
}
}
extension CameraHelper {
enum CameraHelperError: Swift.Error {
case captureSessionAlreadyRunning
case captureSessionIsMissing
case inputsAreInvalid
case invalidOperation
case noCamerasAvailable
case unknown
}
public enum CameraPosition {
case front
case rear
}
}
That is the helper class.
Use .builtInWideAngleCamera instead of .builtInDualCamera when setting up the AVCaptureDevice.
Replace this function in your project:
func configureCaptureDevices() throws {
let session = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .unspecified)
let cameras = session.devices.compactMap { $0 }
guard !cameras.isEmpty else { throw CameraHelperError.noCamerasAvailable }
for camera in cameras {
if camera.position == .front {
self.frontCamera = camera
}
if camera.position == .back {
self.rearCamera = camera
try camera.lockForConfiguration()
camera.focusMode = .continuousAutoFocus
camera.unlockForConfiguration()
}
}
}
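The dual-camera device type simply does not exist on single-camera phones (or in the Simulator), so the discovery session comes back empty and noCamerasAvailable is thrown. If you still want the dual camera when it is available, one option (my sketch, not part of the original answer) is to list both device types; the discovery session returns matching devices following the order of the deviceTypes array:
let session = AVCaptureDevice.DiscoverySession(
    deviceTypes: [.builtInDualCamera, .builtInWideAngleCamera],
    mediaType: AVMediaType.video,
    position: .unspecified)
// The dual camera is preferred when present; the wide-angle camera is still found on single-camera devices
let cameras = session.devices
guard !cameras.isEmpty else { throw CameraHelperError.noCamerasAvailable }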
This seems to work to switch the camera from the back to the front, but I'm trying to come up with an 'if' statement so that I can switch it back too. Any ideas or advice?
#IBAction func didTouchSwitchButton(sender: UIButton) {
let camera = getDevice(.Front)
let cameraBack = getDevice(.Back)
do {
input = try AVCaptureDeviceInput(device: camera)
} catch let error as NSError {
print(error)
input = nil
}
if(captureSession?.canAddInput(input) == true){
captureSession?.addInput(input)
stillImageOutput?.outputSettings = [AVVideoCodecKey : AVVideoCodecJPEG]
if(captureSession?.canAddOutput(stillImageOutput) == true){
captureSession?.addOutput(stillImageOutput)
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer?.frame = cameraView.bounds
cameraView.layer.addSublayer(previewLayer!)
captureSession?.startRunning()
}
}
}
func switchCamera(_ sender: UIButton) {
if let session = captureSession {
let currentCameraInput: AVCaptureInput = session.inputs[0]
session.removeInput(currentCameraInput)
var newCamera: AVCaptureDevice
newCamera = AVCaptureDevice.default(for: AVMediaType.video)!
if (currentCameraInput as! AVCaptureDeviceInput).device.position == .back {
UIView.transition(with: self.cameraView, duration: 0.5, options: .transitionFlipFromLeft, animations: {
newCamera = self.cameraWithPosition(.front)!
}, completion: nil)
} else {
UIView.transition(with: self.cameraView, duration: 0.5, options: .transitionFlipFromRight, animations: {
newCamera = self.cameraWithPosition(.back)!
}, completion: nil)
}
do {
try self.captureSession?.addInput(AVCaptureDeviceInput(device: newCamera))
}
catch {
print("error: \(error.localizedDescription)")
}
}
}
func cameraWithPosition(_ position: AVCaptureDevice.Position) -> AVCaptureDevice? {
let deviceDescoverySession = AVCaptureDevice.DiscoverySession.init(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.unspecified)
for device in deviceDescoverySession.devices {
if device.position == position {
return device
}
}
return nil
}
First create an enum to check the type of camera:
enum CameraDirection {
case front
case back
}
Then create a variable for the enum:
var currentDirection: CameraDirection = .front // or initial direction
Then in your didTouchSwitchButton function:
if (currentDirection == .front) {
currentDirection = .back
} else {
currentDirection = .front
}
reload()
You can see that I call a function named reload. Create it:
func reload() {
let camera = getDevice(.Front)
let cameraBack = getDevice(.Back)
do {
if currentDirection == .front {
input = try AVCaptureDeviceInput(device: camera)
} else {
input = try AVCaptureDeviceInput(device: cameraBack)
}
} catch let error as NSError {
print(error)
input = nil
}
//rest of code
}
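One caveat with this reload sketch (my note, not part of the original answer): the session still holds the previous camera's input, so canAddInput will likely fail until that input is removed inside a configuration block. A hedged variant, reusing the question's getDevice helper and input property:
func reload() {
    guard let session = captureSession else { return }
    session.beginConfiguration()
    // Drop the current camera input before adding the other one
    if let currentInput = input {
        session.removeInput(currentInput)
    }
    do {
        let device = currentDirection == .front ? getDevice(.Front) : getDevice(.Back)
        input = try AVCaptureDeviceInput(device: device)
    } catch let error as NSError {
        print(error)
        input = nil
    }
    if let newInput = input, session.canAddInput(newInput) {
        session.addInput(newInput)
    }
    session.commitConfiguration()
}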