Switching Camera with a button in Swift

This seems to work to switch the camera from the back to the front, but I'm trying to come up with an 'if' statement so that I can switch it back too. Any ideas or advice?
@IBAction func didTouchSwitchButton(sender: UIButton) {
    let camera = getDevice(.Front)
    let cameraBack = getDevice(.Back)
    do {
        input = try AVCaptureDeviceInput(device: camera)
    } catch let error as NSError {
        print(error)
        input = nil
    }
    if captureSession?.canAddInput(input) == true {
        captureSession?.addInput(input)
        stillImageOutput?.outputSettings = [AVVideoCodecKey : AVVideoCodecJPEG]
        if captureSession?.canAddOutput(stillImageOutput) == true {
            captureSession?.addOutput(stillImageOutput)
            previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
            previewLayer?.frame = cameraView.bounds
            cameraView.layer.addSublayer(previewLayer!)
            captureSession?.startRunning()
        }
    }
}

func switchCamera(_ sender: UIButton) {
    guard let session = captureSession else { return }
    let currentCameraInput: AVCaptureInput = session.inputs[0]
    session.removeInput(currentCameraInput)
    var newCamera: AVCaptureDevice
    newCamera = AVCaptureDevice.default(for: AVMediaType.video)!
    if (currentCameraInput as! AVCaptureDeviceInput).device.position == .back {
        UIView.transition(with: self.cameraView, duration: 0.5, options: .transitionFlipFromLeft, animations: {
            newCamera = self.cameraWithPosition(.front)!
        }, completion: nil)
    } else {
        UIView.transition(with: self.cameraView, duration: 0.5, options: .transitionFlipFromRight, animations: {
            newCamera = self.cameraWithPosition(.back)!
        }, completion: nil)
    }
    do {
        try session.addInput(AVCaptureDeviceInput(device: newCamera))
    } catch {
        print("error: \(error.localizedDescription)")
    }
}
func cameraWithPosition(_ position: AVCaptureDevice.Position) -> AVCaptureDevice? {
    let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .unspecified)
    for device in deviceDiscoverySession.devices {
        if device.position == position {
            return device
        }
    }
    return nil
}

First, create an enum to track which camera is in use:
enum CameraDirection {
    case front
    case back
}
Then create a variable for the enum:
var currentDirection: CameraDirection = .front // or whatever the initial direction is
Then in your didTouchSwitchButton function:
if currentDirection == .front {
    currentDirection = .back
} else {
    currentDirection = .front
}
reload()
You can see that I called a function called reload. Create that:
func reload() {
    let camera = getDevice(.Front)
    let cameraBack = getDevice(.Back)
    do {
        if currentDirection == .front {
            input = try AVCaptureDeviceInput(device: camera)
        } else {
            input = try AVCaptureDeviceInput(device: cameraBack)
        }
    } catch let error as NSError {
        print(error)
        input = nil
    }
    // rest of code
}
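If you prefer to toggle by swapping the session's inputs directly instead of rebuilding everything in reload(), a minimal sketch might look like this (it assumes the captureSession property and the cameraWithPosition(_:) helper shown earlier; toggleCamera() is an illustrative name, not from the original code):
func toggleCamera() {
    guard let session = captureSession,
          let currentInput = session.inputs.first as? AVCaptureDeviceInput else { return }

    // Pick the opposite position of the input currently attached to the session.
    let newPosition: AVCaptureDevice.Position = (currentInput.device.position == .back) ? .front : .back
    guard let newDevice = cameraWithPosition(newPosition),
          let newInput = try? AVCaptureDeviceInput(device: newDevice) else { return }

    // Wrap the swap in begin/commitConfiguration so the session changes atomically.
    session.beginConfiguration()
    session.removeInput(currentInput)
    if session.canAddInput(newInput) {
        session.addInput(newInput)
    } else {
        session.addInput(currentInput) // fall back to the old input if the new one is rejected
    }
    session.commitConfiguration()
}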

Related

Unable to play video after the app is rebuilt (Swift)

The video does not play after the app is rebuilt (the file path URL is saved into Core Data); recording is done with AVCaptureSession.
The file path does not change before and after the rebuild:
file:///private/var/mobile/Containers/Data/Application/3DA93FBC-9A20-40B4-A017-B3D5C7768301/tmp/63F6CEED-3202-4F5F-999B-5F138D73635D.mp4
I have tried everything and nothing works.
Here is my code for recording the video:
func setupPreview() {
// Configure previewLayer
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer?.frame = shapeLayer.bounds
previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
shapeLayer.layer.addSublayer(previewLayer!)
}
func setupSession() -> Bool {
captureSession.sessionPreset = AVCaptureSession.Preset.high
// Setup Camera
let camera = AVCaptureDevice.default(for: AVMediaType.video)!
do {
let input = try AVCaptureDeviceInput(device: camera)
if captureSession.canAddInput(input) {
captureSession.addInput(input)
activeInput = input
}
} catch {
print("Error setting device video input: \(error)")
return false
}
// Setup Microphone
let microphone = AVCaptureDevice.default(for: AVMediaType.audio)!
do {
let micInput = try AVCaptureDeviceInput(device: microphone)
if captureSession.canAddInput(micInput) {
captureSession.addInput(micInput)
}
} catch {
print("Error setting device audio input: \(error)")
return false
}
// Movie output
if captureSession.canAddOutput(movieOutput) {
captureSession.addOutput(movieOutput)
}
return true
}
func startSession() {
if !captureSession.isRunning {
videoQueue().async {
self.captureSession.startRunning()
}
}
}
func stopSession() {
if captureSession.isRunning {
videoQueue().async {
self.captureSession.stopRunning()
}
}
}
func videoQueue() -> DispatchQueue {
return DispatchQueue.main
}
func currentVideoOrientation() -> AVCaptureVideoOrientation {
var orientation: AVCaptureVideoOrientation
switch UIDevice.current.orientation {
case .portrait:
orientation = AVCaptureVideoOrientation.portrait
case .landscapeRight:
orientation = AVCaptureVideoOrientation.landscapeLeft
case .portraitUpsideDown:
orientation = AVCaptureVideoOrientation.portraitUpsideDown
default:
orientation = AVCaptureVideoOrientation.landscapeRight
}
return orientation
}
func startRecording() {
if movieOutput.isRecording == false {
save.setTitle("stop", for: UIControl.State.normal)
let connection = movieOutput.connection(with: AVMediaType.video)
if (connection?.isVideoOrientationSupported)! {
connection?.videoOrientation = currentVideoOrientation()
}
if (connection?.isVideoStabilizationSupported)! {
connection?.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.auto
}
let device = activeInput.device
if (device.isSmoothAutoFocusSupported) {
do {
try device.lockForConfiguration()
device.isSmoothAutoFocusEnabled = false
device.unlockForConfiguration()
} catch {
print("Error setting configuration: \(error)")
}
}
//EDIT2: And I forgot this
outputURL = tempURL()
movieOutput.startRecording(to: outputURL, recordingDelegate: self)
}
else {
stopRecording()
}
}
func tempURL() -> URL? {
let directory = NSTemporaryDirectory() as NSString
let path = directory.appendingPathComponent(NSUUID().uuidString + ".mp4")
path22 = path
let directoryURL: URL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
let folderPath: URL = directoryURL.appendingPathComponent("Downloads", isDirectory: true)
let fileURL: URL = folderPath.appendingPathComponent(path)
return URL(fileURLWithPath: path)
}
func stopRecording() {
if movieOutput.isRecording == true {
movieOutput.stopRecording()
}
}
Here is where I save it into Core Data:
let managedObject = self.managedObjectContext
entity = NSEntityDescription.entity(forEntityName: "MediaData", in: managedObject!)
let personMO = NSManagedObject(entity: entity, insertInto: managedObject)
personMO.setValue("\(self.videoURL!)", forKey: "videosS")
personMO.setValue(dataImage, forKey: "thumbnails")
print(personMO)
do
{
try managedObject?.save()
print("video saved")
}
catch
{
print("Catch Erroe : Failed To
}
let appdel = UIApplication.shared.delegate as! AppDelegate
appdel.avplayer = AVPlayer(url: videoURL!)
print(videoURL!)
let playerLayer = AVPlayerLayer(player: appdel.avplayer)
playerLayer.frame = self.view.bounds
self.view.layer.addSublayer(playerLayer)
appdel.avplayer?.play()
You must never save a full filepath into CoreData or anywhere else. File paths are not persistent. Your app is sandboxed. The sandbox path can change at any time, especially between launches and installations.
Instead, save the file name and reconstruct the path each time you need it. Just as you are calling FileManager.default.urls(for: .documentDirectory...) to construct the file path initially, so you must call it every time you want to access this file.
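A rough sketch of that approach (assuming the movie file actually lives in, or is copied into, the Documents directory; videoURL, personMO, and the "videosS" key come from the code above, while url(forSavedFileName:) is an illustrative helper name):
// When saving: store only the file name, never the absolute path.
let fileName = videoURL!.lastPathComponent          // e.g. the "....mp4" component
personMO.setValue(fileName, forKey: "videosS")

// When loading: rebuild the URL against the *current* sandbox location.
func url(forSavedFileName name: String) -> URL {
    let documents = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
    return documents.appendingPathComponent(name)
}

// Playback then uses the reconstructed URL, e.g.:
// appdel.avplayer = AVPlayer(url: url(forSavedFileName: savedName))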

Recording video and audio with AVAssetWriter in Swift

I'm trying to add the device's microphone audio to a video recording from the device's camera. The video is filtered with a CIFilter and works as expected. My problem is that the mic audio is not attached to the video once it is saved.
I have tried setting the audio settings manually like this:
let audioSettings: [String: Any] = [
    AVFormatIDKey: kAudioFormatMPEG4AAC,
    AVNumberOfChannelsKey: 1,
    AVSampleRateKey: 44100,
    AVEncoderBitRateKey: 64000
]
but using the recommendedAudioSettingsForAssetWriter method seems like the correct approach, since the video recording already works with the analogous recommended video settings call.
Can anyone tell me how to achieve this or point me in the right direction?
My code so far:
import UIKit
import AVFoundation
class VideoViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {
@IBOutlet weak var imageView: UIImageView!
lazy var cameraDevice: AVCaptureDevice? = {
return AVCaptureDevice.default(for: .video)
}()
lazy var micDevice: AVCaptureDevice? = {
return AVCaptureDevice.default(for: .audio)
}()
var captureSession = AVCaptureSession()
var outputURL: URL!
var orientation: AVCaptureVideoOrientation = .landscapeRight
var filterObject = FilterObject()
var assetWriter: AVAssetWriter?
var assetWriterInput: AVAssetWriterInput?
var pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor?
var fileName = ""
var recordingState = RecordingState.idle
var time: Double = 0
let videoOutput = AVCaptureVideoDataOutput()
let audioOutput = AVCaptureAudioDataOutput()
let context = CIContext()
override func viewDidLoad() {
super.viewDidLoad()
setupCameraDevice()
setupAudioDevice()
setupInputOutput()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
setUpAuthStatus()
}
@IBAction func recordPressed(_ sender: UIButton) {
switch recordingState {
case .idle:
recordingState = .start
case .capturing:
recordingState = .end
default:
break
}
}
func setUpAuthStatus() {
if AVCaptureDevice.authorizationStatus(for: AVMediaType.video) != .authorized {
AVCaptureDevice.requestAccess(for: AVMediaType.video, completionHandler: { (authorized) in
DispatchQueue.main.async {
if authorized {
self.setupInputOutput()
}
}
})
}
if AVCaptureDevice.authorizationStatus(for: AVMediaType.audio) != .authorized {
AVCaptureDevice.requestAccess(for: AVMediaType.audio, completionHandler: { (authorized) in
DispatchQueue.main.async {
if authorized {
self.setupInputOutput()
}
}
})
}
}
func setupCameraDevice() {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .unspecified)
let devices = deviceDiscoverySession.devices
for device in devices {
if device.position == .back {
cameraDevice = device
}
}
}
func setupAudioDevice() {
let audioDeviceDisoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInMicrophone], mediaType: .audio, position: .unspecified)
let devices = audioDeviceDisoverySession.devices
micDevice = devices[0]
}
func setupInputOutput() {
do {
guard let cameraDevice = cameraDevice else { return }
let captureDeviceInput = try AVCaptureDeviceInput(device: cameraDevice)
guard let micDevice = micDevice else { return }
let micDeviceInput = try AVCaptureDeviceInput(device: micDevice)
captureSession.sessionPreset = AVCaptureSession.Preset.hd1920x1080
if captureSession.canAddInput(captureDeviceInput) {
captureSession.addInput(captureDeviceInput)
}
if captureSession.canAddInput(micDeviceInput) {
captureSession.addInput(micDeviceInput)
}
let queue = DispatchQueue(label: "com.apple.sample.capturepipeline.video", attributes: [])
if captureSession.canAddOutput(videoOutput) {
videoOutput.setSampleBufferDelegate(self, queue: queue)
captureSession.addOutput(videoOutput)
}
if captureSession.canAddOutput(audioOutput) {
audioOutput.setSampleBufferDelegate(self, queue: queue)
captureSession.addOutput(audioOutput)
}
captureSession.commitConfiguration()
captureSession.startRunning()
} catch {
print(error)
}
}
func captureOutput(_ output: AVCaptureOutput,
didOutput sampleBuffer: CMSampleBuffer,
from connection: AVCaptureConnection) {
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
audioOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {return}
let cameraImage = CIImage(cvImageBuffer: imageBuffer)
guard let name = filterObject.name else {return}
let effect = FilterType.genericFilter(name: name, cameraImage: cameraImage)
effect.setValue(cameraImage, forKey: kCIInputImageKey)
TableData.setFilterValues(withFilterName: name, effect: effect, values: [value1, value2])
guard let outputImage = effect.outputImage else { return }
context.render(outputImage, to: imageBuffer)
guard let cgImage = self.context.createCGImage(outputImage, from: cameraImage.extent) else { return }
DispatchQueue.main.async {
let filteredImage = UIImage(cgImage: cgImage)
self.imageView.image = filteredImage
}
let timestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer).seconds
switch recordingState {
case .start:
fileName = UUID().uuidString
let videoPath = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!.appendingPathComponent("\(fileName).mov")
let writer = try! AVAssetWriter(outputURL: videoPath, fileType: .mov)
let videoSettings = videoOutput.recommendedVideoSettingsForAssetWriter(writingTo: .mov)
let videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
videoInput.mediaTimeScale = CMTimeScale(bitPattern: 600)
videoInput.expectsMediaDataInRealTime = true
let audioSettings = audioOutput.recommendedAudioSettingsForAssetWriter(writingTo: .m4a)
let audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings as? [String : Any])
audioInput.expectsMediaDataInRealTime = true
//videoInput.transform = CGAffineTransform(rotationAngle: .pi/2)
let pixelAdapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoInput, sourcePixelBufferAttributes: nil)
if writer.canAdd(videoInput) {
writer.add(videoInput)
}
if writer.canAdd(audioInput) {
writer.add(audioInput)
}
writer.startWriting()
writer.startSession(atSourceTime: .zero)
assetWriter = writer
assetWriterInput = videoInput
pixelBufferAdaptor = pixelAdapter
recordingState = .capturing
time = timestamp
case .capturing:
if assetWriterInput?.isReadyForMoreMediaData == true {
let newTime = CMTime(seconds: timestamp - time, preferredTimescale: CMTimeScale(600))
pixelBufferAdaptor?.append(imageBuffer, withPresentationTime: newTime)
}
break
case .end:
guard assetWriterInput?.isReadyForMoreMediaData == true, assetWriter!.status != .failed else { break }
let url = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!.appendingPathComponent("\(fileName).mov")
assetWriterInput?.markAsFinished()
assetWriter?.finishWriting { [weak self] in
self?.recordingState = .idle
self?.assetWriter = nil
self?.assetWriterInput = nil
DispatchQueue.main.async {
let activity = UIActivityViewController(activityItems: [url], applicationActivities: nil)
self?.present(activity, animated: true, completion: nil)
}
}
default:
break
}
}
}
Your audio settings do not look correct. The value for AVSampleRateKey should come from the format description of the first audio sample buffer that comes in rather than being hard-coded to 44100, and AVEncoderBitRateKey should maybe be set to something like Int(48_000).
To get the sample rate, first call
let fmt = CMSampleBufferGetFormatDescription(sampleBuffer)
let asbd = CMAudioFormatDescriptionGetStreamBasicDescription(fmt!)
and then the sample rate will be asbd?.pointee.mSampleRate, which should be set as the AVSampleRateKey value in the audio settings (I think).
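Putting that together, a minimal sketch of building the writer's audio settings from the incoming buffer (audioWriterSettings(from:) is an illustrative helper name, not from the original code; it would be called once with the first audio sample buffer, e.g. in the .start branch):
import AVFoundation

// Build the audio settings from the first audio sample buffer instead of hard-coding them.
func audioWriterSettings(from sampleBuffer: CMSampleBuffer) -> [String: Any]? {
    guard let format = CMSampleBufferGetFormatDescription(sampleBuffer),
          let asbd = CMAudioFormatDescriptionGetStreamBasicDescription(format)?.pointee else {
        return nil
    }
    return [
        AVFormatIDKey: kAudioFormatMPEG4AAC,
        AVNumberOfChannelsKey: Int(asbd.mChannelsPerFrame),
        AVSampleRateKey: asbd.mSampleRate,   // taken from the incoming audio, e.g. 44100 or 48000
        AVEncoderBitRateKey: 64_000          // an assumed AAC bit rate, tune as needed
    ]
}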

AVFoundation Session issue

I am working on an application that requires recording video, and I found a helper class written by appcoda (GitHub repo: https://github.com/appcoda/FullScreenCamera). The problem is that whenever I run it, I get an error in the console saying noCamerasAvailable, followed by captureSessionIsMissing. I am committed to improving the code, but I just cannot figure out why this happens. Here is the helper class:
class CameraHelper: NSObject {
var captureSession: AVCaptureSession?
var currentCameraPosition: CameraPosition?
var frontCamera: AVCaptureDevice?
var frontCameraInput: AVCaptureDeviceInput?
var photoOutput: AVCapturePhotoOutput?
var rearCamera: AVCaptureDevice?
var rearCameraInput: AVCaptureDeviceInput?
var previewLayer: AVCaptureVideoPreviewLayer?
var flashMode = AVCaptureDevice.FlashMode.off
var photoCaptureCompletionBlock: ((UIImage?, Error?) -> Void)?
}
extension CameraHelper {
func prepare(completionHandler: @escaping (Error?) -> Void) {
func createCaptureSession() {
self.captureSession = AVCaptureSession()
}
func configureCaptureDevices() throws {
let session = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: AVMediaType.video, position: .unspecified)
let cameras = session.devices.compactMap { $0 }
guard !cameras.isEmpty else { throw CameraHelperError.noCamerasAvailable }
for camera in cameras {
if camera.position == .front {
self.frontCamera = camera
}
if camera.position == .back {
self.rearCamera = camera
try camera.lockForConfiguration()
camera.focusMode = .continuousAutoFocus
camera.unlockForConfiguration()
}
}
}
func configureDeviceInputs() throws {
guard let captureSession = self.captureSession else { throw CameraHelperError.captureSessionIsMissing }
if let rearCamera = self.rearCamera {
self.rearCameraInput = try AVCaptureDeviceInput(device: rearCamera)
if captureSession.canAddInput(self.rearCameraInput!) { captureSession.addInput(self.rearCameraInput!) }
self.currentCameraPosition = .rear
}
else if let frontCamera = self.frontCamera {
self.frontCameraInput = try AVCaptureDeviceInput(device: frontCamera)
if captureSession.canAddInput(self.frontCameraInput!) { captureSession.addInput(self.frontCameraInput!) }
else { throw CameraHelperError.inputsAreInvalid }
self.currentCameraPosition = .front
}
else { throw CameraHelperError.noCamerasAvailable }
}
func configurePhotoOutput() throws {
guard let captureSession = self.captureSession else { throw CameraHelperError.captureSessionIsMissing }
self.photoOutput = AVCapturePhotoOutput()
self.photoOutput!.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey : AVVideoCodecType.jpeg])], completionHandler: nil)
if captureSession.canAddOutput(self.photoOutput!) { captureSession.addOutput(self.photoOutput!) }
captureSession.startRunning()
}
DispatchQueue(label: "prepare").async {
do {
createCaptureSession()
try configureCaptureDevices()
try configureDeviceInputs()
try configurePhotoOutput()
}
catch {
DispatchQueue.main.async {
completionHandler(error)
}
return
}
DispatchQueue.main.async {
completionHandler(nil)
}
}
}
func displayPreview(on view: UIView) throws {
guard let captureSession = self.captureSession, captureSession.isRunning else { throw CameraHelperError.captureSessionIsMissing }
self.previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.previewLayer?.connection?.videoOrientation = .portrait
view.layer.insertSublayer(self.previewLayer!, at: 0)
self.previewLayer?.frame = view.frame
}
func switchCameras() throws {
guard let currentCameraPosition = currentCameraPosition, let captureSession = self.captureSession, captureSession.isRunning else { throw CameraHelperError.captureSessionIsMissing }
captureSession.beginConfiguration()
func switchToFrontCamera() throws {
guard let rearCameraInput = self.rearCameraInput, captureSession.inputs.contains(rearCameraInput),
let frontCamera = self.frontCamera else { throw CameraHelperError.invalidOperation }
self.frontCameraInput = try AVCaptureDeviceInput(device: frontCamera)
captureSession.removeInput(rearCameraInput)
if captureSession.canAddInput(self.frontCameraInput!) {
captureSession.addInput(self.frontCameraInput!)
self.currentCameraPosition = .front
}
else {
throw CameraHelperError.invalidOperation
}
}
func switchToRearCamera() throws {
guard let frontCameraInput = self.frontCameraInput, captureSession.inputs.contains(frontCameraInput),
let rearCamera = self.rearCamera else { throw CameraHelperError.invalidOperation }
self.rearCameraInput = try AVCaptureDeviceInput(device: rearCamera)
captureSession.removeInput(frontCameraInput)
if captureSession.canAddInput(self.rearCameraInput!) {
captureSession.addInput(self.rearCameraInput!)
self.currentCameraPosition = .rear
}
else { throw CameraHelperError.invalidOperation }
}
switch currentCameraPosition {
case .front:
try switchToRearCamera()
case .rear:
try switchToFrontCamera()
}
captureSession.commitConfiguration()
}
func captureImage(completion: @escaping (UIImage?, Error?) -> Void) {
guard let captureSession = captureSession, captureSession.isRunning else { completion(nil, CameraHelperError.captureSessionIsMissing); return }
let settings = AVCapturePhotoSettings()
settings.flashMode = self.flashMode
self.photoOutput?.capturePhoto(with: settings, delegate: self)
self.photoCaptureCompletionBlock = completion
}
}
extension CameraHelper: AVCapturePhotoCaptureDelegate {
public func photoOutput(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?,
resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Swift.Error?) {
if let error = error { self.photoCaptureCompletionBlock?(nil, error) }
else if let buffer = photoSampleBuffer, let data = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: buffer, previewPhotoSampleBuffer: nil),
let image = UIImage(data: data) {
self.photoCaptureCompletionBlock?(image, nil)
}
else {
self.photoCaptureCompletionBlock?(nil, CameraHelperError.unknown)
}
}
}
extension CameraHelper {
enum CameraHelperError: Swift.Error {
case captureSessionAlreadyRunning
case captureSessionIsMissing
case inputsAreInvalid
case invalidOperation
case noCamerasAvailable
case unknown
}
public enum CameraPosition {
case front
case rear
}
}
that is the helper class.
Use .builtInWideAngleCamera instead of .builtInDualCamera when setting up the AVCaptureDevice. On devices that only have a single back camera, the discovery session finds no .builtInDualCamera, so the helper throws noCamerasAvailable.
Replace this function in your project:
func configureCaptureDevices() throws {
    let session = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .unspecified)
    let cameras = session.devices.compactMap { $0 }
    guard !cameras.isEmpty else { throw CameraHelperError.noCamerasAvailable }
    for camera in cameras {
        if camera.position == .front {
            self.frontCamera = camera
        }
        if camera.position == .back {
            self.rearCamera = camera
            try camera.lockForConfiguration()
            camera.focusMode = .continuousAutoFocus
            camera.unlockForConfiguration()
        }
    }
}
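If you still want to prefer the dual camera on devices that have one, one possible variant of the start of configureCaptureDevices() (a sketch, not part of the original helper) is to pass several device types so the discovery session can fall back to the wide-angle camera:
// Prefer the dual camera where it exists, otherwise fall back to the wide-angle camera.
let session = AVCaptureDevice.DiscoverySession(
    deviceTypes: [.builtInDualCamera, .builtInWideAngleCamera],
    mediaType: AVMediaType.video,
    position: .unspecified
)
let cameras = session.devices
guard !cameras.isEmpty else { throw CameraHelperError.noCamerasAvailable }
// ...the rest of the loop over `cameras` stays the same.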

Couple of issues with custom camera

I currently have a custom camera implemented in my application. I am running into two small issues.
1) When I switch between the camera views (front and back), the audio input dies and it only records video.
2) My method for deciding which camera (front or back) is which is deprecated, and I don't know exactly how to go about resolving it. The code is as follows; the deprecated part is the call that fills the devices variable. Xcode is telling me: "Use AVCaptureDeviceDiscoverySession instead."
let devices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo) as! [AVCaptureDevice]
// Get the front and back-facing camera for taking photos
for device in devices {
if device.position == AVCaptureDevicePosition.back {
backFacingCamera = device
} else if device.position == AVCaptureDevicePosition.front {
frontFacingCamera = device
}
}
currentDevice = backFacingCamera
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: currentDevice) else {
return
}
As for the general camera recording, here is the code:
My Variables:
let captureSession = AVCaptureSession()
var currentDevice:AVCaptureDevice?
var backFacingCamera: AVCaptureDevice?
var frontFacingCamera: AVCaptureDevice?
var videoFileOutput : AVCaptureMovieFileOutput?
var cameraPreviewLayer : AVCaptureVideoPreviewLayer?
@IBOutlet weak var recordingView: UIView!
switching cameras:
var device = AVCaptureDevice.defaultDevice(withDeviceType: .builtInWideAngleCamera, mediaType: AVMediaTypeVideo, position: .back)
func switchCameras() {
captureSession.beginConfiguration()
// Change the device based on the current camera
let newDevice = (currentDevice?.position == AVCaptureDevicePosition.back) ? frontFacingCamera : backFacingCamera
// Remove all inputs from the session
for input in captureSession.inputs {
captureSession.removeInput(input as! AVCaptureDeviceInput)
}
// Change to the new input
let cameraInput:AVCaptureDeviceInput
do {
cameraInput = try AVCaptureDeviceInput(device: newDevice)
} catch {
print(error)
return
}
if captureSession.canAddInput(cameraInput) {
captureSession.addInput(cameraInput)
}
currentDevice = newDevice
captureSession.commitConfiguration()
if currentDevice?.position == .front {
flashButton.isHidden = true
flashButton.isEnabled = false
} else if currentDevice?.position == .back {
flashButton.isHidden = false
flashButton.isEnabled = true
}
}
& In my view will appear:
mediaViewCapture.frame = CGRect(x: self.view.frame.size.width * 0, y: self.view.frame.size.height * 0, width:self.view.frame.size.width, height: self.view.frame.size.height)
self.view.addSubview(mediaViewCapture)
captureSession.sessionPreset = AVCaptureSessionPresetHigh
let devices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo) as! [AVCaptureDevice]
// Get the front and back-facing camera for taking photos
for device in devices {
if device.position == AVCaptureDevicePosition.back {
backFacingCamera = device
} else if device.position == AVCaptureDevicePosition.front {
frontFacingCamera = device
}
}
currentDevice = backFacingCamera
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: currentDevice) else {
return
}
let audioInputDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeAudio)
do
{
let audioInput = try AVCaptureDeviceInput(device: audioInputDevice)
// Add Audio Input
if captureSession.canAddInput(audioInput)
{
captureSession.addInput(audioInput)
}
else
{
NSLog("Can't Add Audio Input")
}
}
catch let error
{
NSLog("Error Getting Input Device: \(error)")
}
videoFileOutput = AVCaptureMovieFileOutput()
captureSession.addInput(captureDeviceInput)
captureSession.addOutput(videoFileOutput)
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(cameraPreviewLayer!)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
cameraPreviewLayer?.frame = mediaViewCapture.layer.frame
captureSession.startRunning()
& Finally my capture:
func capture(_ captureOutput: AVCaptureFileOutput!, didFinishRecordingToOutputFileAt outputFileURL: URL!, fromConnections connections: [Any]!, error: Error!) {
if error == nil {
turnFlashOff()
let videoVC = VideoPreviewVC()
videoVC.url = outputFileURL
self.navigationController?.pushViewController(videoVC, animated: false)
} else {
print("Error saving the video \(error)")
}
}
You can use AVCaptureDeviceDiscoverySession instead of AVCaptureDevice.devices(withMediaType:), as the latter is deprecated. Following is the code for it:
let deviceDiscovery = AVCaptureDeviceDiscoverySession(deviceTypes: [AVCaptureDeviceType.builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: .back)
let devices = deviceDiscovery?.devices
for device in devices! {
    if device.hasMediaType(AVMediaTypeVideo) {
        captureDevice = device
    }
}
AVCaptureDeviceType has the following types: builtInMicrophone, builtInWideAngleCamera, builtInTelephotoCamera, builtInDualCamera and builtInDuoCamera.
The audio input issue when the camera is switched still needs to be checked.
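One likely cause of that audio drop (an observation about the switchCameras() code above, not something stated in the original answer): the loop removes every input from the session, including the microphone input. A sketch that removes only the video input and leaves the audio input attached, using the same property names as the question's code:
func switchCameras() {
    captureSession.beginConfiguration()

    // Remove only the camera input; leave the microphone input on the session.
    for input in captureSession.inputs {
        if let deviceInput = input as? AVCaptureDeviceInput, deviceInput.device.hasMediaType(AVMediaTypeVideo) {
            captureSession.removeInput(deviceInput)
        }
    }

    // Attach the opposite camera.
    let newDevice = (currentDevice?.position == .back) ? frontFacingCamera : backFacingCamera
    if let newDevice = newDevice, let cameraInput = try? AVCaptureDeviceInput(device: newDevice),
       captureSession.canAddInput(cameraInput) {
        captureSession.addInput(cameraInput)
        currentDevice = newDevice
    }

    captureSession.commitConfiguration()
}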

Swift - AVAudioPlayer doesn't work properly

I have the following code :
let speechRecognizer = SFSpeechRecognizer()!
let audioEngine = AVAudioEngine()
var recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
var recognitionTask = SFSpeechRecognitionTask()
var audioPlayer : AVAudioPlayer!
override func viewDidLoad() {
super.viewDidLoad()
playSound(sound: "oops")
speechRecognizer.delegate = self
requestSpeechAuth()
}
func requestSpeechAuth(){
SFSpeechRecognizer.requestAuthorization { (authStatus) in
OperationQueue.main.addOperation({
switch authStatus {
case.authorized:
print("authorized")
case.denied:
print("denied")
case.restricted:
print("restricted")
case.notDetermined:
print("not determined")
}
})
}
}
// Function called when I press on my record button
func SpeechButtonDown() {
print("Start recording")
if audioEngine.isRunning {
endRecording()
} else {
do {
let audioSession = AVAudioSession.sharedInstance()
try audioSession.setCategory(AVAudioSessionCategoryRecord)
try audioSession.setMode(AVAudioSessionModeMeasurement)
try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
if let inputNode = audioEngine.inputNode {
recognitionRequest.shouldReportPartialResults = true
recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in
print("1")
if let result = result {
self.instructionLabel.text = result.bestTranscription.formattedString
print("2")
if result.isFinal {
self.audioEngine.stop()
inputNode.removeTap(onBus: 0)
if self.instructionLabel.text != "" {
self.compareWordwithVoice()
}
}
}
})
let recognitionFormat = inputNode.outputFormat(forBus: 0)
inputNode.installTap(onBus: 0, bufferSize: 1024, format: recognitionFormat, block: { (buffer, when) in
self.recognitionRequest.append(buffer)
})
audioEngine.prepare()
try audioEngine.start()
}
} catch {
}
}
}
// Function called when I release the record button
func EndRecording() {
endRecording()
print("Stop recording")
}
func endRecording() {
audioEngine.stop()
recognitionRequest.endAudio()
audioEngine.inputNode?.removeTap(onBus: 0)
}
func playSound(sound: String) {
if let url = Bundle.main.url(forResource: sound, withExtension: "wav") {
do {
audioPlayer = try AVAudioPlayer(contentsOf: url)
guard let player = audioPlayer else { return }
player.prepareToPlay()
player.play()
print("tutu")
} catch let error {
print(error.localizedDescription)
}
}
}
func compareWordwithVoice() {
let StringToLearn = setWordToLearn()
print("StringToLearn : \(StringToLearn)")
if let StringRecordedFull = instructionLabel.text{
let StringRecorded = (StringRecordedFull as NSString).replacingOccurrences(of: " ", with: "").lowercased()
print("StringRecorded : \(StringRecorded)")
if StringRecorded == "appuyezsurleboutonendessousetprenoncezl’expression" {
print("not yet")
} else {
if StringToLearn == StringRecorded {
playSound(sound: "success")
print("success")
// update UI
} else {
playSound(sound: "oops")
print("oops")
// update UI
}
}
}
}
func setWordToLearn() -> String {
if let wordToLearnFull = expr?.expression {
print(wordToLearnFull)
var wordToLearn = (wordToLearnFull as NSString).replacingOccurrences(of: " ", with: "").lowercased()
wordToLearn = (wordToLearn as NSString).replacingOccurrences(of: ".", with: "")
wordToLearn = (wordToLearn as NSString).replacingOccurrences(of: "!", with: "")
wordToLearn = (wordToLearn as NSString).replacingOccurrences(of: "?", with: "")
wordToLearn = (wordToLearn as NSString).replacingOccurrences(of: ",", with: "")
wordToLearn = (wordToLearn as NSString).replacingOccurrences(of: "/", with: "")
print(wordToLearn)
return wordToLearn
}
print("no wordToLearn")
return ""
}
The problem is that playSound works perfectly when it is called in viewDidLoad but doesn't work when it is called by the compareThing() function, even though it prints "tutu" in both cases, so the playSound function is executed every time.
Could the problem be that AVAudioPlayer and AVAudioEngine cannot work at the same time?
Thanks
I've experienced the same thing with my code, and from searching online it seems like there is an unspoken bug "when using AVAudioPlayer and Engine separately".
I got the information from the following link. I did not find anything else online that states why this bug happens though.
https://swiftios8dev.wordpress.com/2015/03/05/sound-effects-using-avaudioengine/
The suggestion was to use AVAudioEngine for everything.
I think "compareThings" always plays "oops" sound and this sound is not good (too quiet or broken).
Please try to play "oops" sound from "viewDidLoad" func to make sure sound is okay.
If it is okay (I don't think so) - set breakpoint in "playSound" func to see what is going on (sound name, does it exists etc).
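As a rough illustration of the earlier suggestion to use AVAudioEngine for everything (a minimal sketch that reuses the question's existing audioEngine property; playerNode and playSoundWithEngine are illustrative names, not from the original code):
let playerNode = AVAudioPlayerNode()   // add this next to the existing audioEngine property

func playSoundWithEngine(_ sound: String) {
    guard let url = Bundle.main.url(forResource: sound, withExtension: "wav"),
          let file = try? AVAudioFile(forReading: url) else { return }

    // Attach the player node to the existing engine the first time through,
    // then (re)connect it to the mixer using the file's own format.
    if playerNode.engine == nil {
        audioEngine.attach(playerNode)
    }
    audioEngine.connect(playerNode, to: audioEngine.mainMixerNode, format: file.processingFormat)

    do {
        if !audioEngine.isRunning {
            audioEngine.prepare()
            try audioEngine.start()
        }
        playerNode.scheduleFile(file, at: nil, completionHandler: nil)
        playerNode.play()
    } catch {
        print(error.localizedDescription)
    }
}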