AVFoundation Session issue - Swift

I am working on an application that requires recording video, and I found a helper class written by AppCoda; here is a link to the GitHub repo: https://github.com/appcoda/FullScreenCamera. The problem I am having with it is that whenever I run it, I get an error in the console saying noCamerasAvailable followed by captureSessionIsMissing. I am also committed to improving the code, and I just cannot figure out why this happens. Here is the helper class:
class CameraHelper: NSObject {
var captureSession: AVCaptureSession?
var currentCameraPosition: CameraPosition?
var frontCamera: AVCaptureDevice?
var frontCameraInput: AVCaptureDeviceInput?
var photoOutput: AVCapturePhotoOutput?
var rearCamera: AVCaptureDevice?
var rearCameraInput: AVCaptureDeviceInput?
var previewLayer: AVCaptureVideoPreviewLayer?
var flashMode = AVCaptureDevice.FlashMode.off
var photoCaptureCompletionBlock: ((UIImage?, Error?) -> Void)?
}
extension CameraHelper {
func prepare(completionHandler: @escaping (Error?) -> Void) {
func createCaptureSession() {
self.captureSession = AVCaptureSession()
}
func configureCaptureDevices() throws {
let session = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: AVMediaType.video, position: .unspecified)
let cameras = session.devices.compactMap { $0 }
guard !cameras.isEmpty else { throw CameraHelperError.noCamerasAvailable }
for camera in cameras {
if camera.position == .front {
self.frontCamera = camera
}
if camera.position == .back {
self.rearCamera = camera
try camera.lockForConfiguration()
camera.focusMode = .continuousAutoFocus
camera.unlockForConfiguration()
}
}
}
func configureDeviceInputs() throws {
guard let captureSession = self.captureSession else { throw CameraHelperError.captureSessionIsMissing }
if let rearCamera = self.rearCamera {
self.rearCameraInput = try AVCaptureDeviceInput(device: rearCamera)
if captureSession.canAddInput(self.rearCameraInput!) { captureSession.addInput(self.rearCameraInput!) }
self.currentCameraPosition = .rear
}
else if let frontCamera = self.frontCamera {
self.frontCameraInput = try AVCaptureDeviceInput(device: frontCamera)
if captureSession.canAddInput(self.frontCameraInput!) { captureSession.addInput(self.frontCameraInput!) }
else { throw CameraHelperError.inputsAreInvalid }
self.currentCameraPosition = .front
}
else { throw CameraHelperError.noCamerasAvailable }
}
func configurePhotoOutput() throws {
guard let captureSession = self.captureSession else { throw CameraHelperError.captureSessionIsMissing }
self.photoOutput = AVCapturePhotoOutput()
self.photoOutput!.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey : AVVideoCodecType.jpeg])], completionHandler: nil)
if captureSession.canAddOutput(self.photoOutput!) { captureSession.addOutput(self.photoOutput!) }
captureSession.startRunning()
}
DispatchQueue(label: "prepare").async {
do {
createCaptureSession()
try configureCaptureDevices()
try configureDeviceInputs()
try configurePhotoOutput()
}
catch {
DispatchQueue.main.async {
completionHandler(error)
}
return
}
DispatchQueue.main.async {
completionHandler(nil)
}
}
}
func displayPreview(on view: UIView) throws {
guard let captureSession = self.captureSession, captureSession.isRunning else { throw CameraHelperError.captureSessionIsMissing }
self.previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.previewLayer?.connection?.videoOrientation = .portrait
view.layer.insertSublayer(self.previewLayer!, at: 0)
self.previewLayer?.frame = view.frame
}
func switchCameras() throws {
guard let currentCameraPosition = currentCameraPosition, let captureSession = self.captureSession, captureSession.isRunning else { throw CameraHelperError.captureSessionIsMissing }
captureSession.beginConfiguration()
func switchToFrontCamera() throws {
guard let rearCameraInput = self.rearCameraInput, captureSession.inputs.contains(rearCameraInput),
let frontCamera = self.frontCamera else { throw CameraHelperError.invalidOperation }
self.frontCameraInput = try AVCaptureDeviceInput(device: frontCamera)
captureSession.removeInput(rearCameraInput)
if captureSession.canAddInput(self.frontCameraInput!) {
captureSession.addInput(self.frontCameraInput!)
self.currentCameraPosition = .front
}
else {
throw CameraHelperError.invalidOperation
}
}
func switchToRearCamera() throws {
guard let frontCameraInput = self.frontCameraInput, captureSession.inputs.contains(frontCameraInput),
let rearCamera = self.rearCamera else { throw CameraHelperError.invalidOperation }
self.rearCameraInput = try AVCaptureDeviceInput(device: rearCamera)
captureSession.removeInput(frontCameraInput)
if captureSession.canAddInput(self.rearCameraInput!) {
captureSession.addInput(self.rearCameraInput!)
self.currentCameraPosition = .rear
}
else { throw CameraHelperError.invalidOperation }
}
switch currentCameraPosition {
case .front:
try switchToRearCamera()
case .rear:
try switchToFrontCamera()
}
captureSession.commitConfiguration()
}
func captureImage(completion: @escaping (UIImage?, Error?) -> Void) {
guard let captureSession = captureSession, captureSession.isRunning else { completion(nil, CameraHelperError.captureSessionIsMissing); return }
let settings = AVCapturePhotoSettings()
settings.flashMode = self.flashMode
self.photoOutput?.capturePhoto(with: settings, delegate: self)
self.photoCaptureCompletionBlock = completion
}
}
extension CameraHelper: AVCapturePhotoCaptureDelegate {
public func photoOutput(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?,
resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Swift.Error?) {
if let error = error { self.photoCaptureCompletionBlock?(nil, error) }
else if let buffer = photoSampleBuffer, let data = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: buffer, previewPhotoSampleBuffer: nil),
let image = UIImage(data: data) {
self.photoCaptureCompletionBlock?(image, nil)
}
else {
self.photoCaptureCompletionBlock?(nil, CameraHelperError.unknown)
}
}
}
extension CameraHelper {
enum CameraHelperError: Swift.Error {
case captureSessionAlreadyRunning
case captureSessionIsMissing
case inputsAreInvalid
case invalidOperation
case noCamerasAvailable
case unknown
}
public enum CameraPosition {
case front
case rear
}
}
That is the helper class.
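For reference, here is a minimal sketch of how the helper is typically driven from a view controller, based only on the methods shown above (the view controller name and the previewView outlet are assumptions, not part of the repo):

import UIKit

// Sketch: call prepare() and then displayPreview(on:) once setup succeeds
class CameraViewController: UIViewController {
    @IBOutlet var previewView: UIView!   // assumed outlet
    let cameraHelper = CameraHelper()

    override func viewDidLoad() {
        super.viewDidLoad()
        cameraHelper.prepare { [weak self] error in
            if let error = error {
                // noCamerasAvailable / captureSessionIsMissing end up here
                print("Camera setup failed: \(error)")
                return
            }
            if let previewView = self?.previewView {
                try? self?.cameraHelper.displayPreview(on: previewView)
            }
        }
    }
}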

Use .builtInWideAngleCamera instead of .builtInDualCamera when setting up the AVCaptureDevice.
Replace this function in your project:
func configureCaptureDevices() throws {
let session = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .unspecified)
let cameras = session.devices.compactMap { $0 }
guard !cameras.isEmpty else { throw CameraHelperError.noCamerasAvailable }
for camera in cameras {
if camera.position == .front {
self.frontCamera = camera
}
if camera.position == .back {
self.rearCamera = camera
try camera.lockForConfiguration()
camera.focusMode = .continuousAutoFocus
camera.unlockForConfiguration()
}
}
}
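As a hedged aside (not specific to the AppCoda project): the discovery session also returns no devices on the iOS Simulator, and on a real device the app needs an NSCameraUsageDescription entry in Info.plist plus granted camera permission before the session will deliver frames. A quick check before calling prepare might look like this:

import AVFoundation

// Sketch: confirm camera authorization before building the capture session
func checkCameraAccess(then start: @escaping () -> Void) {
    switch AVCaptureDevice.authorizationStatus(for: .video) {
    case .authorized:
        start()
    case .notDetermined:
        AVCaptureDevice.requestAccess(for: .video) { granted in
            DispatchQueue.main.async { if granted { start() } }
        }
    default:
        print("Camera access denied or restricted - enable it in Settings")
    }
}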

Related

Define the video URL as the UIView in your class

My Swift code should be able to take a snapshot of a video and then display that image in a UIImageView. Instead of using an online link, I just want the URL to be the UIView in my class, so the video URL should be previewView rather than the https link I have below. All of the code below is in this class:
import UIKit
import AVFoundation
class ViewController: UIViewController, AVCapturePhotoCaptureDelegate {
@IBOutlet var previewView : UIView!
@IBOutlet var captureImageView : UIImageView!
var captureSession: AVCaptureSession!
var stillImageOutput: AVCapturePhotoOutput!
var videoPreviewLayer: AVCaptureVideoPreviewLayer!
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
// Setup your camera here...
captureSession = AVCaptureSession()
captureSession.sessionPreset = .medium
guard let backCamera = AVCaptureDevice.default(for: AVMediaType.video)
else {
print("Unable to access back camera!")
return
}
do {
let input = try AVCaptureDeviceInput(device: backCamera)
//Step 9
stillImageOutput = AVCapturePhotoOutput()
if captureSession.canAddInput(input) && captureSession.canAddOutput(stillImageOutput) {
captureSession.addInput(input)
captureSession.addOutput(stillImageOutput)
setupLivePreview()
}
}
catch let error {
print("Error Unable to initialize back camera: \(error.localizedDescription)")
}
}
func setupLivePreview() {
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
videoPreviewLayer.videoGravity = .resizeAspect
videoPreviewLayer.connection?.videoOrientation = .portrait
previewView.layer.addSublayer(videoPreviewLayer)
//Step12
DispatchQueue.global(qos: .userInitiated).async { //[weak self] in
self.captureSession.startRunning()
//Step 13
DispatchQueue.main.async {
self.videoPreviewLayer.frame = self.previewView.bounds
}
}
}
@IBAction func startRecord(_ sender: Any) {
}
@IBAction func Save(_ sender: Any) {
//what do I put in the 2 highlighted blocks
let videoURL = "https://www.youtube.com/watch?v=Txt25dw-lIk"
self.getThumbnailFromUrl(videoURL) { [weak self] (img) in
guard let _ = self else { return }
if let img = img {
self?.captureImageView.image = img
}
}
}
func getThumbnailFromUrl(_ url: String?, _ completion: @escaping ((_ image: UIImage?)->Void)) {
guard let url = URL(string: url ?? "") else { return }
DispatchQueue.main.async {
let asset = AVAsset(url: url)
let assetImgGenerate = AVAssetImageGenerator(asset: asset)
assetImgGenerate.appliesPreferredTrackTransform = true
let time = CMTimeMake(value: 2, timescale: 1)
do {
let img = try assetImgGenerate.copyCGImage(at: time, actualTime: nil)
let thumbnail = UIImage(cgImage: img)
completion(thumbnail)
} catch {
print("Error :: ", error.localizedDescription)
completion(nil)
}
}
}
@IBAction func didTakePhoto(_ sender: Any) {
let settings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
stillImageOutput.capturePhoto(with: settings, delegate: self)
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
guard let imageData = photo.fileDataRepresentation()
else { return }
let image = UIImage(data: imageData)
captureImageView.image = image
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
self.captureSession.stopRunning()
}
}
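No accepted answer is shown for this one, but a hedged observation based on the code itself: AVAssetImageGenerator only works on file- or URL-backed assets, so previewView cannot stand in for a URL. The ViewController above already has everything needed to grab a still straight from the running session; the Save action could simply reuse the existing photo-output path, for example:

// Sketch: capture the snapshot from the live session instead of a remote URL
@IBAction func Save(_ sender: Any) {
    let settings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
    stillImageOutput.capturePhoto(with: settings, delegate: self)
    // photoOutput(_:didFinishProcessingPhoto:error:) below already assigns the
    // resulting UIImage to captureImageView
}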

func captureOutput is never called

I'd like to add a filter to each frame I record in real time and display the filtered image in a UIImageView; if anyone could help, it would be nice.
But captureOutput is never called. Here is my code:
class Measurement: UIViewController , AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet weak var cameraPreview: UIView!
@IBOutlet weak var imageView: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
setupCameraSession()
toggleTorch(on: true)
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
view.layer.addSublayer(previewLayer)
cameraSession.startRunning()
}
lazy var cameraSession: AVCaptureSession = {
let s = AVCaptureSession()
s.sessionPreset = AVCaptureSession.Preset.low
return s
}()
lazy var previewLayer: AVCaptureVideoPreviewLayer = {
let preview = AVCaptureVideoPreviewLayer(session: self.cameraSession)
preview.position = CGPoint(x:182,y: 485)
preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
preview.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
preview.bounds = imageView.bounds
//preview.position = CGPoint(x:self.view.bounds.midX,y: self.view.bounds.midY)
imageView.layer.addSublayer(preview)
return preview
}()
func toggleTorch(on: Bool) {
guard let device = AVCaptureDevice.default(for: .video) else { return }
if device.hasTorch {
do {
try device.lockForConfiguration()
if on == true {
device.torchMode = .on
} else {
device.torchMode = .off
}
device.unlockForConfiguration()
} catch {
print("Torch could not be used")
}
} else {
print("Torch is not available")
}
}
func setupCameraSession() {
let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
do {
let deviceInput = try AVCaptureDeviceInput(device: captureDevice!)
cameraSession.beginConfiguration()
if (cameraSession.canAddInput(deviceInput) == true) {
cameraSession.addInput(deviceInput)
print("Processing Data.")
}
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString) : NSNumber(value: kCVPixelFormatType_32BGRA as UInt32)] as [String : AnyObject]
dataOutput.alwaysDiscardsLateVideoFrames = true
print("Processing Data.")
if (cameraSession.canAddOutput(dataOutput) == true) {
cameraSession.addOutput(dataOutput)
print("Processing Data.")
}
cameraSession.commitConfiguration()
let queue = DispatchQueue(label: "com.invasivecode.videoQueue")
dataOutput.setSampleBufferDelegate(self, queue: queue)
}
catch let error as NSError {
print("\(error), \(error.localizedDescription)")
}
}
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
print("Processing Data.")
guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
//let chromaKeyFilter = colorCubeFilterForChromaKey(hueAngle: 120)
let ciImage = CIImage(cvPixelBuffer: imageBuffer)
let context = CIContext()
guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return }
let image = UIImage(cgImage: cgImage)
if let chromaKeyFilter = CIFilter(name: "CISepiaTone") {
let beginImage = CIImage(image: image)
chromaKeyFilter.setValue(beginImage, forKey: kCIInputImageKey)
chromaKeyFilter.setValue(0.5, forKey: kCIInputIntensityKey)
if let output = chromaKeyFilter.outputImage {
if let cgimg = context.createCGImage(output, from: output.extent) {
let processedImage = UIImage(cgImage: cgimg)
// do something interesting with the processed image
imageView.image = processedImage
}
}
}
}
func captureOutput(_ captureOutput: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
// Here you can count how many frames are dropped
}
func startCapture() {
print("\(self.classForCoder)/" + #function)
if cameraSession.isRunning {
print("already running")
return
}
cameraSession.startRunning()
toggleTorch(on: true)
}
You need to set the delegate:
dataOutput.setSampleBufferDelegate(self, queue: queue)
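Whether the delegate is set as above or via setSampleBufferDelegate(_:queue:) as setupCameraSession already does, there is a second problem: the frame callback in the question uses the old Swift 3 signature with implicitly unwrapped parameters, and that method is simply never called by the Swift 4 SDK. The signature the output actually invokes is the one below; note too that imageView must be updated on the main queue, since the sample-buffer queue is a background queue:

// Swift 4+ delegate method that AVCaptureVideoDataOutput actually calls
func captureOutput(_ output: AVCaptureOutput,
                   didOutput sampleBuffer: CMSampleBuffer,
                   from connection: AVCaptureConnection) {
    guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
    let ciImage = CIImage(cvPixelBuffer: imageBuffer)
    // ... apply the CISepiaTone filter exactly as in the question ...
    DispatchQueue.main.async {
        // assign the processed UIImage to imageView here, on the main thread
    }
}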

Reading a barcode image without using cocoapods or other external API's

I'm trying to use the new Apple Vision API to detect a barcode from an image and return its details. I've successfully detected a QR code and returned a message using CIDetector. However, I can't make this work for 1-dimensional barcodes. Here's my code:
import UIKit
import Vision
class BarcodeDetector {
func recognizeBarcode(for source: UIImage,
complete: @escaping (UIImage) -> Void) {
var resultImage = source
let detectBarcodeRequest = VNDetectBarcodesRequest { (request, error) in
if error == nil {
if let results = request.results as? [VNBarcodeObservation] {
print("Number of Barcodes found: \(results.count)")
if results.count == 0 { print("\r") }
var barcodeBoundingRects = [CGRect]()
for barcode in results {
barcodeBoundingRects.append(barcode.boundingBox)
let barcodeType = String(barcode.symbology.rawValue)?.replacingOccurrences(of: "VNBarcodeSymbology", with: "")
print("-Barcode Type: \(barcodeType!)")
if barcodeType == "QR" {
let image = CIImage(image: source)
image?.cropping(to: barcode.boundingBox)
self.qrCodeDescriptor(qrCode: barcode, qrCodeImage: image!)
}
}
resultImage = self.drawOnImage(source: resultImage, barcodeBoundingRects: barcodeBoundingRects)
}
} else {
print(error!.localizedDescription)
}
complete(resultImage)
}
let vnImage = VNImageRequestHandler(cgImage: source.cgImage!, options: [:])
try? vnImage.perform([detectBarcodeRequest])
}
private func qrCodeDescriptor(qrCode: VNBarcodeObservation, qrCodeImage: CIImage) {
if let description = qrCode.barcodeDescriptor as? CIQRCodeDescriptor {
readQRCode(qrCodeImage: qrCodeImage)
print(" -Payload: \(description.errorCorrectedPayload)")
print(" -Mask Pattern: \(description.maskPattern)")
print(" -Symbol Version: \(description.symbolVersion)\n")
}
}
private func readQRCode(qrCodeImage: CIImage) {
let detector: CIDetector = CIDetector(ofType: CIDetectorTypeQRCode, context: nil, options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])!
var qrCodeLink = ""
let features = detector.features(in: qrCodeImage)
for feature in features as! [CIQRCodeFeature] {
if let messageString = feature.messageString {
qrCodeLink += messageString
}
}
if qrCodeLink == "" {
print(" -No Code Message")
} else {
print(" -Code Message: \(qrCodeLink)")
}
}
How can I convert the image into an AVMetadataObject and then read it from there? Or is there a better approach?
Swift 4.1, using the Vision Framework (No 3rd party stuff or Pods)
Try this. It works for QR and for other types (Code39 in this example):
func startDetection() {
let request = VNDetectBarcodesRequest(completionHandler: self.detectHandler)
request.symbologies = [VNBarcodeSymbology.code39] // or use .QR, etc
self.requests = [request]
}
func detectHandler(request: VNRequest, error: Error?) {
guard let observations = request.results else {
//print("no result")
return
}
let results = observations.map({$0 as? VNBarcodeObservation})
for result in results {
print(result!.payloadStringValue!)
}
}
And then, in the sample buffer delegate callback:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return
}
var requestOptions:[VNImageOption:Any] = [:]
if let camData = CMGetAttachment(sampleBuffer, kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix, nil) {
requestOptions = [.cameraIntrinsics:camData]
}
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: CGImagePropertyOrientation(rawValue: 6)!, options: requestOptions)
do {
try imageRequestHandler.perform(self.requests)
} catch {
print(error)
}
}
The rest of the implementation is the regular AVCaptureDevice and AVCaptureSession stuff. You will also need to conform to AVCaptureVideoDataOutputSampleBufferDelegate
import AVFoundation
import Vision
var captureDevice: AVCaptureDevice!
var session = AVCaptureSession()
var requests = [VNRequest]()
override func viewDidLoad() {
super.viewDidLoad()
self.setupVideo()
self.startDetection()
}
func setupVideo() {
session.sessionPreset = AVCaptureSession.Preset.photo
captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
let deviceInput = try! AVCaptureDeviceInput(device: captureDevice!)
let deviceOutput = AVCaptureVideoDataOutput()
deviceOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
deviceOutput.setSampleBufferDelegate(self, queue: DispatchQueue.global(qos: DispatchQoS.QoSClass.default))
session.addInput(deviceInput)
session.addOutput(deviceOutput)
let imageLayer = AVCaptureVideoPreviewLayer(session: session)
imageLayer.frame = imageView.bounds
imageView.layer.addSublayer(imageLayer)
session.startRunning()
}
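Putting the fragments together, the host is a view controller that owns those properties and adopts the delegate protocol. A sketch (the class name and the imageView outlet used by setupVideo() are assumptions):

import UIKit
import AVFoundation
import Vision

// Sketch: container type for the properties and methods shown above
class BarcodeScannerViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    @IBOutlet weak var imageView: UIImageView!   // assumed outlet

    var captureDevice: AVCaptureDevice!
    var session = AVCaptureSession()
    var requests = [VNRequest]()

    override func viewDidLoad() {
        super.viewDidLoad()
        setupVideo()
        startDetection()
    }

    // setupVideo(), startDetection(), detectHandler(request:error:) and
    // captureOutput(_:didOutput:from:) go here, exactly as listed above
}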

Swift - AVAudioPlayer doesn't work properly

I have the following code :
let speechRecognizer = SFSpeechRecognizer()!
let audioEngine = AVAudioEngine()
var recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
var recognitionTask = SFSpeechRecognitionTask()
var audioPlayer : AVAudioPlayer!
override func viewDidLoad() {
super.viewDidLoad()
playSound(sound: "oops")
speechRecognizer.delegate = self
requestSpeechAuth()
}
func requestSpeechAuth(){
SFSpeechRecognizer.requestAuthorization { (authStatus) in
OperationQueue.main.addOperation({
switch authStatus {
case.authorized:
print("authorized")
case.denied:
print("denied")
case.restricted:
print("restricted")
case.notDetermined:
print("not determined")
}
})
}
}
// Function called when I press on my record button
func SpeechButtonDown() {
print("Start recording")
if audioEngine.isRunning {
endRecording()
} else {
do {
let audioSession = AVAudioSession.sharedInstance()
try audioSession.setCategory(AVAudioSessionCategoryRecord)
try audioSession.setMode(AVAudioSessionModeMeasurement)
try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
if let inputNode = audioEngine.inputNode {
recognitionRequest.shouldReportPartialResults = true
recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in
print("1")
if let result = result {
self.instructionLabel.text = result.bestTranscription.formattedString
print("2")
if result.isFinal {
self.audioEngine.stop()
inputNode.removeTap(onBus: 0)
if self.instructionLabel.text != "" {
self.compareWordwithVoice()
}
}
}
})
let recognitionFormat = inputNode.outputFormat(forBus: 0)
inputNode.installTap(onBus: 0, bufferSize: 1024, format: recognitionFormat, block: { (buffer, when) in
self.recognitionRequest.append(buffer)
})
audioEngine.prepare()
try audioEngine.start()
}
} catch {
}
}
}
// Function called when I release the record button
func EndRecording() {
endRecording()
print("Stop recording")
}
func endRecording() {
audioEngine.stop()
recognitionRequest.endAudio()
audioEngine.inputNode?.removeTap(onBus: 0)
}
func playSound(sound: String) {
if let url = Bundle.main.url(forResource: sound, withExtension: "wav") {
do {
audioPlayer = try AVAudioPlayer(contentsOf: url)
guard let player = audioPlayer else { return }
player.prepareToPlay()
player.play()
print("tutu")
} catch let error {
print(error.localizedDescription)
}
}
}
func compareWordwithVoice() {
let StringToLearn = setWordToLearn()
print("StringToLearn : \(StringToLearn)")
if let StringRecordedFull = instructionLabel.text{
let StringRecorded = (StringRecordedFull as NSString).replacingOccurrences(of: " ", with: "").lowercased()
print("StringRecorded : \(StringRecorded)")
if StringRecorded == "appuyezsurleboutonendessousetprenoncezl’expression" {
print("not yet")
} else {
if StringToLearn == StringRecorded {
playSound(sound: "success")
print("success")
// update UI
} else {
playSound(sound: "oops")
print("oops")
// update UI
}
}
}
}
func setWordToLearn() -> String {
if let wordToLearnFull = expr?.expression {
print(wordToLearnFull)
var wordToLearn = (wordToLearnFull as NSString).replacingOccurrences(of: " ", with: "").lowercased()
wordToLearn = (wordToLearn as NSString).replacingOccurrences(of: ".", with: "")
wordToLearn = (wordToLearn as NSString).replacingOccurrences(of: "!", with: "")
wordToLearn = (wordToLearn as NSString).replacingOccurrences(of: "?", with: "")
wordToLearn = (wordToLearn as NSString).replacingOccurrences(of: ",", with: "")
wordToLearn = (wordToLearn as NSString).replacingOccurrences(of: "/", with: "")
print(wordToLearn)
return wordToLearn
}
print("no wordToLearn")
return ""
}
The problem is that playSound works perfectly when it is called from viewDidLoad, but doesn't work when it is called by the compareWordwithVoice() function, even though it prints "tutu" in both cases, so the playSound function is executed every time.
Could the problem be that AVAudioPlayer and AVAudioEngine cannot work at the same time?
Thanks
I've experienced the same thing with my code, and from searching online it seems there is an unspoken bug when using AVAudioPlayer and AVAudioEngine separately.
I got the information from the following link; I did not find anything else online that states why this bug happens:
https://swiftios8dev.wordpress.com/2015/03/05/sound-effects-using-avaudioengine/
The suggestion was to use AVAudioEngine for everything.
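For what that suggestion could look like in practice, here is a minimal sketch of playing the bundled sound through the question's existing audioEngine instead of AVAudioPlayer (the node wiring is an assumption, not code from the linked post):

// Sketch: play "oops.wav" through the same AVAudioEngine used for recognition
let playerNode = AVAudioPlayerNode()
audioEngine.attach(playerNode)

if let url = Bundle.main.url(forResource: "oops", withExtension: "wav"),
   let file = try? AVAudioFile(forReading: url) {
    audioEngine.connect(playerNode, to: audioEngine.mainMixerNode, format: file.processingFormat)
    playerNode.scheduleFile(file, at: nil, completionHandler: nil)
    if !audioEngine.isRunning {
        try? audioEngine.start()
    }
    playerNode.play()
}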
I think "compareThings" always plays "oops" sound and this sound is not good (too quiet or broken).
Please try to play "oops" sound from "viewDidLoad" func to make sure sound is okay.
If it is okay (I don't think so) - set breakpoint in "playSound" func to see what is going on (sound name, does it exists etc).
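One more thing worth checking, offered as an assumption rather than a confirmed cause: SpeechButtonDown switches the shared AVAudioSession to the record-only category, and once that category is active AVAudioPlayer output is silenced even though play() returns normally, which would match seeing "tutu" with no sound. A category that allows playback while recording would look roughly like this:

// Sketch: let AVAudioPlayer stay audible while the recognizer records
do {
    let audioSession = AVAudioSession.sharedInstance()
    try audioSession.setCategory(AVAudioSessionCategoryPlayAndRecord, with: [.defaultToSpeaker])
    try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
} catch {
    print("Audio session error: \(error)")
}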

Switching Camera with a button in Swift

This seems to work to switch the camera from the back to the front, but I'm trying to come up with an 'if' statement so that I can switch it back too. Any ideas or advice?
@IBAction func didTouchSwitchButton(sender: UIButton) {
let camera = getDevice(.Front)
let cameraBack = getDevice(.Back)
do {
input = try AVCaptureDeviceInput(device: camera)
} catch let error as NSError {
print(error)
input = nil
}
if(captureSession?.canAddInput(input) == true){
captureSession?.addInput(input)
stillImageOutput?.outputSettings = [AVVideoCodecKey : AVVideoCodecJPEG]
if(captureSession?.canAddOutput(stillImageOutput) == true){
captureSession?.addOutput(stillImageOutput)
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer?.frame = cameraView.bounds
cameraView.layer.addSublayer(previewLayer!)
captureSession?.startRunning()
}
}
}
func switchCamera(_ sender: UIButton) {
if let session = captureSession {
let currentCameraInput: AVCaptureInput = session.inputs[0]
session.removeInput(currentCameraInput)
var newCamera: AVCaptureDevice
newCamera = AVCaptureDevice.default(for: AVMediaType.video)!
if (currentCameraInput as! AVCaptureDeviceInput).device.position == .back {
UIView.transition(with: self.cameraView, duration: 0.5, options: .transitionFlipFromLeft, animations: {
newCamera = self.cameraWithPosition(.front)!
}, completion: nil)
} else {
UIView.transition(with: self.cameraView, duration: 0.5, options: .transitionFlipFromRight, animations: {
newCamera = self.cameraWithPosition(.back)!
}, completion: nil)
}
do {
try self.captureSession?.addInput(AVCaptureDeviceInput(device: newCamera))
}
catch {
print("error: \(error.localizedDescription)")
}
}
}
func cameraWithPosition(_ position: AVCaptureDevice.Position) -> AVCaptureDevice? {
let deviceDescoverySession = AVCaptureDevice.DiscoverySession.init(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.unspecified)
for device in deviceDescoverySession.devices {
if device.position == position {
return device
}
}
return nil
}
First create an enum to check the type of camera:
enum CameraDirection {
case front
case back
}
Then create a variable for the enum:
var currentDirection: CameraDirection = .front // or initial direction
Then in your didTouchSwitchButton function:
if (currentDirection == .front) {
currentDirection = .back
} else {
currentDirection = .front
}
reload()
You can see that I called a function called reload. Create that:
func reload() {
let camera = getDevice(.Front)
let cameraBack = getDevice(.Back)
do {
if currentDirection == .front {
input = try AVCaptureDeviceInput(device: camera)
} else {
input = try AVCaptureDeviceInput(device: cameraBack)
}
} catch let error as NSError {
print(error)
input = nil
}
//rest of code
}
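For the //rest of code part, one caveat worth stating (as a sketch under stated assumptions, not the original answer's code): the session will refuse the new camera input while the old one is still attached, so the swap has to remove the existing input first, ideally between beginConfiguration and commitConfiguration. Assuming input, captureSession, previewLayer and cameraView are the properties already used in the question, the remainder could look like:

// Sketch: swap inputs on the existing session after `input` has been created above
captureSession?.beginConfiguration()
if let currentInput = captureSession?.inputs.first {
    captureSession?.removeInput(currentInput)
}
if let newInput = input, captureSession?.canAddInput(newInput) == true {
    captureSession?.addInput(newInput)
}
captureSession?.commitConfiguration()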