I'm trying to get the image from the capture device, but it keeps returning nil at the if let input... line and I can't find a solution.
It tries to catch the error, but it instantly crashes.
let backCamera = AVCaptureDevice.default(for: AVMediaType.video)
do {
    if let input = try AVCaptureDeviceInput(device: backCamera!) as? AVCaptureInput {
        if (captureSession?.canAddInput(input))! {
            captureSession?.addInput(input)
            stillImageOutput = AVCapturePhotoOutput()
            let settings = AVCapturePhotoSettings()
            let settingsFormat = [AVVideoCodecKey : AVVideoCodecType.jpeg]
            settings.previewPhotoFormat = settingsFormat
            stillImageOutput?.capturePhoto(with: settings, delegate: self as! AVCapturePhotoCaptureDelegate)
            if (captureSession?.canAddOutput(stillImageOutput!))! {
                captureSession?.addOutput(stillImageOutput!)
                previewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
                previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspect
                previewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
                cameraView.layer.addSublayer(previewLayer!)
                captureSession?.startRunning()
            }
        }
    }
} catch let error as NSError {
    print("Error: \(error)")
}
It's not your error throwing that's causing the crash - it's all your force unwrapping. backCamera, captureSession, stillImageOutput, and previewLayer are all Optionals, but you're force unwrapping all of them every time. Why not use guard statements or optional binding to avoid those crashes? Ultimately you can't run any of that code if those items are nil, so I'd refactor your code like this:
//Use guard to make sure you have a non-nil captureSession and a default device for .video
guard let captureSession = captureSession,
      let backCamera = AVCaptureDevice.default(for: AVMediaType.video) else { return }
do {
    if let input = try AVCaptureDeviceInput(device: backCamera) as? AVCaptureInput {
        if captureSession.canAddInput(input) {
            captureSession.addInput(input)
            stillImageOutput = AVCapturePhotoOutput()
            //Since stillImageOutput is an Optional I'm putting in another guard just to avoid having to deal with it as an Optional
            guard let stillImageOutput = stillImageOutput else { return }
            let settings = AVCapturePhotoSettings()
            let settingsFormat = [AVVideoCodecKey : AVVideoCodecType.jpeg]
            settings.previewPhotoFormat = settingsFormat
            stillImageOutput.capturePhoto(with: settings, delegate: self as! AVCapturePhotoCaptureDelegate)
            if captureSession.canAddOutput(stillImageOutput) {
                captureSession.addOutput(stillImageOutput)
                previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
                //Same as above - since you declared previewLayer as Optional I'll guard so I don't have to deal with it as an Optional
                guard let previewLayer = previewLayer else { return }
                previewLayer.videoGravity = AVLayerVideoGravity.resizeAspect
                previewLayer.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
                cameraView.layer.addSublayer(previewLayer)
                captureSession.startRunning()
            }
        }
    }
} catch let error {
    print("Error: \(error)")
}
Honestly, some of this is a bit of overkill and comes down to personal preference, but with something like AVCaptureDevice.default, which truly may or may not be nil depending on the actual device, you probably don't want to crash when it doesn't exist.
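One force cast does remain above: self as! AVCapturePhotoCaptureDelegate. As a minimal sketch (assuming the enclosing class is a view controller; the name CameraViewController is hypothetical), conforming to the protocol lets you pass self directly:

extension CameraViewController: AVCapturePhotoCaptureDelegate {
    // Called once the photo has been captured and processed.
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        guard error == nil, let data = photo.fileDataRepresentation() else { return }
        // Use the JPEG data, e.g. UIImage(data: data)
    }
}

With that conformance in place, the capture call becomes stillImageOutput.capturePhoto(with: settings, delegate: self).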
Video not playing when the app is rebuilt (file path URL saved into Core Data) using AVCaptureSession.
The file path is not changing before and after the rebuild:
file:///private/var/mobile/Containers/Data/Application/3DA93FBC-9A20-40B4-A017-B3D5C7768301/tmp/63F6CEED-3202-4F5F-999B-5F138D73635D.mp4
I tried everything, but nothing works.
Here is my code for recording the video:
func setupPreview() {
    // Configure previewLayer
    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    previewLayer?.frame = shapeLayer.bounds
    previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    shapeLayer.layer.addSublayer(previewLayer!)
}

func setupSession() -> Bool {
    captureSession.sessionPreset = AVCaptureSession.Preset.high
    // Setup Camera
    let camera = AVCaptureDevice.default(for: AVMediaType.video)!
    do {
        let input = try AVCaptureDeviceInput(device: camera)
        if captureSession.canAddInput(input) {
            captureSession.addInput(input)
            activeInput = input
        }
    } catch {
        print("Error setting device video input: \(error)")
        return false
    }
    // Setup Microphone
    let microphone = AVCaptureDevice.default(for: AVMediaType.audio)!
    do {
        let micInput = try AVCaptureDeviceInput(device: microphone)
        if captureSession.canAddInput(micInput) {
            captureSession.addInput(micInput)
        }
    } catch {
        print("Error setting device audio input: \(error)")
        return false
    }
    // Movie output
    if captureSession.canAddOutput(movieOutput) {
        captureSession.addOutput(movieOutput)
    }
    return true
}

func startSession() {
    if !captureSession.isRunning {
        videoQueue().async {
            self.captureSession.startRunning()
        }
    }
}

func stopSession() {
    if captureSession.isRunning {
        videoQueue().async {
            self.captureSession.stopRunning()
        }
    }
}

func videoQueue() -> DispatchQueue {
    return DispatchQueue.main
}

func currentVideoOrientation() -> AVCaptureVideoOrientation {
    var orientation: AVCaptureVideoOrientation
    switch UIDevice.current.orientation {
    case .portrait:
        orientation = AVCaptureVideoOrientation.portrait
    case .landscapeRight:
        orientation = AVCaptureVideoOrientation.landscapeLeft
    case .portraitUpsideDown:
        orientation = AVCaptureVideoOrientation.portraitUpsideDown
    default:
        orientation = AVCaptureVideoOrientation.landscapeRight
    }
    return orientation
}

func startRecording() {
    if movieOutput.isRecording == false {
        save.setTitle("stop", for: UIControl.State.normal)
        let connection = movieOutput.connection(with: AVMediaType.video)
        if (connection?.isVideoOrientationSupported)! {
            connection?.videoOrientation = currentVideoOrientation()
        }
        if (connection?.isVideoStabilizationSupported)! {
            connection?.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.auto
        }
        let device = activeInput.device
        if device.isSmoothAutoFocusSupported {
            do {
                try device.lockForConfiguration()
                device.isSmoothAutoFocusEnabled = false
                device.unlockForConfiguration()
            } catch {
                print("Error setting configuration: \(error)")
            }
        }
        //EDIT2: And I forgot this
        outputURL = tempURL()
        movieOutput.startRecording(to: outputURL, recordingDelegate: self)
    } else {
        stopRecording()
    }
}

func tempURL() -> URL? {
    let directory = NSTemporaryDirectory() as NSString
    let path = directory.appendingPathComponent(NSUUID().uuidString + ".mp4")
    path22 = path
    let directoryURL: URL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
    let folderPath: URL = directoryURL.appendingPathComponent("Downloads", isDirectory: true)
    let fileURL: URL = folderPath.appendingPathComponent(path)
    return URL(fileURLWithPath: path)
}

func stopRecording() {
    if movieOutput.isRecording == true {
        movieOutput.stopRecording()
    }
}
Here is where I save into Core Data:
let managedObject = self.managedObjectContext
entity = NSEntityDescription.entity(forEntityName: "MediaData", in: managedObject!)
let personMO = NSManagedObject(entity: entity, insertInto: managedObject)
personMO.setValue("\(self.videoURL!)", forKey: "videosS")
personMO.setValue(dataImage, forKey: "thumbnails")
print(personMO)
do {
    try managedObject?.save()
    print("video saved")
} catch {
    print("Catch Error: failed to save")
}
let appdel = UIApplication.shared.delegate as! AppDelegate
appdel.avplayer = AVPlayer(url: videoURL!)
print(videoURL!)
let playerLayer = AVPlayerLayer(player: appdel.avplayer)
playerLayer.frame = self.view.bounds
self.view.layer.addSublayer(playerLayer)
appdel.avplayer?.play()
You must never save a full file path into Core Data or anywhere else. File paths are not persistent: your app is sandboxed, and the sandbox path can change at any time, especially between launches and reinstalls.
Instead, save the file name and reconstruct the path each time you need it. Just as you are calling FileManager.default.urls(for: .documentDirectory...) to construct the file path initially, so you must call it every time you want to access this file.
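Sketching that out (assuming the movie file is written into the Documents directory, and reusing the videosS attribute from your code):

// Store only the file name, never the absolute path.
let fileName = NSUUID().uuidString + ".mp4"
personMO.setValue(fileName, forKey: "videosS")

// Rebuild the full URL from the current sandbox every time you need the file.
func videoURL(forFileName fileName: String) -> URL {
    let documents = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
    return documents.appendingPathComponent(fileName)
}

The stored name stays valid across rebuilds because only the directory portion of the path changes.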
I've created a function called prepareCamera. It checks for device type, media type, and camera position.
When I use if let on my object availableDevices, I get the error "Initializer for conditional binding must have optional type". After going through Stack Overflow, I realized that my object is not an Optional and that if let should only be used with Optional types. I removed the if let, and now I get the error "use of unresolved identifier".
My code is below. This is my first time using Stack Overflow, so bear with me, haha. Any help would be greatly appreciated.
import UIKit
import AVFoundation

class goliveViewController: UIViewController {

    let captureSession = AVCaptureSession()
    var previewLayer: CALayer!
    var captureDevice: AVCaptureDevice!

    override func viewDidLoad() {
        super.viewDidLoad()
        prepareCamera()
        // Do any additional setup after loading the view.
    }

    func prepareCamera() {
        captureSession.sessionPreset = AVCaptureSession.Preset.photo
        if let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .front).devices {
            captureDevice = availableDevices.first
            beginSession()
        }
    }

    func beginSession() {
        do {
            let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
            captureSession.addInput(captureDeviceInput)
        } catch {
            print(error.localizedDescription)
        }
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
            self.previewLayer = previewLayer
            self.view.layer.addSublayer(self.previewLayer)
            self.previewLayer.frame = self.view.layer.frame
            captureSession.startRunning()
            let dataOutput = AVCaptureVideoDataOutput()
            dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)]
            dataOutput.alwaysDiscardsLateVideoFrames = true
            if captureSession.canAddOutput(dataOutput) {
                captureSession.addOutput(dataOutput)
            }
            captureSession.commitConfiguration()
        }
    }
}
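For what it's worth, AVCaptureDevice.DiscoverySession(...).devices returns a non-optional [AVCaptureDevice], so the conditional binding belongs on its Optional first element rather than on the array itself. A minimal sketch of prepareCamera with that change, keeping the property names above:

func prepareCamera() {
    captureSession.sessionPreset = AVCaptureSession.Preset.photo
    // .devices is a non-optional array; bind its Optional .first instead
    let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera],
                                                            mediaType: AVMediaType.video,
                                                            position: .front).devices
    if let device = availableDevices.first {
        captureDevice = device
        beginSession()
    }
}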
So, I am a beginner and I have been following an online tutorial. I have copied the code and I get no errors, but my program does not seem to be doing anything. This is the code:
import UIKit
import AVKit
import Vision

class viewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    override func viewDidLoad() {
        super.viewDidLoad()
        let captureSession = AVCaptureSession()
        captureSession.sessionPreset = .photo
        guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
        guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
        captureSession.addInput(input)
        captureSession.startRunning()
        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        view.layer.addSublayer(previewLayer)
        previewLayer.frame = view.frame
        let dataOutput = AVCaptureVideoDataOutput()
        dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
        captureSession.addOutput(dataOutput)
        guard let model = try? VNCoreMLModel(for: SqueezeNet().model) else { return }
        let request = VNCoreMLRequest(model: model) { (finishedReq, err) in
            //print(finishedReq.results)
            guard let results = finishedReq.results as? [VNClassificationObservation] else { return }
            guard let firstObservation = results.first else { return }
            print(firstObservation.identifier, firstObservation.confidence)
        }
        //VNImageRequestHandler(cgImage: <#T##CGImage#>, options: [:]).perform(requests: [VNRequest])
        func captureOutput(_ output: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
            //print("Camera was able to capture a frame:", Date())
            guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
            try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
        }
    }
}
One thing you forgot is asking permission to use the camera.
Something like:
AVCaptureDevice.requestAccess(for: AVMediaType.video) { response in
    if response {
        // Access granted. You can continue.
    } else {
        // Tell the user.
    }
}
Also, you have the AVCaptureSession as a local/automatic variable, so it will be deallocated when leaving viewDidLoad(). You must make it an instance variable; the iOS documentation even shows this.
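For example, as a property of the view controller (the name MyViewController here is just a placeholder):

class MyViewController: UIViewController {
    // Stored as an instance variable, so the session outlives viewDidLoad()
    let captureSession = AVCaptureSession()

    override func viewDidLoad() {
        super.viewDidLoad()
        // configure captureSession here instead of creating a local one
    }
}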
Having made at least the two simple mistakes above, I'd advise you to follow a tutorial on how to do the basics first. Then, once you've got that right and you see something on screen, add the ML stuff.
Good luck, it seems like a very nice subject!
I am building an app similar to a camera app in Xcode 10.1 using Swift. To do this, I have imported AVFoundation and am close to finishing my code. However, on this line of code
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
which is in this block of code
func beginSession() {
    do {
        let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice!)
        captureSession.addInput(captureDeviceInput)
    } catch {
        print(error.localizedDescription)
    }
    let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
        self.previewLayer = self.previewLayer
        self.view.layer.addSublayer(self.previewLayer)
        self.previewLayer.frame = self.view.layer.frame
        captureSession.startRunning()
        let dataOutput = AVCaptureVideoDataOutput()
        dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)] as [String : Any]
        dataOutput.alwaysDiscardsLateVideoFrames = true
        if captureSession.canAddOutput(dataOutput) {
            captureSession.addOutput(dataOutput)
        }
an error appears that reads "Cannot invoke initializer for type 'AVCaptureVideoPreviewLayer' with an argument list of type '(session: AVCaptureSession, () -> ())'".
I don't know exactly what this means or how to fix it, as I am relatively new to programming.
Where have you initialized captureSession?
Try something like this in your UIViewController:
var captureSession = AVCaptureSession()
var videoPreviewLayer: AVCaptureVideoPreviewLayer?

override func viewDidLoad() {
    super.viewDidLoad()
    beginSession()
}

func beginSession() {
    // Get an instance of the AVCaptureDevice class to initialize a device object and provide the video as the media type parameter.
    if let captureDevice = AVCaptureDevice.default(for: AVMediaType.video) {
        do {
            // Get an instance of the AVCaptureDeviceInput class using the previous device object.
            let input = try AVCaptureDeviceInput(device: captureDevice)
            // Set the input device on the capture session.
            captureSession.addInput(input)
            // Initialize an AVCaptureVideoDataOutput object and set it as the output device of the capture session.
            let dataOutput = AVCaptureVideoDataOutput()
            dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)] as [String : Any]
            dataOutput.alwaysDiscardsLateVideoFrames = true
            if captureSession.canAddOutput(dataOutput) {
                captureSession.addOutput(dataOutput)
            }
            // Initialize the video preview layer and add it as a sublayer to the viewPreview view's layer.
            videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
            videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
            videoPreviewLayer?.frame = self.view.layer.bounds // It may be best to set up a UIView outlet instead of using self.view
            self.view.layer.addSublayer(videoPreviewLayer!)
            // Start video capture.
            captureSession.startRunning()
        } catch {
            // If any error occurs, simply print it out and don't continue any further.
            print(error)
            return
        }
    }
}
Hope it helps you!
I am creating an app using Swift 4 and Xcode 9 that scans PDF417 barcodes using AVFoundation. The scanner works with some codes, but it doesn't recognize the PDF417 barcode that you would find on the front of a CA Lottery scratchers ticket, for example.
Is there anything I am missing to make it work? Below is my code:
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: AVMediaType.video, position: .back)
guard let captureDevice = deviceDiscoverySession.devices.first else {
    print("Failed to get the camera device")
    return
}
do {
    captureSession = AVCaptureSession()
    let input = try AVCaptureDeviceInput(device: captureDevice)
    captureSession!.addInput(input)
    let captureMetadataOutput = AVCaptureMetadataOutput()
    captureSession!.addOutput(captureMetadataOutput)
    captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
    captureMetadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.pdf417]
    videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
    videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    videoPreviewLayer?.frame = view.layer.bounds
    view.layer.addSublayer(videoPreviewLayer!)
    captureSession?.startRunning()
} catch {
    print(error)
    return
}

func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    // Get the metadata object
    let metadataObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
    if scanType.contains(metadataObj.type) {
        let barCodeObj = videoPreviewLayer?.transformedMetadataObject(for: metadataObj)
        if metadataObj.stringValue != nil {
            callDelegate(metadataObj.stringValue)
            captureSession?.stopRunning()
            AudioServicesPlayAlertSound(SystemSoundID(kSystemSoundID_Vibrate))
            navigationController?.popViewController(animated: true)
        }
    }
}
Thanks!
Replace your initialization code for the scanner with the following, either in viewDidLoad or whichever method you'd like it in:
// Global vars used in init below
var captureSession: AVCaptureSession!
var previewLayer: AVCaptureVideoPreviewLayer!

func setupCaptureInputDevice() {
    let cameraMediaType = AVMediaType.video
    captureSession = AVCaptureSession()
    // get the video capture device, which should be of type video
    guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else {
        // if there is no device then something is wrong, so dismiss
        dismiss(animated: true, completion: nil)
        return
    }
    let videoInput: AVCaptureDeviceInput
    // create a capture input for the device created above
    do {
        videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
    } catch {
        return
    }
    // it is important to check whether the input can be added,
    // because adding it without checking could cause a crash
    if captureSession.canAddInput(videoInput) {
        captureSession.addInput(videoInput)
    } else {
        // dismiss or display error
        return
    }
    // get ready to capture output somewhere
    let metadataOutput = AVCaptureMetadataOutput()
    // again, check that the operation is possible before doing it
    if captureSession.canAddOutput(metadataOutput) {
        captureSession.addOutput(metadataOutput)
        // set the metadataOutput's delegate to self, delivering callbacks on the main queue
        metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        // specify your code type
        metadataOutput.metadataObjectTypes = [.pdf417]
    } else {
        // dismiss or display error
        return
    }
    // create a preview layer that displays the capture session's video
    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    // just add it to the screen
    previewLayer.frame = view.layer.bounds
    previewLayer.videoGravity = .resizeAspectFill
    view.layer.addSublayer(previewLayer)
    // and begin input
    captureSession.startRunning()
}
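A possible call site, assuming this lives in a UIViewController that also conforms to AVCaptureMetadataOutputObjectsDelegate:

override func viewDidLoad() {
    super.viewDidLoad()
    setupCaptureInputDevice()
}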