use of unresolved identifier - swift

The input has been defined as try AVCaptureDeviceInput(device: captureDevice), but it still says input is an unresolved identifier.
Please see my code below; I have tried multiple approaches but with no success.
import UIKit
import AVFoundation

class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {

    var captureSession: AVCaptureSession?
    var videoPreviewLayer: AVCaptureVideoPreviewLayer?
    var qrCodeFrameView: UIView?

    override func viewDidLoad() {
        super.viewDidLoad()

        // Get an instance of AVCaptureDevice class to initialize a device object and provide the video as the media type parameter.
        let captureDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)

        // Get an instance of the AVCaptureDeviceInput class using the previous device object.
        do {
            let captureDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
            let input = try AVCaptureDeviceInput(device: captureDevice)
            // Do the rest of your work...
        } catch let error as NSError {
            // Handle any errors
            print(error)
        }

        // Initialize the captureSession object
        captureSession = AVCaptureSession()
        captureSession?.addInput(input as! AVCaptureInput)

        // Set the input device on the capture session.
        // Initialize a AVCaptureMetadataOutput object and set it as the output device to the capture session.
        let captureMetadataOuput = AVCaptureMetadataOutput()
        captureSession?.addOutput(captureMetadataOuput)

        // Set delegate and use the default dispatch queue to execute the call back
        captureMetadataOuput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        captureMetadataOuput.metadataObjectTypes = [AVMetadataObjectTypeQRCode]

        // Initialize the video preview layer and add it as a sublayer to the viewPreview view's layer.
        videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        videoPreviewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
        videoPreviewLayer?.frame = view.layer.bounds
        view.layer.addSublayer(videoPreviewLayer!)

        // Start video capture
        captureSession?.startRunning()
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }
}
How do I fix this?

As already explained in the other answers, your input
variable is limited to the scope of the do block.
An alternative solution – if you want to keep the do/catch blocks
smaller and localized – is to declare the variable outside of the block:
let captureDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
let input: AVCaptureDeviceInput
do {
    input = try AVCaptureDeviceInput(device: captureDevice)
} catch let error as NSError {
    print(error)
    return // Must return from method here ...
}
// `input` is defined and initialized now ...
captureSession = AVCaptureSession()
captureSession?.addInput(input)
// ...
Note that this requires that you return immediately in the error
case, as input would be undefined otherwise.
Or, if the error message is not important, use try? in a guard
statement:
let captureDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
guard let input = try? AVCaptureDeviceInput(device: captureDevice) else {
    return
}
// `input` is defined and initialized now ...
captureSession = AVCaptureSession()
captureSession?.addInput(input)
// ...
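The related questions further down use the newer AVFoundation API, where the device lookup itself returns an optional. With that API a single guard can cover both the device and the input. A rough sketch only, not tied to a specific Swift version:

// Sketch with the newer API: AVCaptureDevice.default(for:) returns an optional device.
guard let captureDevice = AVCaptureDevice.default(for: .video),
      let input = try? AVCaptureDeviceInput(device: captureDevice) else {
    return
}
captureSession = AVCaptureSession()
captureSession?.addInput(input)
// ...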

It's a scope issue. Your captureDevice and input constants are only usable inside the do block. Update your code to something like this:
override func viewDidLoad() {
    super.viewDidLoad()

    do {
        // Get an instance of AVCaptureDevice class to initialize a device object and provide the video as the media type parameter.
        let captureDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)

        // Get an instance of the AVCaptureDeviceInput class using the previous device object.
        let input = try AVCaptureDeviceInput(device: captureDevice)

        // Initialize the captureSession object and set the input device on the capture session.
        captureSession = AVCaptureSession()
        captureSession?.addInput(input)

        // Initialize a AVCaptureMetadataOutput object and set it as the output device to the capture session.
        let captureMetadataOuput = AVCaptureMetadataOutput()
        captureSession?.addOutput(captureMetadataOuput)

        // Set delegate and use the default dispatch queue to execute the call back
        captureMetadataOuput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        captureMetadataOuput.metadataObjectTypes = [AVMetadataObjectTypeQRCode]

        // Initialize the video preview layer and add it as a sublayer to the viewPreview view's layer.
        videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        videoPreviewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
        videoPreviewLayer?.frame = view.layer.bounds
        view.layer.addSublayer(videoPreviewLayer!)

        // Start video capture
        captureSession?.startRunning()
    } catch let error as NSError {
        // Handle any errors
        print(error)
    }
}

input is scoped to the do block; it's not visible outside.
Basically, it's a very bad idea to just print the error and continue as if nothing happened. Always put all of the code that depends on the successful initialization inside the do block:
do {
    let captureDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
    let input = try AVCaptureDeviceInput(device: captureDevice)

    // Initialize the captureSession object
    captureSession = AVCaptureSession()
    captureSession?.addInput(input as! AVCaptureInput)

    // Initialize a AVCaptureMetadataOutput object and set it as the output
    ...

    // Start video capture
    captureSession?.startRunning()

    // Do the rest of your work...
} catch let error as NSError {
    // Handle any errors
    print(error)
}

Related

"Initializer for conditional binding...." after I fix, I get error "use of unresolved indentifier"

I've created a function called prepareCamera. It checks the device type, media type, and camera position.
When I use if let on my object availableDevices, I get the error "Initializer for conditional binding must have optional type". After going through Stack Overflow, I realized that my object is not an optional and that if let should only be used with optional types. I removed the if let, and now I get the error "use of unresolved identifier".
My code is below. This is my first time using Stack Overflow, so bear with me, haha. Any help would be greatly appreciated.
import UIKit
import AVFoundation

class goliveViewController: UIViewController {

    let captureSession = AVCaptureSession()
    var previewLayer: CALayer!
    var captureDevice: AVCaptureDevice!

    override func viewDidLoad() {
        super.viewDidLoad()
        prepareCamera()
        // Do any additional setup after loading the view.
    }

    func prepareCamera() {
        captureSession.sessionPreset = AVCaptureSession.Preset.photo
        if let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .front).devices {
            captureDevice = availableDevices.first
            beginSession()
        }
    }

    func beginSession() {
        do {
            let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
            captureSession.addInput(captureDeviceInput)
        } catch {
            print(error.localizedDescription)
        }
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
            self.previewLayer = previewLayer
            self.view.layer.addSublayer(self.previewLayer)
            self.previewLayer.frame = self.view.layer.frame
            captureSession.startRunning()
            let dataOutput = AVCaptureVideoDataOutput()
            dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)]
            dataOutput.alwaysDiscardsLateVideoFrames = true
            if captureSession.canAddOutput(dataOutput) {
                captureSession.addOutput(dataOutput)
            }
            captureSession.commitConfiguration()
        }
    }
}
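One detail worth noting in prepareCamera: devices on AVCaptureDevice.DiscoverySession is a non-optional [AVCaptureDevice], so the optional to bind is its first element rather than the array itself. A minimal sketch of that approach, using the same Swift 4 API as the code above:

func prepareCamera() {
    captureSession.sessionPreset = AVCaptureSession.Preset.photo

    // `devices` is a non-optional array; bind its optional `first` element instead.
    let discovery = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera],
                                                     mediaType: .video,
                                                     position: .front)
    if let device = discovery.devices.first {
        captureDevice = device
        beginSession()
    }
}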

Initializer error in Camera App for Xcode in Swift

I am building an app similar to a camera app in Xcode 10.1 using Swift. To do this, I have imported AVFoundation, and I am close to finishing my code. However, on this line of code
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
which is in this block of code:
func beginSession() {
    do {
        let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice!)
        captureSession.addInput(captureDeviceInput)
    } catch {
        print(error.localizedDescription)
    }
    let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
        self.previewLayer = self.previewLayer
        self.view.layer.addSublayer(self.previewLayer)
        self.previewLayer.frame = self.view.layer.frame
        captureSession.startRunning()
        let dataOutput = AVCaptureVideoDataOutput()
        dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)] as [String : Any]
        dataOutput.alwaysDiscardsLateVideoFrames = true
        if captureSession.canAddOutput(dataOutput) {
            captureSession.addOutput(dataOutput)
        }
An error appears that reads "Cannot invoke initializer for type 'AVCaptureVideoPreviewLayer' with an argument list of type '(session: AVCaptureSession, () -> ())'".
I don't know exactly what this means or how to fix it, as I am relatively new to programming.
Where have you initialized captureSession?
Try something like this in your UIViewController:
var captureSession = AVCaptureSession()
var videoPreviewLayer: AVCaptureVideoPreviewLayer?

override func viewDidLoad() {
    super.viewDidLoad()
    beginSession()
}

func beginSession() {
    // Get an instance of the AVCaptureDevice class to initialize a device object and provide the video as the media type parameter.
    if let captureDevice = AVCaptureDevice.default(for: AVMediaType.video) {
        do {
            // Get an instance of the AVCaptureDeviceInput class using the previous device object.
            let input = try AVCaptureDeviceInput(device: captureDevice)

            // Set the input device on the capture session.
            captureSession.addInput(input)

            // Initialize a AVCaptureVideoDataOutput object and set it as the output device to the capture session.
            let dataOutput = AVCaptureVideoDataOutput()
            dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)] as [String : Any]
            dataOutput.alwaysDiscardsLateVideoFrames = true
            if captureSession.canAddOutput(dataOutput) {
                captureSession.addOutput(dataOutput)
            }

            // Initialize the video preview layer and add it as a sublayer to the viewPreview view's layer.
            videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
            videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
            videoPreviewLayer?.frame = self.view.layer.bounds // It may be best to set up a UIView outlet instead of using self.view
            self.view.layer.addSublayer(videoPreviewLayer!)

            // Start video capture.
            captureSession.startRunning()
        } catch {
            // If any error occurs, simply print it out and don't continue any more.
            print(error)
            return
        }
    }
}
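As a side note on the error message itself: the stray opening brace after AVCaptureVideoPreviewLayer(session: captureSession) is parsed by Swift as a trailing closure, which is why the compiler reports an argument list of type '(session: AVCaptureSession, () -> ())'. Removing that brace (and its matching closing brace) lets the initializer resolve normally. A sketch of the relevant lines only:

// Without the stray trailing closure, the initializer takes just the session.
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.frame = view.layer.frame
view.layer.addSublayer(previewLayer)
captureSession.startRunning()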
Hope it helps you!

Value of optional type 'AVCaptureDevice?' must be unwrapped to a value of type 'AVCaptureDevice'

I was following a tutorial on how to build a barcode scanner using an iPad camera, and this is the code that was written. The tutorial was written for Xcode 8 and I am using Xcode 10. I am getting the error
"Value of optional type 'AVCaptureDevice?' must be unwrapped to a value of type 'AVCaptureDevice'"
in the do-try-catch statement. Can someone please tell me the correct way to deal with the optional value in this situation? When I change the line to read "let input = try AVCaptureDeviceInput(device: captureDevice!)",
the app crashes.
import AVFoundation
import UIKit

class ScannerViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {

    var video = AVCaptureVideoPreviewLayer() // contains what the camera is pointing at

    override func viewDidLoad() {
        super.viewDidLoad()

        // creates session
        let session = AVCaptureSession()

        // define capture device
        let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)

        do {
            let input = try AVCaptureDeviceInput(device: captureDevice) // Error occurs here
            session.addInput(input) // input coming from camera
        } catch {
            print("Error")
        }

        let output = AVCaptureMetadataOutput()
        session.addOutput(output)
        output.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        output.metadataObjectTypes = [AVMetadataObject.ObjectType.code93, AVMetadataObject.ObjectType.code39] // may need to change this based on barcode type

        video = AVCaptureVideoPreviewLayer(session: session)
        video.frame = view.layer.bounds
        view.layer.addSublayer(video)

        session.startRunning()
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
    }
}
When I searched for an answer to this issue, only your question came up at first. I then got help from a colleague and finally got it running without any error. Try the code below.
let captureDevice = AVCaptureDevice.default(for: .video)
do {
    let input = try AVCaptureDeviceInput(device: captureDevice!)
    if session.canAddInput(input) {
        session.addInput(input)
    }
} catch {
    print("Error")
}
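If AVCaptureDevice.default(for: .video) returns nil (for example in the Simulator, where no camera is available), the force unwrap above will still crash. A safer variant is to bind the optional first. A sketch only, reusing the session from the question's code:

// Unwrap the device safely instead of force-unwrapping it.
guard let captureDevice = AVCaptureDevice.default(for: .video) else {
    print("No video capture device available")
    return
}
do {
    let input = try AVCaptureDeviceInput(device: captureDevice)
    if session.canAddInput(input) {
        session.addInput(input)
    }
} catch {
    print(error)
}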

AVFoundation PDF417 scanner doesn't always work

I am creating an app with Swift 4 and Xcode 9 that scans PDF417 barcodes using AVFoundation. The scanner works with some codes, but it doesn't recognize, for example, the PDF417 barcode found on the front of a CA Lottery scratchers ticket.
Is there anything I am missing to make it work? Below is my code:
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: AVMediaType.video, position: .back)

guard let captureDevice = deviceDiscoverySession.devices.first else {
    print("Failed to get the camera device")
    return
}

do {
    captureSession = AVCaptureSession()

    let input = try AVCaptureDeviceInput(device: captureDevice)
    captureSession!.addInput(input)

    let captureMetadataOutput = AVCaptureMetadataOutput()
    captureSession!.addOutput(captureMetadataOutput)
    captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
    captureMetadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.pdf417]

    videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
    videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    videoPreviewLayer?.frame = view.layer.bounds
    view.layer.addSublayer(videoPreviewLayer!)

    captureSession?.startRunning()
} catch {
    print(error)
    return
}

func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    // Get the metadata object
    let metadataObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
    if scanType.contains(metadataObj.type) {
        let barCodeObj = videoPreviewLayer?.transformedMetadataObject(for: metadataObj)
        if metadataObj.stringValue != nil {
            callDelegate(metadataObj.stringValue)
            captureSession?.stopRunning()
            AudioServicesPlayAlertSound(SystemSoundID(kSystemSoundID_Vibrate))
            navigationController?.popViewController(animated: true)
        }
    }
}
Thanks!
Replace your scanner initialization code with the following, either in viewDidLoad or in whichever method you'd like it to be in:
// Global vars used in init below
var captureSession: AVCaptureSession!
var previewLayer: AVCaptureVideoPreviewLayer!

func setupCaptureInputDevice() {
    let cameraMediaType = AVMediaType.video
    captureSession = AVCaptureSession()

    // get the video capture device, which should be of type video
    guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else {
        // if there is an error then something is wrong, so dismiss
        dismiss(animated: true, completion: nil)
        return
    }

    let videoInput: AVCaptureDeviceInput

    // create a capture input for the device obtained above
    do {
        videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
    } catch {
        return
    }

    // check that the input can be added before adding it;
    // adding it without checking could crash or silently fail
    if captureSession.canAddInput(videoInput) {
        captureSession.addInput(videoInput)
    } else {
        // dismiss or display error
        return
    }

    // get ready to capture output somewhere
    let metadataOutput = AVCaptureMetadataOutput()

    // again check to make sure that we can do this operation before doing it
    if captureSession.canAddOutput(metadataOutput) {
        captureSession.addOutput(metadataOutput)
        // set the metadataOutput's delegate to self and deliver callbacks on the main queue
        metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        // specify your code type
        metadataOutput.metadataObjectTypes = [.pdf417]
    } else {
        // dismiss or display error
        return
    }

    // create a preview layer for the capture session
    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    // just add it to the screen
    previewLayer.frame = view.layer.bounds
    previewLayer.videoGravity = .resizeAspectFill
    view.layer.addSublayer(previewLayer)

    // and start running the session
    captureSession.startRunning()
}

Var init in do-catch

Take the following code:
// Setup components
do {
    let captureDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
    let deviceInput = try AVCaptureDeviceInput(device: captureDevice)
    let output = AVCaptureMetadataOutput()
    let session = AVCaptureSession()
} catch {
    return false
}
After this, you can't access the initialized variables. The error is "Use of unresolved identifier" if I want to access, e.g., deviceInput. But why? Either AVCaptureDeviceInput() throws and the catch block returns, or everything succeeds and the variables are initialized. What's the best way to solve this?
Vacawama's answer is perfectly correct, but just for educational purposes, here's a simplified version. You don't need anything but the initialization of deviceInput to happen inside a do block:
func test() {
    let captureDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
    let deviceInput: AVCaptureDeviceInput
    do { deviceInput = try AVCaptureDeviceInput(device: captureDevice) } catch { return }
    let output = AVCaptureMetadataOutput()
    let session = AVCaptureSession()
    // ... other stuff here
    print("got to here")
}
If the try fails, "got to here" never prints; we have exited the function in good order.
Still another approach might be to let your surrounding function throw and just go for it, with no do...catch at all:
func test() throws {
    let captureDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
    let deviceInput = try AVCaptureDeviceInput(device: captureDevice)
    let output = AVCaptureMetadataOutput()
    let session = AVCaptureSession()
    // ... other stuff here
    print("got to here")
}
This moves the onus of error-checking onto the caller of test().
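For example, the call site then decides what to do when the setup fails; a short sketch using the same test() function:

do {
    try test()
} catch {
    // Handle or report the setup failure here.
    print("Capture setup failed:", error)
}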
The do block defines a new scope. If you declare the variables with let or var inside the do {}, then they are only accessible within that block. If you want to use them after the do {}, declare them before the do statement. Note that you don't have to give them initial values, even when they are declared with let, because each of them is assigned only once before being used:
func foo() -> Bool {
    // Setup components
    let deviceInput: AVCaptureDeviceInput
    let captureDevice: AVCaptureDevice
    let output: AVCaptureMetadataOutput
    let session: AVCaptureSession

    do {
        captureDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
        deviceInput = try AVCaptureDeviceInput(device: captureDevice)
        output = AVCaptureMetadataOutput()
        session = AVCaptureSession()
    } catch {
        return false
    }

    // Do something to demo that the variables are accessible
    print(deviceInput.description)
    print(output.description)
    print(session.description)

    return false
}