I am creating an app in Swift 4 and Xcode 9 that scans PDF417 barcodes using AVFoundation. The scanner works with some codes but doesn't recognize the PDF417 barcode you would find on the front of a CA Lottery scratchers ticket, for example.
Is there anything I am missing to make it work? Below is my code:
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: AVMediaType.video, position: .back)
guard let captureDevice = deviceDiscoverySession.devices.first else {
    print("Failed to get the camera device")
    return
}
do {
    captureSession = AVCaptureSession()
    let input = try AVCaptureDeviceInput(device: captureDevice)
    captureSession!.addInput(input)
    let captureMetadataOutput = AVCaptureMetadataOutput()
    captureSession!.addOutput(captureMetadataOutput)
    captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
    captureMetadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.pdf417]
    videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
    videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    videoPreviewLayer?.frame = view.layer.bounds
    view.layer.addSublayer(videoPreviewLayer!)
    captureSession?.startRunning()
} catch {
    print(error)
    return
}
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    // Get the metadata object
    let metadataObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
    if scanType.contains(metadataObj.type) {
        let barCodeObj = videoPreviewLayer?.transformedMetadataObject(for: metadataObj)
        if metadataObj.stringValue != nil {
            callDelegate(metadataObj.stringValue)
            captureSession?.stopRunning()
            AudioServicesPlayAlertSound(SystemSoundID(kSystemSoundID_Vibrate))
            navigationController?.popViewController(animated: true)
        }
    }
}
Thanks!
Replace your scanner initialization code with the following, either in viewDidLoad or in whatever method you'd like it to live in:
// Global vars used in the setup below
var captureSession: AVCaptureSession!
var previewLayer: AVCaptureVideoPreviewLayer!

func setupCaptureInputDevice() {
    captureSession = AVCaptureSession()
    // Get the default video capture device
    guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else {
        // If there is no camera, something is wrong, so dismiss
        dismiss(animated: true, completion: nil)
        return
    }
    let videoInput: AVCaptureDeviceInput
    // Create a capture input for the device obtained above
    do {
        videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
    } catch {
        return
    }
    // Check that the session can accept the input before adding it;
    // adding without checking can crash or silently fail
    if captureSession.canAddInput(videoInput) {
        captureSession.addInput(videoInput)
    } else {
        // dismiss or display error
        return
    }
    // Get ready to capture output somewhere
    let metadataOutput = AVCaptureMetadataOutput()
    // Again, make sure the session can accept the output before adding it
    if captureSession.canAddOutput(metadataOutput) {
        captureSession.addOutput(metadataOutput)
        // Set the metadataOutput's delegate to self and have it run on the main queue
        metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        // Specify your code type
        metadataOutput.metadataObjectTypes = [.pdf417]
    } else {
        // dismiss or display error
        return
    }
    // Create a preview layer for the capture session
    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    // Add it to the screen
    previewLayer.frame = view.layer.bounds
    previewLayer.videoGravity = .resizeAspectFill
    view.layer.addSublayer(previewLayer)
    // And begin capturing
    captureSession.startRunning()
}
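With that in place, the remaining wiring is minimal. A sketch, assuming a view controller that conforms to AVCaptureMetadataOutputObjectsDelegate so your existing delegate method keeps working (the class name is illustrative):

class ScannerViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
    var captureSession: AVCaptureSession!
    var previewLayer: AVCaptureVideoPreviewLayer!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Builds the session, preview layer, and metadata output defined above
        setupCaptureInputDevice()
    }
}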
I built a QR scanner in Swift 5. It acknowledges the QR code and scans it, but it only displays the URL that is embedded in the QR code. Does anyone have any advice on how to make it so that I can tap the link and open it in a browser?
This is the code I have for the scanner:
import UIKit
import AVFoundation

extension QRScannerController: AVCaptureMetadataOutputObjectsDelegate {
}

class QRScannerController: UIViewController {
    var captureSession = AVCaptureSession()
    var videoPreviewLayer: AVCaptureVideoPreviewLayer?
    var qrcodeFrameView: UIView?

    @IBOutlet var messageLabel: UILabel!
    @IBOutlet var topBar: UIView!

    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        // Check that the metadataObjects array contains at least one object
        if metadataObjects.count == 0 {
            qrcodeFrameView?.frame = CGRect.zero
            messageLabel.text = "No QR Code is detected"
            return
        }
        // Get the metadata object
        let metadataObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
        if metadataObj.type == AVMetadataObject.ObjectType.qr {
            // If the found metadata is QR code metadata, update the status label's text and set the bounds
            let barCodeObject = videoPreviewLayer?.transformedMetadataObject(for: metadataObj)
            qrcodeFrameView?.frame = barCodeObject!.bounds
            if metadataObj.stringValue != nil {
                messageLabel.text = metadataObj.stringValue
            }
        }
    }

    override func viewDidLoad() {
        super.viewDidLoad()
        // Get the back camera for capture
        guard let captureDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back) else {
            print("Failed to get camera device")
            return
        }
        do {
            // Get an instance of the AVCaptureDeviceInput class using the previous device object
            let input = try AVCaptureDeviceInput(device: captureDevice)
            // Set the input device on the capture session
            captureSession.addInput(input)
            // Initialize an AVCaptureMetadataOutput object and set it as the output device of the capture session
            let captureMetadataOutput = AVCaptureMetadataOutput()
            captureSession.addOutput(captureMetadataOutput)
            // Set the delegate and use the default dispatch queue to execute the callback
            captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
            captureMetadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.qr]
            // Initialize the video preview layer and add it as a sublayer to the view's layer
            videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
            videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
            videoPreviewLayer?.frame = view.layer.bounds
            view.layer.addSublayer(videoPreviewLayer!)
            // Start video capture
            captureSession.startRunning()
            // Move the message label and top bar to the front
            view.bringSubviewToFront(messageLabel)
            view.bringSubviewToFront(topBar)
            // Initialize the frame used to highlight the QR code
            qrcodeFrameView = UIView()
            if let qrcodeFrameView = qrcodeFrameView {
                qrcodeFrameView.layer.borderColor = UIColor.yellow.cgColor
                qrcodeFrameView.layer.borderWidth = 2
                view.addSubview(qrcodeFrameView)
                view.bringSubviewToFront(qrcodeFrameView)
            }
        } catch {
            print(error)
            return
        }
    }
}
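One way to get tappable behavior is to open the URL as soon as a code is decoded, instead of only writing it to the label. A minimal sketch of what the inner check in metadataOutput could become, assuming the QR payload is a well-formed URL (the label fallback is kept for non-URL payloads):

if let value = metadataObj.stringValue, let url = URL(string: value),
    UIApplication.shared.canOpenURL(url) {
    // Stop scanning and hand the URL to the system, which opens it
    // in Safari or whatever app is registered for its scheme
    captureSession.stopRunning()
    UIApplication.shared.open(url, options: [:], completionHandler: nil)
} else {
    messageLabel.text = metadataObj.stringValue
}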
So, I am a beginner and I have been following an online tutorial. I have copied the code and I get no errors, but my program does not seem to be doing anything. This is the code:
import UIKit
import AVKit
import Vision

class viewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    override func viewDidLoad() {
        super.viewDidLoad()
        let captureSession = AVCaptureSession()
        captureSession.sessionPreset = .photo
        guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
        guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
        captureSession.addInput(input)
        captureSession.startRunning()
        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        view.layer.addSublayer(previewLayer)
        previewLayer.frame = view.frame
        let dataOutput = AVCaptureVideoDataOutput()
        dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
        captureSession.addOutput(dataOutput)
        guard let model = try? VNCoreMLModel(for: SqueezeNet().model) else { return }
        let request = VNCoreMLRequest(model: model) { (finishedReq, err) in
            //print(finishedReq.results)
            guard let results = finishedReq.results as? [VNClassificationObservation] else { return }
            guard let firstObservation = results.first else { return }
            print(firstObservation.identifier, firstObservation.confidence)
        }
        //VNImageRequestHandler(cgImage: <#T##CGImage#>, options: [:]).perform(requests: [VNRequest])
        func captureOutput(_ output: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
            //print("Camera was able to capture a frame:", Date())
            guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
            try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
        }
    }
}
One thing you forgot is asking permission to use the camera.
Something like:
AVCaptureDevice.requestAccess(for: AVMediaType.video) { response in
    if response {
        // Access granted. You can continue.
    } else {
        // Tell the user.
    }
}
Also, you have the AVCaptureSession as a local/automatic variable, so it will be deallocated on leaving viewDidLoad(). You must make it an instance variable; the iOS documentation even shows this. (Note too that since iOS 10 the app's Info.plist needs an NSCameraUsageDescription entry, or the app will crash the first time it accesses the camera.)
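A minimal sketch of that fix, using the class name from your code:

class viewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    // Held by the view controller, so the session outlives viewDidLoad()
    let captureSession = AVCaptureSession()

    override func viewDidLoad() {
        super.viewDidLoad()
        // ... configure captureSession here ...
    }
}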
Having made at least the two simple mistakes above, I advise you to follow a tutorial on how to do the basics. Then, once you've got that right and you see something on screen, add the ML stuff.
Good luck, it seems like a very nice subject!
I am building an app similar to a camera app in Xcode 10.1 using Swift. To do this, I have imported AVFoundation, and I am close to finishing my code. However, I hit a problem on this line of code
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
which is in this block of code
func beginSession() {
    do {
        let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice!)
        captureSession.addInput(captureDeviceInput)
    } catch {
        print(error.localizedDescription)
    }
    let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
        self.previewLayer = self.previewLayer
        self.view.layer.addSublayer(self.previewLayer)
        self.previewLayer.frame = self.view.layer.frame
        captureSession.startRunning()
        let dataOutput = AVCaptureVideoDataOutput()
        dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)] as [String : Any]
        dataOutput.alwaysDiscardsLateVideoFrames = true
        if captureSession.canAddOutput(dataOutput) {
            captureSession.addOutput(dataOutput)
        }
An error appears that reads "Cannot invoke initializer for type 'AVCaptureVideoPreviewLayer' with an argument list of type '(session: AVCaptureSession, () -> ())'"
I don't exactly know what this means or how to fix it as I am relatively new to programming.
The error means the opening brace right after the initializer call is being parsed as a trailing closure and passed to AVCaptureVideoPreviewLayer(session:), which takes no closure argument, so drop that brace. Also, where have you initialized captureSession?
Try something like this in your UIViewController:
var captureSession = AVCaptureSession()
var videoPreviewLayer: AVCaptureVideoPreviewLayer?

override func viewDidLoad() {
    super.viewDidLoad()
    beginSession()
}

func beginSession() {
    // Get an instance of the AVCaptureDevice class to initialize a device object, with video as the media type.
    if let captureDevice = AVCaptureDevice.default(for: AVMediaType.video) {
        do {
            // Get an instance of the AVCaptureDeviceInput class using the previous device object.
            let input = try AVCaptureDeviceInput(device: captureDevice)
            // Set the input device on the capture session.
            captureSession.addInput(input)
            // Initialize an AVCaptureVideoDataOutput object and set it as the output device of the capture session.
            let dataOutput = AVCaptureVideoDataOutput()
            dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)] as [String : Any]
            dataOutput.alwaysDiscardsLateVideoFrames = true
            if captureSession.canAddOutput(dataOutput) {
                captureSession.addOutput(dataOutput)
            }
            // Initialize the video preview layer and add it as a sublayer to the view's layer.
            videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
            videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
            videoPreviewLayer?.frame = self.view.layer.bounds // It may be best to set up a UIView outlet instead of using self.view
            self.view.layer.addSublayer(videoPreviewLayer!)
            // Start video capture.
            captureSession.startRunning()
        } catch {
            // If any error occurs, simply print it out and don't continue any more.
            print(error)
            return
        }
    }
}
Hope it helps you!
I've implemented a QR/barcode reader in Swift using the AVFoundation framework.
For supported types I added nearly all types available:
let supportedCodeTypes = [AVMetadataObjectTypeUPCECode,
AVMetadataObjectTypeCode39Code,
AVMetadataObjectTypeCode39Mod43Code,
AVMetadataObjectTypeCode93Code,
AVMetadataObjectTypeCode128Code,
AVMetadataObjectTypeEAN8Code,
AVMetadataObjectTypeEAN13Code,
AVMetadataObjectTypeAztecCode,
AVMetadataObjectTypePDF417Code,
AVMetadataObjectTypeQRCode,
AVMetadataObjectTypeDataMatrixCode,
AVMetadataObjectTypeITF14Code]
Now I want to add support for Code39Mod10 codes and Codabar barcodes, which are not available among the AVMetadataObjectTypes.
Is there a way to add custom AVMetadataObjectTypes, or do I have to use some third-party scanner framework? And if so, can you suggest one?
Please check Firebase's ML Kit; it allows you to scan these. Add AVCaptureVideoDataOutputSampleBufferDelegate to your view controller, then set up live detection:

self.view.layoutIfNeeded()
session.sessionPreset = AVCaptureSession.Preset.photo
let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
if captureDevice != nil {
    let deviceInput = try! AVCaptureDeviceInput(device: captureDevice!)
    let deviceOutput = AVCaptureVideoDataOutput()
    deviceOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
    deviceOutput.setSampleBufferDelegate(self, queue: DispatchQueue.global(qos: DispatchQoS.QoSClass.default))
    session.addInput(deviceInput)
    session.addOutput(deviceOutput)
    let imageLayer = AVCaptureVideoPreviewLayer(session: session)
    imageLayer.frame = self.viewScanner.bounds
    imageLayer.videoGravity = .resizeAspectFill
    viewScanner.layer.addSublayer(imageLayer)
    viewScanner.bringSubviewToFront(imgScanner)
    session.startRunning()
}

Then implement the delegate:

// MARK: - AVCaptureVideoDataOutputSampleBufferDelegate
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    if self.session.isRunning {
        if let barcodeDetector = self.barcodeDetector {
            let visionImage = VisionImage(buffer: sampleBuffer)
            barcodeDetector.detect(in: visionImage) { (barcodes, error) in
                if let error = error {
                    print(error.localizedDescription)
                    return
                }
                if let barcode = barcodes?.first, let decodeString = barcode.rawValue {
                    print("\n======================= Barcode value =======================\n \(decodeString)")
                    self.session.stopRunning()
                }
            }
        }
    }
}
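Note the delegate snippet assumes the view controller already has a barcodeDetector property. With the FirebaseMLVision pod, creating one would look roughly like the sketch below; the property name matches the snippet above, and the options shown are an assumption (restrict the formats if you only need Codabar and Code 39):

import FirebaseMLVision

// Assumed property backing the delegate snippet above
lazy var barcodeDetector: VisionBarcodeDetector? = {
    // .all enables every symbology ML Kit supports, including Codabar
    let options = VisionBarcodeDetectorOptions(formats: .all)
    return Vision.vision().barcodeDetector(options: options)
}()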
I currently have a custom camera implemented in my application. I am running into two small issues.
1) When I switch between the camera views (front and back), the audio input dies and only video is recorded.
2) My method for deciding which camera (front or back) is which is deprecated, and I don't know exactly how to resolve it. The deprecated part is how the devices are stored into the variables below. Xcode tells me: "Use AVCaptureDeviceDiscoverySession instead."
let devices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo) as! [AVCaptureDevice]
// Get the front and back-facing camera for taking photos
for device in devices {
    if device.position == AVCaptureDevicePosition.back {
        backFacingCamera = device
    } else if device.position == AVCaptureDevicePosition.front {
        frontFacingCamera = device
    }
}
currentDevice = backFacingCamera
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: currentDevice) else {
    return
}
As for the general camera recording, here is the code:
My Variables:
let captureSession = AVCaptureSession()
var currentDevice: AVCaptureDevice?
var backFacingCamera: AVCaptureDevice?
var frontFacingCamera: AVCaptureDevice?
var videoFileOutput: AVCaptureMovieFileOutput?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
@IBOutlet weak var recordingView: UIView!
Switching cameras:
var device = AVCaptureDevice.defaultDevice(withDeviceType: .builtInWideAngleCamera, mediaType: AVMediaTypeVideo, position: .back)

func switchCameras() {
    captureSession.beginConfiguration()
    // Change the device based on the current camera
    let newDevice = (currentDevice?.position == AVCaptureDevicePosition.back) ? frontFacingCamera : backFacingCamera
    // Remove all inputs from the session
    for input in captureSession.inputs {
        captureSession.removeInput(input as! AVCaptureDeviceInput)
    }
    // Change to the new input
    let cameraInput: AVCaptureDeviceInput
    do {
        cameraInput = try AVCaptureDeviceInput(device: newDevice)
    } catch {
        print(error)
        return
    }
    if captureSession.canAddInput(cameraInput) {
        captureSession.addInput(cameraInput)
    }
    currentDevice = newDevice
    captureSession.commitConfiguration()
    if currentDevice?.position == .front {
        flashButton.isHidden = true
        flashButton.isEnabled = false
    } else if currentDevice?.position == .back {
        flashButton.isHidden = false
        flashButton.isEnabled = true
    }
}
And in my viewWillAppear:
mediaViewCapture.frame = CGRect(x: self.view.frame.size.width * 0, y: self.view.frame.size.height * 0, width: self.view.frame.size.width, height: self.view.frame.size.height)
self.view.addSubview(mediaViewCapture)
captureSession.sessionPreset = AVCaptureSessionPresetHigh
let devices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo) as! [AVCaptureDevice]
// Get the front and back-facing camera for taking photos
for device in devices {
    if device.position == AVCaptureDevicePosition.back {
        backFacingCamera = device
    } else if device.position == AVCaptureDevicePosition.front {
        frontFacingCamera = device
    }
}
currentDevice = backFacingCamera
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: currentDevice) else {
    return
}
let audioInputDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeAudio)
do {
    let audioInput = try AVCaptureDeviceInput(device: audioInputDevice)
    // Add Audio Input
    if captureSession.canAddInput(audioInput) {
        captureSession.addInput(audioInput)
    } else {
        NSLog("Can't Add Audio Input")
    }
} catch let error {
    NSLog("Error Getting Input Device: \(error)")
}
videoFileOutput = AVCaptureMovieFileOutput()
captureSession.addInput(captureDeviceInput)
captureSession.addOutput(videoFileOutput)
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(cameraPreviewLayer!)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
cameraPreviewLayer?.frame = mediaViewCapture.layer.frame
captureSession.startRunning()
And finally my capture:
func capture(_ captureOutput: AVCaptureFileOutput!, didFinishRecordingToOutputFileAt outputFileURL: URL!, fromConnections connections: [Any]!, error: Error!) {
    if error == nil {
        turnFlashOff()
        let videoVC = VideoPreviewVC()
        videoVC.url = outputFileURL
        self.navigationController?.pushViewController(videoVC, animated: false)
    } else {
        print("Error saving the video \(error)")
    }
}
You can use AVCaptureDeviceDiscoverySession instead of the deprecated AVCaptureDevice API. The following is the code for it:
let deviceDiscovery = AVCaptureDeviceDiscoverySession(deviceTypes: [AVCaptureDeviceType.builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: .back)
let devices = deviceDiscovery?.devices
for device in devices! {
    if device.hasMediaType(AVMediaTypeVideo) {
        captureDevice = device
    }
}
AVCaptureDeviceType has the following types: builtInMicrophone, builtInWideAngleCamera, builtInTelephotoCamera, builtInDualCamera, and builtInDuoCamera. Since you need both cameras, you can also discover with position .unspecified and sort by position yourself, as sketched below.
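A sketch of the same discovery call with position .unspecified, filling your existing backFacingCamera and frontFacingCamera variables:

// .unspecified returns both front and back cameras in one pass
let discovery = AVCaptureDeviceDiscoverySession(deviceTypes: [AVCaptureDeviceType.builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: .unspecified)
for device in discovery?.devices ?? [] {
    if device.position == .back {
        backFacingCamera = device
    } else if device.position == .front {
        frontFacingCamera = device
    }
}
currentDevice = backFacingCamera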
The audio input issue when the camera is switched still needs to be checked.
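On that audio issue: switchCameras() removes every input from the session, including the microphone input, and then re-adds only a camera input, which would explain why recordings lose audio after a switch. A sketch of a possible fix, in the same pre-iOS-11 API style as the rest of the code, that removes only the video input:

// Remove only video inputs so the microphone input survives the switch
for input in captureSession.inputs {
    if let deviceInput = input as? AVCaptureDeviceInput,
        deviceInput.device.hasMediaType(AVMediaTypeVideo) {
        captureSession.removeInput(deviceInput)
    }
}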