AVFoundation add support for Code39Mod10 and Codabar barcodes - swift

I've implemented a QR/barcode reader in Swift using the AVFoundation framework. For the supported types I added nearly all of the available ones:
let supportedCodeTypes = [AVMetadataObjectTypeUPCECode,
                          AVMetadataObjectTypeCode39Code,
                          AVMetadataObjectTypeCode39Mod43Code,
                          AVMetadataObjectTypeCode93Code,
                          AVMetadataObjectTypeCode128Code,
                          AVMetadataObjectTypeEAN8Code,
                          AVMetadataObjectTypeEAN13Code,
                          AVMetadataObjectTypeAztecCode,
                          AVMetadataObjectTypePDF417Code,
                          AVMetadataObjectTypeQRCode,
                          AVMetadataObjectTypeDataMatrixCode,
                          AVMetadataObjectTypeITF14Code]
Now I want to add support for Code39Mod10 and Codabar barcodes, which are not available among the AVMetadataObjectTypes. Is there a way to add custom AVMetadataObjectTypes, or do I have to use some 3rd-party scanner framework? If so, can you suggest one?

Please check Firebase ML Kit; it can scan these types. Add AVCaptureVideoDataOutputSampleBufferDelegate to your view controller, then set up live detection:
self.view.layoutIfNeeded()
session.sessionPreset = AVCaptureSession.Preset.photo
let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
if captureDevice != nil {
    let deviceInput = try! AVCaptureDeviceInput(device: captureDevice!)
    let deviceOutput = AVCaptureVideoDataOutput()
    deviceOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
    deviceOutput.setSampleBufferDelegate(self, queue: DispatchQueue.global(qos: DispatchQoS.QoSClass.default))
    session.addInput(deviceInput)
    session.addOutput(deviceOutput)
    let imageLayer = AVCaptureVideoPreviewLayer(session: session)
    imageLayer.frame = self.viewScanner.bounds
    imageLayer.videoGravity = .resizeAspectFill
    viewScanner.layer.addSublayer(imageLayer)
    viewScanner.bringSubviewToFront(imgScanner)
    session.startRunning()
}
Then implement the delegate:
// MARK: - AVCaptureVideoDataOutputSampleBufferDelegate
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    if self.session.isRunning {
        if let barcodeDetector = self.barcodeDetector {
            let visionImage = VisionImage(buffer: sampleBuffer)
            barcodeDetector.detect(in: visionImage) { (barcodes, error) in
                if let error = error {
                    print(error.localizedDescription)
                    return
                }
                // avoid force unwraps: bind the first barcode and its value
                if let barcode = barcodes?.first, let decodedString = barcode.rawValue {
                    print("\n======================= Barcode value =======================\n \(decodedString)")
                    self.session.stopRunning()
                }
            }
        }
    }
}
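The snippet above references self.barcodeDetector without showing how it is created. A minimal sketch of that setup, assuming the FirebaseMLVision pod (the exact option names may differ between ML Kit versions, but Codabar and the common 1D formats are supported):
import FirebaseMLVision

lazy var vision = Vision.vision()
// restrict detection to the formats you need, e.g. Codabar and Code 39
lazy var barcodeDetector: VisionBarcodeDetector = {
    let options = VisionBarcodeDetectorOptions(formats: [.codaBar, .code39])
    return vision.barcodeDetector(options: options)
}()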

Related

Image Recognition Results are not printed in Swift Playgrounds

I have been working in a playground to recognize objects in live capture, but when I try to print the results, nothing is printed. Here is my code. I have also tried stepping through the code, and it just executes the return in the guard let for the results. The SetupLabel func can also not be executed, as the playground then reports a problem.
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    // a stored property initialized with a closure
    let label: UILabel = {
        let label = UILabel()
        label.textColor = .white
        label.translatesAutoresizingMaskIntoConstraints = false
        label.text = "Label"
        label.font = label.font.withSize(30)
        return label
    }()

    override func viewDidLoad() {
        super.viewDidLoad()
        setupCaptureSession()
    }

    func setupCaptureSession() {
        let captureSession = AVCaptureSession()
        // search for devices with the specifications defined
        let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: .video, position: .back).devices
        // set up the capture device and add its input to captureSession
        do {
            if let captureDevice = availableDevices.first {
                let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
                captureSession.addInput(captureDeviceInput)
            }
        } catch {
            print(error.localizedDescription)
        }
        // set up the output and add it to captureSession
        let captureOutput = AVCaptureVideoDataOutput()
        captureOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
        captureSession.addOutput(captureOutput)
        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer.frame = view.frame
        view.layer.addSublayer(previewLayer)
        captureSession.startRunning()
    }

    // called when a frame is captured
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard let model = try? VNCoreMLModel(for: myYolo) else { return }
        let request = VNCoreMLRequest(model: model) { (finishedRequest, error) in
            print(finishedRequest.results)
            guard let results = finishedRequest.results as? [VNClassificationObservation] else { return }
            guard let Observation = results.first else { return }
            DispatchQueue.main.async(execute: {
                self.label.text = "\(Observation.identifier)"
            })
        }
        guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        // executes the request
        try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
    }

    func SetupLabel() {
        label.centerXAnchor.constraint(equalTo: view.centerXAnchor).isActive = true
        label.bottomAnchor.constraint(equalTo: view.bottomAnchor, constant: -50).isActive = true
        view.addSubview(label)
    }
}
You need to run it on a real device.
Vision requests will not work in playgrounds/simulator.
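As a side note, the SetupLabel problem is most likely the ordering inside it: the constraints are activated before the label is added to the view hierarchy, which raises an exception. A fixed sketch:
func SetupLabel() {
    // add the label to the hierarchy first; activating constraints on a view
    // that is not yet installed raises an exception
    view.addSubview(label)
    label.centerXAnchor.constraint(equalTo: view.centerXAnchor).isActive = true
    label.bottomAnchor.constraint(equalTo: view.bottomAnchor, constant: -50).isActive = true
}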

Get audio channel averagePowerLevel AVFoundation swift

I'm trying to read the audio channels' averagePowerLevel with AVCaptureAudioDataOutput by setting its sample buffer delegate via setSampleBufferDelegate. However, the delegate is not getting called at all. What am I missing?
func setUpCaptureSession() {
    captureSession.beginConfiguration()
    captureSession.sessionPreset = AVCaptureSession.Preset.high
    // Add inputs
    let camera = bestCamera()
    // Video
    guard let captureInput = try? AVCaptureDeviceInput(device: camera),
          captureSession.canAddInput(captureInput) else {
        return
    }
    captureSession.addInput(captureInput)
    if captureSession.canSetSessionPreset(.high) {
        captureSession.sessionPreset = .high
    }
    if captureSession.canAddOutput(cameraOutput) {
        captureSession.addOutput(cameraOutput)
    }
    // Audio
    let microphone = bestAudio()
    guard let audioInput = try? AVCaptureDeviceInput(device: microphone),
          captureSession.canAddInput(audioInput) else {
        return
    }
    captureSession.addInput(audioInput)
    self.audioInput = audioInput
    audioDataOutput = AVCaptureAudioDataOutput()
    guard captureSession.canAddOutput(audioDataOutput) else {
        return
    }
    audioDataOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
    captureSession.addOutput(audioDataOutput)
    print("videodataoutput added")
    // Recording to disk
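    // Possible cause worth checking: on iOS, AVCaptureMovieFileOutput does not
    // support running in the same session as AVCaptureAudioDataOutput, so with
    // both outputs added the audio data delegate may never fire.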
    guard captureSession.canAddOutput(fileOutput) else {
        return
    }
    captureSession.addOutput(fileOutput)
    captureSession.commitConfiguration()
}
Here is where I implement the delegate:
extension CameraController: AVCaptureAudioDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        if output == audioDataOutput {
            print("AUDIO OUTPUT")
            DispatchQueue.main.async {
                print("DELEGATE")
                let channel = connection.audioChannels[1]
                let averagePowerLevel = channel.averagePowerLevel
                print("AVERAGE POWER: \(averagePowerLevel)")
            }
        }
    }
}
Everything else is working but AVCaptureAudioDataOutputSampleBufferDelegate is never called.

Getting a black screen

So, I am a beginner and I have been following an online tutorial. I have copied the code and I get no errors, but my program does not seem to be doing anything. This is the code:
import UIKit
import AVKit
import Vision

class viewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    override func viewDidLoad() {
        super.viewDidLoad()
        let captureSession = AVCaptureSession()
        captureSession.sessionPreset = .photo
        guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
        guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
        captureSession.addInput(input)
        captureSession.startRunning()
        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        view.layer.addSublayer(previewLayer)
        previewLayer.frame = view.frame
        let dataOutput = AVCaptureVideoDataOutput()
        dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
        captureSession.addOutput(dataOutput)
        guard let model = try? VNCoreMLModel(for: SqueezeNet().model) else { return }
        let request = VNCoreMLRequest(model: model) { (finishedReq, err) in
            //print(finishedReq.results)
            guard let results = finishedReq.results as? [VNClassificationObservation] else { return }
            guard let firstObservation = results.first else { return }
            print(firstObservation.identifier, firstObservation.confidence)
        }
        //VNImageRequestHandler(cgImage: <#T##CGImage#>, options: [:]).perform(requests: [VNRequest])

        func captureOutput(_ output: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
            //print("Camera was able to capture a frame:", Date())
            guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
            try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
        }
    }
}
One thing you forgot is asking permission to use the camera.
Something like:
AVCaptureDevice.requestAccess(for: AVMediaType.video) { response in
    if response {
        // Access granted. You can continue.
    } else {
        // Tell the user.
    }
}
Also, you have the AVCaptureSession as a local/automatic variable, so it will be deallocated on leaving viewDidLoad(). You must make it an instance variable; the iOS documentation even shows this.
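A minimal sketch of that change (the class name is just for illustration):
class CameraViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    // keep the session alive for the lifetime of the controller
    let captureSession = AVCaptureSession()

    override func viewDidLoad() {
        super.viewDidLoad()
        // configure captureSession here instead of creating a local one
    }
}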
Having made at least the two simple mistakes above, I advise you to first follow a tutorial on how to do the basics. Once you've got that right and you see something on screen, add the ML stuff.
Good luck, it seems like a very nice subject!

AVFoundation PDF417 scanner doesn't always work

I am creating an app using Swift 4 and Xcode 9 that scans PDF417 barcodes using AVFoundation. The scanner works with some codes, but doesn't recognize the PDF417 barcode that you would find on the front of a CA Lottery scratchers ticket, for example.
Is there anything I am missing to make it work? Below is my code:
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: AVMediaType.video, position: .back)
guard let captureDevice = deviceDiscoverySession.devices.first else {
    print("Failed to get the camera device")
    return
}
do {
    captureSession = AVCaptureSession()
    let input = try AVCaptureDeviceInput(device: captureDevice)
    captureSession!.addInput(input)
    let captureMetadataOutput = AVCaptureMetadataOutput()
    captureSession!.addOutput(captureMetadataOutput)
    captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
    captureMetadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.pdf417]
    videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
    videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    videoPreviewLayer?.frame = view.layer.bounds
    view.layer.addSublayer(videoPreviewLayer!)
    captureSession?.startRunning()
} catch {
    print(error)
    return
}
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    // Get the metadata object
    let metadataObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
    if scanType.contains(metadataObj.type) {
        let barCodeObj = videoPreviewLayer?.transformedMetadataObject(for: metadataObj)
        if metadataObj.stringValue != nil {
            callDelegate(metadataObj.stringValue)
            captureSession?.stopRunning()
            AudioServicesPlayAlertSound(SystemSoundID(kSystemSoundID_Vibrate))
            navigationController?.popViewController(animated: true)
        }
    }
}
Thanks!
Replace your initialization code for the scanner with the following, either in viewDidLoad or whichever method you'd like it to be in:
// Global vars used in init below
var captureSession: AVCaptureSession!
var previewLayer: AVCaptureVideoPreviewLayer!

func setupCaptureInputDevice() {
    captureSession = AVCaptureSession()
    // get the default capture device for video
    guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else {
        // if there is an error then something is wrong, so dismiss
        dismiss(animated: true, completion: nil)
        return
    }
    let videoInput: AVCaptureDeviceInput
    // create a capture input for the device created above
    do {
        videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
    } catch {
        return
    }
    // it is important to check whether we can add the input,
    // because adding without checking could cause a crash
    if captureSession.canAddInput(videoInput) {
        captureSession.addInput(videoInput)
    } else {
        // dismiss or display error
        return
    }
    // get ready to capture output somewhere
    let metadataOutput = AVCaptureMetadataOutput()
    // again, check that the operation is possible before doing it
    if captureSession.canAddOutput(metadataOutput) {
        captureSession.addOutput(metadataOutput)
        // set the metadataOutput's delegate to self, running on the main thread
        metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        // specify your code type
        metadataOutput.metadataObjectTypes = [.pdf417]
    } else {
        // dismiss or display error
        return
    }
    // the preview layer displays the capture session's video
    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    // just add it to the screen
    previewLayer.frame = view.layer.bounds
    previewLayer.videoGravity = .resizeAspectFill
    view.layer.addSublayer(previewLayer)
    // and start the session
    captureSession.startRunning()
}
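One difference from the question's code worth noting: this uses AVCaptureDevice.default(for: .video) rather than a discovery session restricted to .builtInDualCamera. On devices without a dual camera that discovery session finds no devices, so the guard fails and the scanner never starts; the default video device works everywhere.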

Reading a barcode image without using cocoapods or other external API's

I'm trying to use the new Apple Vision API to detect a barcode from an image and return its details. I've successfully detected a QR code and returned a message using CIDetector. However, I can't make this work for 1-dimensional barcodes. Here's my code:
import UIKit
import Vision

class BarcodeDetector {
    func recognizeBarcode(for source: UIImage,
                          complete: @escaping (UIImage) -> Void) {
        var resultImage = source
        let detectBarcodeRequest = VNDetectBarcodesRequest { (request, error) in
            if error == nil {
                if let results = request.results as? [VNBarcodeObservation] {
                    print("Number of Barcodes found: \(results.count)")
                    if results.count == 0 { print("\r") }
                    var barcodeBoundingRects = [CGRect]()
                    for barcode in results {
                        barcodeBoundingRects.append(barcode.boundingBox)
                        let barcodeType = barcode.symbology.rawValue.replacingOccurrences(of: "VNBarcodeSymbology", with: "")
                        print("-Barcode Type: \(barcodeType)")
                        if barcodeType == "QR" {
                            let image = CIImage(image: source)
                            image?.cropped(to: barcode.boundingBox)
                            self.qrCodeDescriptor(qrCode: barcode, qrCodeImage: image!)
                        }
                    }
                    resultImage = self.drawOnImage(source: resultImage, barcodeBoundingRects: barcodeBoundingRects)
                }
            } else {
                print(error!.localizedDescription)
            }
            complete(resultImage)
        }
        let vnImage = VNImageRequestHandler(cgImage: source.cgImage!, options: [:])
        try? vnImage.perform([detectBarcodeRequest])
    }

    private func qrCodeDescriptor(qrCode: VNBarcodeObservation, qrCodeImage: CIImage) {
        if let description = qrCode.barcodeDescriptor as? CIQRCodeDescriptor {
            readQRCode(qrCodeImage: qrCodeImage)
            print(" -Payload: \(description.errorCorrectedPayload)")
            print(" -Mask Pattern: \(description.maskPattern)")
            print(" -Symbol Version: \(description.symbolVersion)\n")
        }
    }

    private func readQRCode(qrCodeImage: CIImage) {
        let detector: CIDetector = CIDetector(ofType: CIDetectorTypeQRCode, context: nil, options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])!
        var qrCodeLink = ""
        let features = detector.features(in: qrCodeImage)
        for feature in features as! [CIQRCodeFeature] {
            if let messageString = feature.messageString {
                qrCodeLink += messageString
            }
        }
        if qrCodeLink == "" {
            print(" -No Code Message")
        } else {
            print(" -Code Message: \(qrCodeLink)")
        }
    }
}
How can I convert the image into an AVMetadataObject and then read it from there? Or is there a better approach?
Swift 4.1, using the Vision framework (no 3rd-party stuff or pods).
Try this. It works for QR and for other types (Code39 in this example):
func startDetection() {
    let request = VNDetectBarcodesRequest(completionHandler: self.detectHandler)
    request.symbologies = [VNBarcodeSymbology.code39] // or use .QR, etc.
    self.requests = [request]
}

func detectHandler(request: VNRequest, error: Error?) {
    guard let observations = request.results else {
        //print("no result")
        return
    }
    let results = observations.compactMap { $0 as? VNBarcodeObservation }
    for result in results {
        if let payload = result.payloadStringValue {
            print(payload)
        }
    }
}
And then, in the sample buffer delegate:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        return
    }
    var requestOptions: [VNImageOption: Any] = [:]
    if let camData = CMGetAttachment(sampleBuffer, kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix, nil) {
        requestOptions = [.cameraIntrinsics: camData]
    }
    // orientation rawValue 6 is .right, i.e. a portrait-orientation camera
    let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: CGImagePropertyOrientation(rawValue: 6)!, options: requestOptions)
    do {
        try imageRequestHandler.perform(self.requests)
    } catch {
        print(error)
    }
}
The rest of the implementation is the regular AVCaptureDevice and AVCaptureSession stuff. You will also need to conform to AVCaptureVideoDataOutputSampleBufferDelegate.
import AVFoundation
import Vision

// inside your UIViewController subclass that adopts AVCaptureVideoDataOutputSampleBufferDelegate
var captureDevice: AVCaptureDevice!
var session = AVCaptureSession()
var requests = [VNRequest]()

override func viewDidLoad() {
    super.viewDidLoad()
    self.setupVideo()
    self.startDetection()
}

func setupVideo() {
    session.sessionPreset = AVCaptureSession.Preset.photo
    captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
    let deviceInput = try! AVCaptureDeviceInput(device: captureDevice!)
    let deviceOutput = AVCaptureVideoDataOutput()
    deviceOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
    deviceOutput.setSampleBufferDelegate(self, queue: DispatchQueue.global(qos: DispatchQoS.QoSClass.default))
    session.addInput(deviceInput)
    session.addOutput(deviceOutput)
    let imageLayer = AVCaptureVideoPreviewLayer(session: session)
    imageLayer.frame = imageView.bounds
    imageView.layer.addSublayer(imageLayer)
    session.startRunning()
}