Get audio channel averagePowerLevel AVFoundation Swift

I'm trying to access the audio channel's average power level with AVCaptureAudioDataOutput by setting its sample buffer delegate.
However, the delegate is not getting called at all. What am I missing?
func setUpCaptureSession() {
    captureSession.beginConfiguration()
    captureSession.sessionPreset = AVCaptureSession.Preset.high
    // Add inputs
    let camera = bestCamera()
    // Video
    guard let captureInput = try? AVCaptureDeviceInput(device: camera),
        captureSession.canAddInput(captureInput) else {
            return
    }
    captureSession.addInput(captureInput)
    if captureSession.canSetSessionPreset(.high) {
        captureSession.sessionPreset = .high
    }
    if captureSession.canAddOutput(cameraOutput) {
        captureSession.addOutput(cameraOutput)
    }
    // Add outputs
    let microphone = bestAudio()
    guard let audioInput = try? AVCaptureDeviceInput(device: microphone),
        captureSession.canAddInput(audioInput) else {
            return
    }
    captureSession.addInput(audioInput)
    self.audioInput = audioInput
    audioDataOutput = AVCaptureAudioDataOutput()
    guard captureSession.canAddOutput(audioDataOutput) else {
        return
    }
    audioDataOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
    captureSession.addOutput(audioDataOutput)
    print("videodataoutput added")
    // Recording to disk
    guard captureSession.canAddOutput(fileOutput) else {
        return
    }
    captureSession.addOutput(fileOutput)
    captureSession.commitConfiguration()
}
Here is where I implement the delegate:
extension CameraController: AVCaptureAudioDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        if output == audioDataOutput {
            print("AUDIO OUTPUT")
            DispatchQueue.main.async {
                print("DELEGATE")
                let channel = connection.audioChannels[1]
                let averagePowerLevel = channel.averagePowerLevel
                print("AVERAGE POWER: \(averagePowerLevel)")
            }
        }
    }
}
Everything else is working but AVCaptureAudioDataOutputSampleBufferDelegate is never called.
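For reference, a minimal sketch of reading the per-channel levels inside the delegate, assuming the session configured above is actually started with startRunning() somewhere. Iterating over audioChannels (instead of indexing position 1, which does not exist for a mono microphone) is an illustrative choice, not taken from the question:

extension CameraController: AVCaptureAudioDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard output == audioDataOutput else { return }
        // A mono input exposes a single channel, so iterate rather than index a fixed position.
        for (index, channel) in connection.audioChannels.enumerated() {
            print("channel \(index): average \(channel.averagePowerLevel) dB, peak \(channel.peakHoldLevel) dB")
        }
    }
}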

Related

Getting a black screen

So, I am a beginner and I have been following an online tutorial. I have copied the code and I get no errors, but my program does not seem to be doing anything. This is the code:
import UIKit
import AVKit
import Vision

class viewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    override func viewDidLoad() {
        super.viewDidLoad()
        let captureSession = AVCaptureSession()
        captureSession.sessionPreset = .photo
        guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
        guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
        captureSession.addInput(input)
        captureSession.startRunning()
        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        view.layer.addSublayer(previewLayer)
        previewLayer.frame = view.frame
        let dataOutput = AVCaptureVideoDataOutput()
        dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
        captureSession.addOutput(dataOutput)
        guard let model = try? VNCoreMLModel(for: SqueezeNet().model) else { return }
        let request = VNCoreMLRequest(model: model) { (finishedReq, err) in
            //print(finishedReq.results)
            guard let results = finishedReq.results as? [VNClassificationObservation] else { return }
            guard let firstObservation = results.first else { return }
            print(firstObservation.identifier, firstObservation.confidence)
        }
        //VNImageRequestHandler(cgImage: <#T##CGImage#>, options: [:]).perform(requests: [VNRequest])

        func captureOutput(_ output: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
            //print("Camera was able to capture a frame:", Date())
            guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
            try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
        }
    }
}
One thing you forgot is asking permission to use the camera.
Something like:
AVCaptureDevice.requestAccess(for: AVMediaType.video) { response in
    if response {
        // Access granted. You can continue.
    } else {
        // Tell the user.
    }
}
Also, you have the AVCaptureSession as a local/automatic variable, so it is deallocated as soon as viewDidLoad() returns. You must make it an instance variable; the iOS documentation even shows this.
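A minimal sketch of that change (the class and property names are illustrative, not from the question):

class CameraViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    // Stored as a property so the session outlives viewDidLoad().
    let captureSession = AVCaptureSession()

    override func viewDidLoad() {
        super.viewDidLoad()
        captureSession.sessionPreset = .photo
        // ... add inputs/outputs and the preview layer here ...
        captureSession.startRunning()
    }
}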
Having made at least the above two simple mistakes, I advise you to first follow a tutorial on how to do the basics. Then, once you've got that right and you see something on screen, add the ML stuff.
Good luck, it seems like a very nice subject!

AVFoundation PDF417 scanner doesn't always work

I am creating an app using Swift 4 and Xcode 9 that scans PDF417 barcodes using AVFoundation. The scanner works with some codes but doesn't recognize the PDF417 barcode that you would find on the front of a CA Lottery scratchers ticket for example.
Is there anything I am missing to make it work? Below is my code:
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: AVMediaType.video, position: .back)
guard let captureDevice = deviceDiscoverySession.devices.first else {
    print("Failed to get the camera device")
    return
}
do {
    captureSession = AVCaptureSession()
    let input = try AVCaptureDeviceInput(device: captureDevice)
    captureSession!.addInput(input)
    let captureMetadataOutput = AVCaptureMetadataOutput()
    captureSession!.addOutput(captureMetadataOutput)
    captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
    captureMetadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.pdf417]
    videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
    videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    videoPreviewLayer?.frame = view.layer.bounds
    view.layer.addSublayer(videoPreviewLayer!)
    captureSession?.startRunning()
} catch {
    print(error)
    return
}
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    // Get the metadata object
    let metadataObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
    if scanType.contains(metadataObj.type) {
        let barCodeObj = videoPreviewLayer?.transformedMetadataObject(for: metadataObj)
        if metadataObj.stringValue != nil {
            callDelegate(metadataObj.stringValue)
            captureSession?.stopRunning()
            AudioServicesPlayAlertSound(SystemSoundID(kSystemSoundID_Vibrate))
            navigationController?.popViewController(animated: true)
        }
    }
}
Thanks!
Replace your initialization code for the scanner with the following, either in viewDidLoad or in whatever method you'd like it to be in.
// Global vars used in init below
var captureSession: AVCaptureSession!
var previewLayer: AVCaptureVideoPreviewLayer!

func setupCaptureInputDevice() {
    let cameraMediaType = AVMediaType.video
    captureSession = AVCaptureSession()

    // get the video capture device, which should be of type video
    guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else {
        // if there is an error then something is wrong, so dismiss
        dismiss(animated: true, completion: nil)
        return
    }

    let videoInput: AVCaptureDeviceInput

    // create a capture input for the above device input that was created
    do {
        videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
    } catch {
        return
    }

    // this is important to check for if we are able to add the input
    // because adding before this could cause a crash or it could not happen
    if captureSession.canAddInput(videoInput) {
        captureSession.addInput(videoInput)
    } else {
        // dismiss or display error
        return
    }

    // get ready to capture output somewhere
    let metadataOutput = AVCaptureMetadataOutput()

    // again check to make sure that we can do this operation before doing it
    if captureSession.canAddOutput(metadataOutput) {
        captureSession.addOutput(metadataOutput)
        // setting the metadataOutput's delegate to be self and then requesting it run on the main thread
        metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        // specify your code type
        metadataOutput.metadataObjectTypes = [.pdf417]
    } else {
        // dismiss or display error
        return
    }

    // the preview layer now becomes the capture session
    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    // just add it to the screen
    previewLayer.frame = view.layer.bounds
    previewLayer.videoGravity = .resizeAspectFill
    view.layer.addSublayer(previewLayer)

    // and begin input
    captureSession.startRunning()
}
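A usage sketch, assuming the view controller conforms to AVCaptureMetadataOutputObjectsDelegate; the handling inside the delegate is illustrative, not part of the answer:

override func viewDidLoad() {
    super.viewDidLoad()
    setupCaptureInputDevice()
}

func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    guard let code = metadataObjects.first as? AVMetadataMachineReadableCodeObject,
        code.type == .pdf417,
        let payload = code.stringValue else { return }
    captureSession.stopRunning()
    print("PDF417 payload: \(payload)")
}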

func captureOutput is never called

I'd like to add a filter to each frame I record in real time and display the filtered image in a UIImageView; if anyone could help, it would be nice.
But captureOutput is never called. Here is my code:
class Measurement: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    @IBOutlet weak var cameraPreview: UIView!
    @IBOutlet weak var imageView: UIImageView!

    override func viewDidLoad() {
        super.viewDidLoad()
        setupCameraSession()
        toggleTorch(on: true)
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        view.layer.addSublayer(previewLayer)
        cameraSession.startRunning()
    }

    lazy var cameraSession: AVCaptureSession = {
        let s = AVCaptureSession()
        s.sessionPreset = AVCaptureSession.Preset.low
        return s
    }()

    lazy var previewLayer: AVCaptureVideoPreviewLayer = {
        let preview = AVCaptureVideoPreviewLayer(session: self.cameraSession)
        preview.position = CGPoint(x: 182, y: 485)
        preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
        preview.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
        preview.bounds = imageView.bounds
        //preview.position = CGPoint(x: self.view.bounds.midX, y: self.view.bounds.midY)
        imageView.layer.addSublayer(preview)
        return preview
    }()

    func toggleTorch(on: Bool) {
        guard let device = AVCaptureDevice.default(for: .video) else { return }
        if device.hasTorch {
            do {
                try device.lockForConfiguration()
                if on == true {
                    device.torchMode = .on
                } else {
                    device.torchMode = .off
                }
                device.unlockForConfiguration()
            } catch {
                print("Torch could not be used")
            }
        } else {
            print("Torch is not available")
        }
    }

    func setupCameraSession() {
        let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
        do {
            let deviceInput = try AVCaptureDeviceInput(device: captureDevice!)
            cameraSession.beginConfiguration()
            if (cameraSession.canAddInput(deviceInput) == true) {
                cameraSession.addInput(deviceInput)
                print("Processing Data.")
            }
            let dataOutput = AVCaptureVideoDataOutput()
            dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA as UInt32)] as [String: AnyObject]
            dataOutput.alwaysDiscardsLateVideoFrames = true
            print("Processing Data.")
            if (cameraSession.canAddOutput(dataOutput) == true) {
                cameraSession.addOutput(dataOutput)
                print("Processing Data.")
            }
            cameraSession.commitConfiguration()
            let queue = DispatchQueue(label: "com.invasivecode.videoQueue")
            dataOutput.setSampleBufferDelegate(self, queue: queue)
        } catch let error as NSError {
            print("\(error), \(error.localizedDescription)")
        }
    }

    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
        print("Processing Data.")
        guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        //let chromaKeyFilter = colorCubeFilterForChromaKey(hueAngle: 120)
        let ciImage = CIImage(cvPixelBuffer: imageBuffer)
        let context = CIContext()
        guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return }
        let image = UIImage(cgImage: cgImage)
        if let chromaKeyFilter = CIFilter(name: "CISepiaTone") {
            let beginImage = CIImage(image: image)
            chromaKeyFilter.setValue(beginImage, forKey: kCIInputImageKey)
            chromaKeyFilter.setValue(0.5, forKey: kCIInputIntensityKey)
            if let output = chromaKeyFilter.outputImage {
                if let cgimg = context.createCGImage(output, from: output.extent) {
                    let processedImage = UIImage(cgImage: cgimg)
                    // do something interesting with the processed image
                    imageView.image = processedImage
                }
            }
        }
    }

    func captureOutput(_ captureOutput: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // Here you can count how many frames are dropped
    }

    func startCapture() {
        print("\(self.classForCoder)/" + #function)
        if cameraSession.isRunning {
            print("already running")
            return
        }
        cameraSession.startRunning()
        toggleTorch(on: true)
    }
You need to set the delegate on the data output (note that sampleBufferDelegate is read-only, so it has to go through the setter):
dataOutput.setSampleBufferDelegate(self, queue: queue)
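Also worth checking, although it is not part of the answer above: since Swift 4 the video data output delegate methods are spelled captureOutput(_:didOutput:from:) and captureOutput(_:didDrop:from:). A delegate written against the older captureOutput(_:didOutputSampleBuffer:from:) signature, as in the question, still compiles but is never invoked. A minimal sketch of the current signatures, to be placed inside the class:

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    // Called for every frame the output delivers.
}

func captureOutput(_ output: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    // Called whenever a frame is dropped.
}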

AVFoundation add support for Code39Mod10 and Codabar barcodes

I've implemented a QR/barcode reader in Swift using the AVFoundation framework.
For supported types I added nearly all of the available types:
let supportedCodeTypes = [AVMetadataObjectTypeUPCECode,
                          AVMetadataObjectTypeCode39Code,
                          AVMetadataObjectTypeCode39Mod43Code,
                          AVMetadataObjectTypeCode93Code,
                          AVMetadataObjectTypeCode128Code,
                          AVMetadataObjectTypeEAN8Code,
                          AVMetadataObjectTypeEAN13Code,
                          AVMetadataObjectTypeAztecCode,
                          AVMetadataObjectTypePDF417Code,
                          AVMetadataObjectTypeQRCode,
                          AVMetadataObjectTypeDataMatrixCode,
                          AVMetadataObjectTypeITF14Code]
Now I want to add support for Code39Mod10 codes and Codabar barcodes, which are not available among the AVMetadataObjectTypes.
Is there a possibility to add custom AVMetadataObjectTypes, or do I have to use some third-party scanner framework? If so, can you suggest one?
Please check Firebase (ML Kit); it allows scanning these. Add AVCaptureVideoDataOutputSampleBufferDelegate to your view controller and set up live detection:
self.view.layoutIfNeeded()
session.sessionPreset = AVCaptureSession.Preset.photo
let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
if captureDevice != nil {
    let deviceInput = try! AVCaptureDeviceInput(device: captureDevice!)
    let deviceOutput = AVCaptureVideoDataOutput()
    deviceOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
    deviceOutput.setSampleBufferDelegate(self, queue: DispatchQueue.global(qos: DispatchQoS.QoSClass.default))
    session.addInput(deviceInput)
    session.addOutput(deviceOutput)
    let imageLayer = AVCaptureVideoPreviewLayer(session: session)
    imageLayer.frame = self.viewScanner.bounds
    imageLayer.videoGravity = .resizeAspectFill
    viewScanner.layer.addSublayer(imageLayer)
    viewScanner.bringSubviewToFront(imgScanner)
    session.startRunning()
}
Then implement the delegate:
//MARK:- AVCaptureVideoDataOutputSampleBufferDelegate
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    if self.session.isRunning {
        if let barcodeDetector = self.barcodeDetector {
            let visionImage = VisionImage(buffer: sampleBuffer)
            barcodeDetector.detect(in: visionImage) { (barcodes, error) in
                if let error = error {
                    print(error.localizedDescription)
                    return
                }
                if barcodes!.count > 0 {
                    let barcode = barcodes?.first
                    if let decodeString = barcode?.rawValue {
                        print("\n======================= Barcode value =======================\n \(barcode!.rawValue!)")
                        self.session.stopRunning()
                    }
                }
            }
        }
    }
}
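The snippet above assumes a barcodeDetector property created elsewhere. With the Firebase ML Vision SDK the answer appears to use, it would be created roughly like this (treat the exact names as an assumption based on that SDK, not something stated in the answer); ML Kit's barcode scanner does cover Codabar:

import FirebaseMLVision

// Hypothetical setup for the detector used in the delegate above.
lazy var vision = Vision.vision()
lazy var barcodeDetector: VisionBarcodeDetector? = vision.barcodeDetector()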

Reading a barcode image without using CocoaPods or other external APIs

I'm trying to use the new Apple Vision API to detect a barcode from an image and return its details. I've successfully detected a QR code and returned a message using CIDetector. However, I can't make this work for 1-dimensional barcodes. Here is my code:
import UIKit
import Vision

class BarcodeDetector {

    func recognizeBarcode(for source: UIImage,
                          complete: @escaping (UIImage) -> Void) {
        var resultImage = source
        let detectBarcodeRequest = VNDetectBarcodesRequest { (request, error) in
            if error == nil {
                if let results = request.results as? [VNBarcodeObservation] {
                    print("Number of Barcodes found: \(results.count)")
                    if results.count == 0 { print("\r") }
                    var barcodeBoundingRects = [CGRect]()
                    for barcode in results {
                        barcodeBoundingRects.append(barcode.boundingBox)
                        let barcodeType = String(barcode.symbology.rawValue)?.replacingOccurrences(of: "VNBarcodeSymbology", with: "")
                        print("-Barcode Type: \(barcodeType!)")
                        if barcodeType == "QR" {
                            let image = CIImage(image: source)
                            image?.cropping(to: barcode.boundingBox)
                            self.qrCodeDescriptor(qrCode: barcode, qrCodeImage: image!)
                        }
                    }
                    resultImage = self.drawOnImage(source: resultImage, barcodeBoundingRects: barcodeBoundingRects)
                }
            } else {
                print(error!.localizedDescription)
            }
            complete(resultImage)
        }
        let vnImage = VNImageRequestHandler(cgImage: source.cgImage!, options: [:])
        try? vnImage.perform([detectBarcodeRequest])
    }

    private func qrCodeDescriptor(qrCode: VNBarcodeObservation, qrCodeImage: CIImage) {
        if let description = qrCode.barcodeDescriptor as? CIQRCodeDescriptor {
            readQRCode(qrCodeImage: qrCodeImage)
            print(" -Payload: \(description.errorCorrectedPayload)")
            print(" -Mask Pattern: \(description.maskPattern)")
            print(" -Symbol Version: \(description.symbolVersion)\n")
        }
    }

    private func readQRCode(qrCodeImage: CIImage) {
        let detector: CIDetector = CIDetector(ofType: CIDetectorTypeQRCode, context: nil, options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])!
        var qrCodeLink = ""
        let features = detector.features(in: qrCodeImage)
        for feature in features as! [CIQRCodeFeature] {
            if let messageString = feature.messageString {
                qrCodeLink += messageString
            }
        }
        if qrCodeLink == "" {
            print(" -No Code Message")
        } else {
            print(" -Code Message: \(qrCodeLink)")
        }
    }
How can I convert the image into an AVMetadataObject and then read it from there? Or is there a better approach?
Swift 4.1, using the Vision Framework (No 3rd party stuff or Pods)
Try this. It works for QR and for other types (Code39 in this example):
func startDetection() {
    let request = VNDetectBarcodesRequest(completionHandler: self.detectHandler)
    request.symbologies = [VNBarcodeSymbology.code39] // or use .QR, etc.
    self.requests = [request]
}

func detectHandler(request: VNRequest, error: Error?) {
    guard let observations = request.results else {
        //print("no result")
        return
    }
    let results = observations.map({ $0 as? VNBarcodeObservation })
    for result in results {
        print(result!.payloadStringValue!)
    }
}
And then in:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        return
    }
    var requestOptions: [VNImageOption: Any] = [:]
    if let camData = CMGetAttachment(sampleBuffer, kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix, nil) {
        requestOptions = [.cameraIntrinsics: camData]
    }
    let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: CGImagePropertyOrientation(rawValue: 6)!, options: requestOptions)
    do {
        try imageRequestHandler.perform(self.requests)
    } catch {
        print(error)
    }
}
The rest of the implementation is the regular AVCaptureDevice and AVCaptureSession stuff. You will also need to conform to AVCaptureVideoDataOutputSampleBufferDelegate
import AVFoundation
import Vision

var captureDevice: AVCaptureDevice!
var session = AVCaptureSession()
var requests = [VNRequest]()

override func viewDidLoad() {
    super.viewDidLoad()
    self.setupVideo()
    self.startDetection()
}

func setupVideo() {
    session.sessionPreset = AVCaptureSession.Preset.photo
    captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
    let deviceInput = try! AVCaptureDeviceInput(device: captureDevice!)
    let deviceOutput = AVCaptureVideoDataOutput()
    deviceOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
    deviceOutput.setSampleBufferDelegate(self, queue: DispatchQueue.global(qos: DispatchQoS.QoSClass.default))
    session.addInput(deviceInput)
    session.addOutput(deviceOutput)
    let imageLayer = AVCaptureVideoPreviewLayer(session: session)
    imageLayer.frame = imageView.bounds
    imageView.layer.addSublayer(imageLayer)
    session.startRunning()
}
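Putting it together, a sketch of how these pieces might sit in one view controller (the class name and the imageView outlet are illustrative, not from the answer):

import UIKit
import AVFoundation
import Vision

class BarcodeScanViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    @IBOutlet weak var imageView: UIImageView!

    var captureDevice: AVCaptureDevice!
    var session = AVCaptureSession()
    var requests = [VNRequest]()

    override func viewDidLoad() {
        super.viewDidLoad()
        setupVideo()
        startDetection()
    }

    // setupVideo(), startDetection(), detectHandler(request:error:)
    // and captureOutput(_:didOutput:from:) go here, exactly as shown above.
}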