AVCaptureConnection.videoPreviewLayer is nil in AVCaptureMetadataOutputObjectsDelegate - Swift

The code below can run on an iPhone and, when aimed at any QR code, should print its contents. The QR code I test with points to example.com.
The problem is that the delegate is handed a connection: AVCaptureConnection, but the connection's videoPreviewLayer property is nil.
The following code can be pasted into a new, empty Xcode project. If you comment out the setMetadataObjectsDelegate(delegate, queue:) call and enable the commented-out setMetadataObjectsDelegate(self, queue:) call instead, it works fine. But I want to place the delegate outside the CaptureView class. How do I set up the capture session so that videoPreviewLayer is not nil inside my external AVCaptureMetadataOutputObjectsDelegate?
import UIKit
import AVFoundation
func printMetadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], previewLayer: AVCaptureVideoPreviewLayer) {
for object in metadataObjects {
let visualCodeObject = previewLayer.transformedMetadataObject(for: object)
guard let object = visualCodeObject, let barcode = object as? AVMetadataMachineReadableCodeObject else {
NSLog("Ignoring object that is not AVMetadataMachineReadableCodeObject")
continue
}
guard let barcodeString = barcode.stringValue else {
NSLog("Captured something that's not a string")
continue
}
NSLog("Captured string %#", barcodeString)
}
}
class CaptureView: UIView, AVCaptureMetadataOutputObjectsDelegate {
private let previewLayer = AVCaptureVideoPreviewLayer()
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
printMetadataOutput(output, didOutput: metadataObjects, previewLayer: self.previewLayer)
}
override func layoutSubviews() {
super.layoutSubviews()
self.previewLayer.frame = self.frame
}
init(frame: CGRect, delegate: AVCaptureMetadataOutputObjectsDelegate) {
guard let captureDevice = AVCaptureDevice.default(for: .video) else {
fatalError("Couldn't find default capture device")
}
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else {
super.init(frame: frame)
return
}
let captureSession = AVCaptureSession()
captureSession.addInput(captureDeviceInput)
self.previewLayer.session = captureSession
self.previewLayer.videoGravity = .resizeAspectFill
super.init(frame: frame)
self.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.frame
captureSession.startRunning()
let metadataOutput = AVCaptureMetadataOutput()
// metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
metadataOutput.setMetadataObjectsDelegate(delegate, queue: DispatchQueue.main)
metadataOutput.rectOfInterest = CGRect(x: 0, y: 0, width: 1, height: 1)
if captureSession.canAddOutput(metadataOutput) {
captureSession.addOutput(metadataOutput)
} else {
fatalError("Can't add metadata output to capture session")
}
metadataOutput.metadataObjectTypes = [.qr]
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
}
class MetadataDelegate: NSObject, AVCaptureMetadataOutputObjectsDelegate {
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
guard let previewLayer = connection.videoPreviewLayer else {
print("previewLayer was nil")
return
}
printMetadataOutput(output, didOutput: metadataObjects, previewLayer: previewLayer)
}
}
class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
private let metadataDelegate = MetadataDelegate()
override func viewDidLoad() {
let captureView = CaptureView(frame: CGRect(), delegate: self.metadataDelegate)
captureView.frame = self.view.frame
captureView.autoresizingMask = [.flexibleHeight, .flexibleWidth]
self.view.addSubview(captureView)
}
}

As the videoPreviewLayer documentation states:
This property is set if you initialized the connection using
init(inputPort:videoPreviewLayer:) or
connectionWithInputPort:videoPreviewLayer:.
So in order to get a value in the videoPreviewLayer property, you have to set up the AVCaptureConnection object manually.
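For reference, manually wiring that connection would look roughly like the untested sketch below (it reuses the captureDeviceInput and previewLayer names from your code, and uses the *WithNoConnection(s) variants so the session doesn't auto-connect first):
// Untested sketch: create the preview-layer connection by hand so that
// connection.videoPreviewLayer is populated for *that* connection.
let captureSession = AVCaptureSession()
captureSession.addInputWithNoConnections(captureDeviceInput)
previewLayer.setSessionWithNoConnection(captureSession)

if let videoPort = captureDeviceInput.ports.first(where: { $0.mediaType == .video }) {
    let previewConnection = AVCaptureConnection(inputPort: videoPort,
                                                videoPreviewLayer: previewLayer)
    if captureSession.canAddConnection(previewConnection) {
        captureSession.addConnection(previewConnection)
    }
}
Even then, the connection that metadataOutput(_:didOutput:from:) hands you is the metadata-output connection, not this preview-layer one, so its videoPreviewLayer would still be nil; see the update below.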
Instead, I would suggest hiding AVCaptureMetadataOutputObjectsDelegate behind a custom protocol that you declare yourself:
protocol CaptureViewMetadataOutputObjectsDelegate {
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection, previewLayer: AVCaptureVideoPreviewLayer)
}
Then implement the AVCaptureMetadataOutputObjectsDelegate protocol in your CaptureView and call your protocol's function, passing along the required AVCaptureVideoPreviewLayer. Your code would look like this:
class CaptureView: UIView, AVCaptureMetadataOutputObjectsDelegate {
private let previewLayer = AVCaptureVideoPreviewLayer()
private let delegate: CaptureViewMetadataOutputObjectsDelegate
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
delegate.metadataOutput(output, didOutput: metadataObjects, from: connection, previewLayer: previewLayer)
// printMetadataOutput(output, didOutput: metadataObjects, previewLayer: self.previewLayer)
}
override func layoutSubviews() {
super.layoutSubviews()
self.previewLayer.frame = self.frame
}
init(frame: CGRect, delegate: CaptureViewMetadataOutputObjectsDelegate) {
self.delegate = delegate
guard let captureDevice = AVCaptureDevice.default(for: .video) else {
fatalError("Couldn't find default capture device")
}
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else {
super.init(frame: frame)
return
}
let captureSession = AVCaptureSession()
captureSession.addInput(captureDeviceInput)
self.previewLayer.session = captureSession
self.previewLayer.videoGravity = .resizeAspectFill
super.init(frame: frame)
self.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.frame
captureSession.startRunning()
let metadataOutput = AVCaptureMetadataOutput()
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
// metadataOutput.setMetadataObjectsDelegate(delegate, queue: DispatchQueue.main)
metadataOutput.rectOfInterest = CGRect(x: 0, y: 0, width: 1, height: 1)
if captureSession.canAddOutput(metadataOutput) {
captureSession.addOutput(metadataOutput)
} else {
fatalError("Can't add metadata output to capture session")
}
metadataOutput.metadataObjectTypes = [.qr]
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
}
class MetadataDelegate: CaptureViewMetadataOutputObjectsDelegate {
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection, previewLayer: AVCaptureVideoPreviewLayer) {
printMetadataOutput(output, didOutput: metadataObjects, previewLayer: previewLayer)
}
}
class ViewController: UIViewController {
private let metadataDelegate = MetadataDelegate()
override func viewDidLoad() {
let captureView = CaptureView(frame: CGRect(), delegate: self.metadataDelegate)
captureView.frame = self.view.frame
captureView.autoresizingMask = [.flexibleHeight, .flexibleWidth]
self.view.addSubview(captureView)
}
}
Update: After some research, I found this statement in the init(inputPort:videoPreviewLayer:) documentation:
When using addInput(_:) or addOutput(_:), connections are
automatically formed between all compatible inputs and outputs. You do
not need to manually create and add connections to the session unless
you use the primitive addInputWithNoConnections(_:) and
addOutputWithNoConnections(_:) methods.
That means that when you added the device camera as input and the AVCaptureMetadataOutput as output, all the compatible AVCaptureConnection objects were created automatically.
I tried to create another AVCaptureConnection using the following code:
if let port = captureDeviceInput.ports.first(where: { $0.mediaType == .video }) {
let con = AVCaptureConnection(inputPort: port, videoPreviewLayer: self.previewLayer)
if captureSession.canAddConnection(con) {
captureSession.addConnection(con)
}
}
but the canAddConnection(_:) function always returns false.
After that, I printed the connections array of the AVCaptureSession and saw the following:
(lldb) po captureSession.connections
[<AVCaptureConnection: 0x280d67980 (AVCaptureDeviceInput: 0x280d119a0 Back Camera) -> (AVCaptureVideoPreviewLayer: 0x280d6ba40) [type:vide][enabled:1][active:1]>, <AVCaptureConnection: 0x280d7bee0 (AVCaptureDeviceInput: 0x280d119a0 Back Camera) -> (AVCaptureMetadataOutput: 0x280d700e0) [type:mobj][enabled:1][active:1]>]
So, one AVCaptureConnection has been created with the Back Camera as input and an AVCaptureVideoPreviewLayer instance (presumably the one you created, the previewLayer property), and another one with the Back Camera as input and the AVCaptureMetadataOutput that you added to the AVCaptureSession.
The first one, as expected, does have a value in its videoPreviewLayer property:
(lldb) po captureSession.connections[0].videoPreviewLayer
▿ Optional<AVCaptureVideoPreviewLayer>
- some : <AVCaptureVideoPreviewLayer:0x280d6ba40; position = CGPoint (0 0); bounds = CGRect (0 0; 0 0); sublayers = (<CALayer: 0x280d6bc20>); masksToBounds = YES; allowsGroupOpacity = YES; inheritsTiming = NO; >
Apparently, the AVCaptureConnection instance that you get in the metadataOutput(_:didOutput:from:) function will always be the second one: the one associating the Back Camera with the AVCaptureMetadataOutput.
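If all your delegate really needs is the layer, one workaround (an untested sketch; the class name and the session property are my own, and it assumes whoever builds the session hands it to the delegate) is to look the preview-layer connection up from the session instead of relying on the connection parameter:
class SessionAwareMetadataDelegate: NSObject, AVCaptureMetadataOutputObjectsDelegate {
    // Assumption: set this when you configure the capture session.
    var captureSession: AVCaptureSession?

    func metadataOutput(_ output: AVCaptureMetadataOutput,
                        didOutput metadataObjects: [AVMetadataObject],
                        from connection: AVCaptureConnection) {
        // The preview-layer connection is the one whose videoPreviewLayer is non-nil.
        guard let previewLayer = captureSession?.connections
            .compactMap({ $0.videoPreviewLayer })
            .first
        else {
            print("No preview-layer connection found")
            return
        }
        printMetadataOutput(output, didOutput: metadataObjects, previewLayer: previewLayer)
    }
}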

I caught the bug.
In fact, even if you enable this line:
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
in the corresponding metadataOutput(_:didOutput:from:) of CaptureView, connection.videoPreviewLayer is still nil. As the Developer Documentation says:
This property is set if you initialized the connection using init(inputPort:videoPreviewLayer:) or connectionWithInputPort:videoPreviewLayer:.
So, either way, connection.videoPreviewLayer will be nil.
I've updated your code a little to make it work the way you want.
import UIKit
import AVFoundation
func printMetadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], previewLayer: AVCaptureVideoPreviewLayer) {
for object in metadataObjects {
let visualCodeObject = previewLayer.transformedMetadataObject(for: object)
guard let object = visualCodeObject, let barcode = object as? AVMetadataMachineReadableCodeObject else {
NSLog("Ignoring object that is not AVMetadataMachineReadableCodeObject")
continue
}
guard let barcodeString = barcode.stringValue else {
NSLog("Captured something that's not a string")
continue
}
NSLog("Captured string %#", barcodeString)
}
}
class CaptureView: UIView, AVCaptureMetadataOutputObjectsDelegate {
let previewLayer = AVCaptureVideoPreviewLayer()
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
if connection.videoPreviewLayer == nil {
print("connection.videoPreviewLayer was nil")
}
printMetadataOutput(output, didOutput: metadataObjects, previewLayer: self.previewLayer)
}
override func layoutSubviews() {
super.layoutSubviews()
self.previewLayer.frame = self.frame
}
init(frame: CGRect, delegate: AVCaptureMetadataOutputObjectsDelegate) {
guard let captureDevice = AVCaptureDevice.default(for: .video) else {
fatalError("Couldn't find default capture device")
}
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else {
super.init(frame: frame)
return
}
let captureSession = AVCaptureSession()
captureSession.addInput(captureDeviceInput)
self.previewLayer.session = captureSession
self.previewLayer.videoGravity = .resizeAspectFill
super.init(frame: frame)
self.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.frame
captureSession.startRunning()
let metadataOutput = AVCaptureMetadataOutput()
// metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
metadataOutput.setMetadataObjectsDelegate(delegate, queue: DispatchQueue.main)
metadataOutput.rectOfInterest = CGRect(x: 0, y: 0, width: 1, height: 1)
if captureSession.canAddOutput(metadataOutput) {
captureSession.addOutput(metadataOutput)
} else {
fatalError("Can't add metadata output to capture session")
}
metadataOutput.metadataObjectTypes = [.qr]
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
}
class MetadataDelegate: NSObject, AVCaptureMetadataOutputObjectsDelegate {
var previewLayer: AVCaptureVideoPreviewLayer?
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
guard let previewLayer = previewLayer else {
print("previewLayer was nil")
return
}
printMetadataOutput(output, didOutput: metadataObjects, previewLayer: previewLayer)
}
}
class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
private let metadataDelegate = MetadataDelegate()
override func viewDidLoad() {
let captureView = CaptureView(frame: CGRect(), delegate: self.metadataDelegate)
metadataDelegate.previewLayer = captureView.previewLayer
captureView.frame = self.view.frame
captureView.autoresizingMask = [.flexibleHeight, .flexibleWidth]
self.view.addSubview(captureView)
}
}

Related

Image Recognition Results are not printed in Swift Playgrounds

So I have been working in a playground to recognize objects in a live capture, but when I try to print the results, nothing is printed. Here is my code. I have also tried stepping through the code, and it just executes the return in the guard let for the results. The SetupLabel func also can't be executed, as the playground then reports a problem.
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
// make a computed property based of the properties defined in the curly braces
let label: UILabel = {
let label = UILabel()
label.textColor = .white
label.translatesAutoresizingMaskIntoConstraints = false
label.text = "Label"
label.font = label.font.withSize(30)
return label
}()
override func viewDidLoad() {
super.viewDidLoad()
setupCaptureSession()
}
func setupCaptureSession(){
let captureSession = AVCaptureSession()
// search for devices with specifications defined
let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: .video, position: .back).devices
// setup capture device add input to captureSession
do{
if let captureDevice = availableDevices.first{
let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
captureSession.addInput(captureDeviceInput)
}
}catch{
print(error.localizedDescription)
}
// setup output and output to captureSession
let captureOutput = AVCaptureVideoDataOutput()
captureOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
captureSession.addOutput(captureOutput)
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.frame = view.frame
view.layer.addSublayer(previewLayer)
captureSession.startRunning()
}
// called when a frame is captured
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let model = try? VNCoreMLModel(for: myYolo) else {return}
let request = VNCoreMLRequest(model: model) { (finishedRequest, error) in
print(finishedRequest.results)
guard let results = finishedRequest.results as? [VNClassificationObservation] else { return }
guard let Observation = results.first else { return }
DispatchQueue.main.async(execute: {
self.label.text = "\(Observation.identifier)"
})
}
guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
// executes request
try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}
func SetupLabel(){
label.centerXAnchor.constraint(equalTo: view.centerXAnchor).isActive = true
label.bottomAnchor.constraint(equalTo: view.bottomAnchor, constant: -50).isActive = true
view.addSubview(label)
}
}
You need to run it on a real device.
Vision requests will not work in playgrounds/simulator.
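If it helps, a small guard like this (just a sketch) makes the failure obvious instead of silent when the code accidentally runs somewhere without camera hardware:
// Sketch: fail fast when not on a physical device, since the camera
// (and therefore the Vision requests fed from it) is unavailable there.
#if targetEnvironment(simulator)
fatalError("Run this on a real device; the camera is not available in the simulator.")
#endif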

Several errors in my Swift code for a camera app

I am trying to create a camera app on Xcode 10.1 using Swift for a school project. I have been working on this for a while, and still have several errors.
import UIKit
import AVFoundation
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
let captureSession = AVCaptureSession()
var previewLayer:CALayer!
var captureDevice:AVCaptureDevice?
var takePhoto = false
override func viewDidLoad() {
super.viewDidLoad()
prepareCamera()
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
prepareCamera()
}
func prepareCamera() {
captureSession.sessionPreset = AVCaptureSession.Preset.photo
if let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.back) {
//if availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType:AVMediaType.video, position: .back).devices {
//let captureDevice = availableDevices
beginSession()
}
}
func beginSession () {
do {
let captureDeviceInput = try AVCaptureDeviceInput( device: captureDevice)
captureSession.addInput(captureDeviceInput)
} catch {
print(error.localizedDescription)
}
if let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
self.previewLayer = previewLayer
self.view.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.view.layer.frame
captureSession.startRunning()
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)] as [String : Any]
dataOutput.alwaysDiscardsLateVideoFrames = true
if captureSession.canAddOutput(dataOutput) {
captureSession.addOutput(dataOutput)
}
captureSession.commitConfiguration()
let queue = DispatchQueue(label: "com.sophiaradis.captureQueue")
dataOutput.setSampleBufferDelegate(self, queue: queue)
}
}
@IBAction func takePhoto(_ sender: Any) {
takePhoto = true
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
if takePhoto {
takePhoto = false
if let image = self.getImageFromSamplyBuffer(buffer: sampleBuffer){
let photoVC = UIStoryboard(name: "Main", bundle: nil).instantiateViewController(withIdentifier: "PhotoVC") as! PhotoViewController
photoVC.takenPhoto = image
DispatchQueue.main.async {
self.present(photoVC, animated: true, completion: {
self.stopCaptureSession()
})
}
}
}
}
func getImageFromSamplyBuffer (buffer:CMSampleBuffer) -> UIImage? {
if let pixelBuffer = CMSampleBufferGetImageBuffer(buffer) {
let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
let context = CIContext()
let imageRect = CGRect(x: 0, y: 0, width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer))
if let image = context.createCGImage(ciImage, from: imageRect) {
return UIImage(cgImage: image, scale: UIScreen.main.scale, orientation: .right)
}
}
return nil
}
func stopCaptureSession () {
self.captureSession.stopRunning()
if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
for input in inputs {
self.captureSession.removeInput(input)
}
}
}
}
There is an error in these lines of code:
if let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes:[AVCaptureDevice.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.back)
This error says that type AVCaptureDevice has no member DiscoverySession. But when I looked online, it does.
There is a second error in the lines that follow, saying that I cannot convert a value of type 'AVCaptureDevice?' to the expected argument type 'AVCaptureDevice'.
func beginSession () {
do {
let captureDeviceInput = try AVCaptureDeviceInput( device: captureDevice)
I have no idea how to fix this one at all. My next error occurs directly below that one, in the following lines:
if let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
self.previewLayer = previewLayer
self.view.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.view.layer.frame
captureSession.startRunning()
And this is flagged with: Initializer for conditional binding must have Optional type, not 'AVCaptureVideoPreviewLayer'.
If you can fix, or even offer advice on how to fix, any of these, it would mean a lot to me and really make my year.
1-
let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.back)
2- Conditionally unwrap with if let captureDevice, or force-unwrap with captureDevice!:
let captureDeviceInput = try AVCaptureDeviceInput( device: captureDevice!)
3- The AVCaptureVideoPreviewLayer initializer doesn't return an optional, so replace
if let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
with
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
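Putting the three fixes together, prepareCamera() and beginSession() might look roughly like this (an untested sketch based on the code in your question):
func prepareCamera() {
    captureSession.sessionPreset = AVCaptureSession.Preset.photo
    // Fix 1: DiscoverySession with fully qualified DeviceType / Position values.
    let discovery = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera],
                                                     mediaType: AVMediaType.video,
                                                     position: AVCaptureDevice.Position.back)
    captureDevice = discovery.devices.first
    beginSession()
}

func beginSession() {
    // Fix 2: unwrap the optional AVCaptureDevice before building the input.
    guard let captureDevice = captureDevice else { return }
    do {
        let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
        captureSession.addInput(captureDeviceInput)
    } catch {
        print(error.localizedDescription)
    }
    // Fix 3: the initializer is not failable, so no conditional binding.
    let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    self.previewLayer = previewLayer
    self.view.layer.addSublayer(previewLayer)
    previewLayer.frame = self.view.layer.frame
    captureSession.startRunning()
    // ... the rest of the original beginSession() body is unchanged.
}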

func captureOutput is never called

I'd like to add a filter to each frame I record in real time and display the filtered image in a UIImageView; if anyone could help, it would be nice.
But captureOutput is never called. Here is my code.
class Measurement: UIViewController , AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet weak var cameraPreview: UIView!
@IBOutlet weak var imageView: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
setupCameraSession()
toggleTorch(on: true)
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
view.layer.addSublayer(previewLayer)
cameraSession.startRunning()
}
lazy var cameraSession: AVCaptureSession = {
let s = AVCaptureSession()
s.sessionPreset = AVCaptureSession.Preset.low
return s
}()
lazy var previewLayer: AVCaptureVideoPreviewLayer = {
let preview = AVCaptureVideoPreviewLayer(session: self.cameraSession)
preview.position = CGPoint(x:182,y: 485)
preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
preview.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
preview.bounds = imageView.bounds
//preview.position = CGPoint(x:self.view.bounds.midX,y: self.view.bounds.midY)
imageView.layer.addSublayer(preview)
return preview
}()
func toggleTorch(on: Bool) {
guard let device = AVCaptureDevice.default(for: .video) else { return }
if device.hasTorch {
do {
try device.lockForConfiguration()
if on == true {
device.torchMode = .on
} else {
device.torchMode = .off
}
device.unlockForConfiguration()
} catch {
print("Torch could not be used")
}
} else {
print("Torch is not available")
}
}
func setupCameraSession() {
let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
do {
let deviceInput = try AVCaptureDeviceInput(device: captureDevice!)
cameraSession.beginConfiguration()
if (cameraSession.canAddInput(deviceInput) == true) {
cameraSession.addInput(deviceInput)
print("Processing Data.")
}
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString) : NSNumber(value: kCVPixelFormatType_32BGRA as UInt32)] as [String : AnyObject]
dataOutput.alwaysDiscardsLateVideoFrames = true
print("Processing Data.")
if (cameraSession.canAddOutput(dataOutput) == true) {
cameraSession.addOutput(dataOutput)
print("Processing Data.")
}
cameraSession.commitConfiguration()
let queue = DispatchQueue(label: "com.invasivecode.videoQueue")
dataOutput.setSampleBufferDelegate(self, queue: queue)
}
catch let error as NSError {
print("\(error), \(error.localizedDescription)")
}
}
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
print("Processing Data.")
guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
//let chromaKeyFilter = colorCubeFilterForChromaKey(hueAngle: 120)
let ciImage = CIImage(cvPixelBuffer: imageBuffer)
let context = CIContext()
guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return }
let image = UIImage(cgImage: cgImage)
if let chromaKeyFilter = CIFilter(name: "CISepiaTone") {
let beginImage = CIImage(image: image)
chromaKeyFilter.setValue(beginImage, forKey: kCIInputImageKey)
chromaKeyFilter.setValue(0.5, forKey: kCIInputIntensityKey)
if let output = chromaKeyFilter.outputImage {
if let cgimg = context.createCGImage(output, from: output.extent) {
let processedImage = UIImage(cgImage: cgimg)
// do something interesting with the processed image
imageView.image = processedImage
}
}
}
}
func captureOutput(_ captureOutput: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
// Here you can count how many frames are dopped
}
func startCapture() {
print("\(self.classForCoder)/" + #function)
if cameraSession.isRunning {
print("already running")
return
}
cameraSession.startRunning()
toggleTorch(on: true)
}
You need to set the sample buffer delegate on the output:
dataOutput.setSampleBufferDelegate(self, queue: queue)
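One more thing worth checking (this is an assumption on my part, since I can't see your Swift version): the delegate method in your code mixes the old and new selector spellings, so AVFoundation never calls it. With Swift 4 or later the expected signature is:
// Swift 4+ sample buffer callback; the older
// captureOutput(_:didOutputSampleBuffer:fromConnection:) spelling is not called.
func captureOutput(_ output: AVCaptureOutput,
                   didOutput sampleBuffer: CMSampleBuffer,
                   from connection: AVCaptureConnection) {
    print("Processing Data.")
}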

How to custom size AVcapture

I need to capture my barcode, but my capture preview is full screen.
How can I make it a custom, smaller size?
Please give me an idea or code for a custom size, thank you.
With this code, the capture preview is full screen.
import UIKit
import AVFoundation
protocol BarcodeDelegate {
func barcodeReaded(barcode: String)
}
class barcodeCapViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
var delegate: BarcodeDelegate?
var captureSession: AVCaptureSession!
var code: String?
override func viewDidLoad() {
super.viewDidLoad()
self.captureSession = AVCaptureSession();
let videoCaptureDevice: AVCaptureDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
do {
let videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
if self.captureSession.canAddInput(videoInput) {
self.captureSession.addInput(videoInput)
} else {
print("Could not add video input")
}
let metadataOutput = AVCaptureMetadataOutput()
if self.captureSession.canAddOutput(metadataOutput) {
self.captureSession.addOutput(metadataOutput)
metadataOutput.setMetadataObjectsDelegate(self, queue: dispatch_get_main_queue())
metadataOutput.metadataObjectTypes = [AVMetadataObjectTypeQRCode, AVMetadataObjectTypeEAN13Code]
} else {
print("Could not add metadata output")
}
let previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
previewLayer.frame = self.view.layer.bounds
self.view.layer .addSublayer(previewLayer)
self.captureSession.startRunning()
} catch let error as NSError {
print("Error while creating vide input device: \(error.localizedDescription)")
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
func captureOutput(captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [AnyObject]!, fromConnection connection: AVCaptureConnection!) {
for metadata in metadataObjects {
let readableObject = metadata as! AVMetadataMachineReadableCodeObject
let code = readableObject.stringValue
if !code.isEmpty {
self.captureSession.stopRunning()
self.dismissViewControllerAnimated(true, completion: nil)
self.delegate?.barcodeReaded(code)
}
}
}
}
When I add CGRectMake(20, 40, 200, 50), or CGRectMake(20, 40, 500, 100), the preview still doesn't come out at the size I specify.
I don't know why the width and height don't follow the code.
Change the frame size of your AVCaptureVideoPreviewLayer:
let previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
previewLayer.frame = CGRectMake(10, 20, 100, 50) // something else!
If you're using Auto Layout, you probably don't want to deal with CALayer frames, so you should create a UIView subclass, add your AVCaptureVideoPreviewLayer to it, and set the layer's frame in layoutSubviews:
override func layoutSubviews() {
super.layoutSubviews()
self.previewLayer.frame = self.frame
}
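A minimal sketch of such a subclass (the class and property names here are just placeholders, not from the question):
class PreviewView: UIView {
    let previewLayer = AVCaptureVideoPreviewLayer()

    override init(frame: CGRect) {
        super.init(frame: frame)
        layer.addSublayer(previewLayer)
    }

    required init?(coder: NSCoder) {
        super.init(coder: coder)
        layer.addSublayer(previewLayer)
    }

    override func layoutSubviews() {
        super.layoutSubviews()
        // Use bounds (the view's own coordinate space) so the layer
        // tracks whatever size Auto Layout gives the view.
        previewLayer.frame = bounds
    }
}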

AVCaptureVideoDataOutput captureOutput not being called

I'm trying to capture the screen on a Mac with AVCaptureScreenInput, but the AVCaptureVideoDataOutput delegate's captureOutput is never called, and I'm not sure why. I do get a notification saying that the capture session has started.
import Cocoa
import AVFoundation
class ViewController: NSViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
var captureSession: AVCaptureSession!
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view.
}
override func viewWillAppear() {
super.viewWillAppear()
NSNotificationCenter.defaultCenter().addObserver(self, selector: #selector(ViewController.errorNotif), name: AVCaptureSessionRuntimeErrorNotification, object: nil)
NSNotificationCenter.defaultCenter().addObserver(self, selector: #selector(ViewController.startedNotif), name: AVCaptureSessionDidStartRunningNotification, object: nil)
startScreenCapture()
}
override func viewWillDisappear() {
super.viewWillDisappear()
NSNotificationCenter.defaultCenter().removeObserver(self)
}
func captureOutput(captureOutput: AVCaptureOutput!, didDropSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
print("ignore frame, add code to handle later")
}
func startScreenCapture() {
let displayId = CGMainDisplayID()
captureSession = AVCaptureSession()
if captureSession.canSetSessionPreset(AVCaptureSessionPresetHigh) {
captureSession.sessionPreset = AVCaptureSessionPresetHigh
}
let captureScreenInput = AVCaptureScreenInput(displayID: displayId)
if captureSession.canAddInput(captureScreenInput) {
captureSession.addInput(captureScreenInput)
} else {
print("Could not add main display to capture input")
}
let output = AVCaptureVideoDataOutput()
let queue = dispatch_queue_create("myQueue", DISPATCH_QUEUE_SERIAL)
output.setSampleBufferDelegate(self, queue: queue)
output.alwaysDiscardsLateVideoFrames = true
output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as NSString: NSNumber(unsignedInt: kCVPixelFormatType_32BGRA)]
captureSession.addOutput(output)
captureSession.startRunning()
}
func errorNotif() {
print("error starting capture")
}
func startedNotif() {
print("started screen capture")
}
}
I've added a basic example using AVCaptureVideoDataOutputSampleBufferDelegate:
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
let videoQueue = DispatchQueue(label: "VIDEO_QUEUE")
override func viewDidLoad() {
super.viewDidLoad()
let captureSession = AVCaptureSession()
guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
captureSession.addInput(input)
captureSession.startRunning()
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(previewLayer)
previewLayer.frame = view.frame
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.setSampleBufferDelegate(self, queue: videoQueue)
captureSession.addOutput(dataOutput)
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
print("Camera was able to capture a frame:", Date())
}
}
You need to define the didOutputSampleBuffer delegate callback to actually receive the captured frames:
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
print("captured \(sampleBuffer)")
}
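Note (an assumption on my side, in case you're on a newer Swift than this question was written for): with Swift 4 and later the callback has a different spelling, and the old one will silently never be called:
// Swift 4+ equivalent of the callback above.
func captureOutput(_ output: AVCaptureOutput,
                   didOutput sampleBuffer: CMSampleBuffer,
                   from connection: AVCaptureConnection) {
    print("captured \(sampleBuffer)")
}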
p.s. I'm not sure about macOS, but viewWillAppear may not be a good place to do initialisation because on iOS at least it can be called multiple times.