I am trying to add a scanner to my application, but every time I run the application it says that there is a problem in my code and that I have to try again.
The code I have is from https://www.hackingwithswift.com/example-code/media/how-to-scan-a-barcode. I tried pasting it into a blank document and into an Xcode playground document, but neither of them worked.
import AVFoundation
import UIKit
class ScannerViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
var captureSession: AVCaptureSession!
var previewLayer: AVCaptureVideoPreviewLayer!
override func viewDidLoad() {
super.viewDidLoad()
view.backgroundColor = UIColor.black
captureSession = AVCaptureSession()
guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }
let videoInput: AVCaptureDeviceInput
do {
videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
} catch {
return
}
if (captureSession.canAddInput(videoInput)) {
captureSession.addInput(videoInput)
} else {
failed()
return
}
let metadataOutput = AVCaptureMetadataOutput()
if (captureSession.canAddOutput(metadataOutput)) {
captureSession.addOutput(metadataOutput)
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
metadataOutput.metadataObjectTypes = [.ean8, .ean13, .pdf417]
} else {
failed()
return
}
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.frame = view.layer.bounds
previewLayer.videoGravity = .resizeAspectFill
view.layer.addSublayer(previewLayer)
captureSession.startRunning()
}
func failed() {
let ac = UIAlertController(title: "Scanning not supported", message: "Your device does not support scanning a code from an item. Please use a device with a camera.", preferredStyle: .alert)
ac.addAction(UIAlertAction(title: "OK", style: .default))
present(ac, animated: true)
captureSession = nil
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
if (captureSession?.isRunning == false) {
captureSession.startRunning()
}
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
if (captureSession?.isRunning == true) {
captureSession.stopRunning()
}
}
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
captureSession.stopRunning()
if let metadataObject = metadataObjects.first {
guard let readableObject = metadataObject as? AVMetadataMachineReadableCodeObject else { return }
guard let stringValue = readableObject.stringValue else { return }
AudioServicesPlaySystemSound(SystemSoundID(kSystemSoundID_Vibrate))
found(code: stringValue)
}
dismiss(animated: true)
}
func found(code: String) {
print(code)
}
override var prefersStatusBarHidden: Bool {
return true
}
override var supportedInterfaceOrientations: UIInterfaceOrientationMask {
return .portrait
}
}
The code below can run on an iPhone and, when aimed at any QR code, should print its contents. The QR code below points to example.com.
The problem is that the delegate is supposed to supply the connection: AVCaptureConnection, and it does, but the connection's videoPreviewLayer property is nil.
The following code can be pasted into a new, empty Xcode project. If you disable (comment out) line 57 and enable line 56, it works fine. But I want to place the delegate outside the CaptureView class. How do I set up the capture so that the connection passed to the AVCaptureMetadataOutputObjectsDelegate has a non-nil videoPreviewLayer property?
import UIKit
import AVFoundation
func printMetadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], previewLayer: AVCaptureVideoPreviewLayer) {
for object in metadataObjects {
let visualCodeObject = previewLayer.transformedMetadataObject(for: object)
guard let object = visualCodeObject, let barcode = object as? AVMetadataMachineReadableCodeObject else {
NSLog("Ignoring object that is not AVMetadataMachineReadableCodeObject")
continue
}
guard let barcodeString = barcode.stringValue else {
NSLog("Captured something that's not a string")
continue
}
NSLog("Captured string %#", barcodeString)
}
}
class CaptureView: UIView, AVCaptureMetadataOutputObjectsDelegate {
private let previewLayer = AVCaptureVideoPreviewLayer()
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
printMetadataOutput(output, didOutput: metadataObjects, previewLayer: self.previewLayer)
}
override func layoutSubviews() {
super.layoutSubviews()
self.previewLayer.frame = self.frame
}
init(frame: CGRect, delegate: AVCaptureMetadataOutputObjectsDelegate) {
guard let captureDevice = AVCaptureDevice.default(for: .video) else {
fatalError("Couldn't find default capture device")
}
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else {
super.init(frame: frame)
return
}
let captureSession = AVCaptureSession()
captureSession.addInput(captureDeviceInput)
self.previewLayer.session = captureSession
self.previewLayer.videoGravity = .resizeAspectFill
super.init(frame: frame)
self.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.frame
captureSession.startRunning()
let metadataOutput = AVCaptureMetadataOutput()
// metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
metadataOutput.setMetadataObjectsDelegate(delegate, queue: DispatchQueue.main)
metadataOutput.rectOfInterest = CGRect(x: 0, y: 0, width: 1, height: 1)
if captureSession.canAddOutput(metadataOutput) {
captureSession.addOutput(metadataOutput)
} else {
fatalError("Can't add metadata output to capture session")
}
metadataOutput.metadataObjectTypes = [.qr]
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
}
class MetadataDelegate: NSObject, AVCaptureMetadataOutputObjectsDelegate {
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
guard let previewLayer = connection.videoPreviewLayer else {
print("previewLayer was nil")
return
}
printMetadataOutput(output, didOutput: metadataObjects, previewLayer: previewLayer)
}
}
class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
private let metadataDelegate = MetadataDelegate()
override func viewDidLoad() {
let captureView = CaptureView(frame: CGRect(), delegate: self.metadataDelegate)
captureView.frame = self.view.frame
captureView.autoresizingMask = [.flexibleHeight, .flexibleWidth]
self.view.addSubview(captureView)
}
}
As the videoPreviewLayer documentation states:
This property is set if you initialized the connection using
init(inputPort:videoPreviewLayer:) or
connectionWithInputPort:videoPreviewLayer:.
So in order to get a value in the videoPreviewLayer property, you have to set up the AVCaptureConnection object manually.
Instead, I would suggest hiding AVCaptureMetadataOutputObjectsDelegate behind a custom protocol that you declare yourself:
protocol CaptureViewMetadataOutputObjectsDelegate {
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection, previewLayer: AVCaptureVideoPreviewLayer)
}
Then implement the AVCaptureMetadataOutputObjectsDelegate protocol in your CaptureView and have it call your protocol's method, passing along the required AVCaptureVideoPreviewLayer. Your code will look like this:
class CaptureView: UIView, AVCaptureMetadataOutputObjectsDelegate {
private let previewLayer = AVCaptureVideoPreviewLayer()
private let delegate: CaptureViewMetadataOutputObjectsDelegate
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
delegate.metadataOutput(output, didOutput: metadataObjects, from: connection, previewLayer: previewLayer)
// printMetadataOutput(output, didOutput: metadataObjects, previewLayer: self.previewLayer)
}
override func layoutSubviews() {
super.layoutSubviews()
self.previewLayer.frame = self.frame
}
init(frame: CGRect, delegate: CaptureViewMetadataOutputObjectsDelegate) {
self.delegate = delegate
guard let captureDevice = AVCaptureDevice.default(for: .video) else {
fatalError("Couldn't find default capture device")
}
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else {
super.init(frame: frame)
return
}
let captureSession = AVCaptureSession()
captureSession.addInput(captureDeviceInput)
self.previewLayer.session = captureSession
self.previewLayer.videoGravity = .resizeAspectFill
super.init(frame: frame)
self.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.frame
captureSession.startRunning()
let metadataOutput = AVCaptureMetadataOutput()
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
// metadataOutput.setMetadataObjectsDelegate(delegate, queue: DispatchQueue.main)
metadataOutput.rectOfInterest = CGRect(x: 0, y: 0, width: 1, height: 1)
if captureSession.canAddOutput(metadataOutput) {
captureSession.addOutput(metadataOutput)
} else {
fatalError("Can't add metadata output to capture session")
}
metadataOutput.metadataObjectTypes = [.qr]
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
}
class MetadataDelegate: CaptureViewMetadataOutputObjectsDelegate {
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection, previewLayer: AVCaptureVideoPreviewLayer) {
printMetadataOutput(output, didOutput: metadataObjects, previewLayer: previewLayer)
}
}
class ViewController: UIViewController {
private let metadataDelegate = MetadataDelegate()
override func viewDidLoad() {
let captureView = CaptureView(frame: CGRect(), delegate: self.metadataDelegate)
captureView.frame = self.view.frame
captureView.autoresizingMask = [.flexibleHeight, .flexibleWidth]
self.view.addSubview(captureView)
}
}
Update: After some research, I found this statement in the init(inputPort:videoPreviewLayer:) documentation:
When using addInput(_:) or addOutput(_:), connections are
automatically formed between all compatible inputs and outputs. You do
not need to manually create and add connections to the session unless
you use the primitive addInputWithNoConnections(_:) and
addOutputWithNoConnections(_:) methods.
That means that when you add the device camera as input and the AVCaptureMetadataOutput as output, all compatible AVCaptureConnection objects are created automatically.
I tried to create another AVCaptureConnection using the following code:
if let port = captureDeviceInput.ports.first(where: { $0.mediaType == .video }) {
let con = AVCaptureConnection(inputPort: port, videoPreviewLayer: self.previewLayer)
if captureSession.canAddConnection(con) {
captureSession.addConnection(con)
}
}
but the canAddConnection(_:) function always returns false.
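For what it's worth, my guess (an assumption on my part, not something verified in the original post) is that canAddConnection(_:) fails because the session already formed an equivalent connection automatically when the input and the preview layer were attached. A rough sketch of the fully manual route, reusing the captureDeviceInput from the code above and using the no-connection variants of the APIs, would look like this:

let captureSession = AVCaptureSession()
// Add the input without letting the session form connections automatically.
captureSession.addInputWithNoConnections(captureDeviceInput)

// Attach the preview layer without an implicit connection either.
let previewLayer = AVCaptureVideoPreviewLayer(sessionWithNoConnection: captureSession)

if let port = captureDeviceInput.ports.first(where: { $0.mediaType == .video }) {
    let previewConnection = AVCaptureConnection(inputPort: port, videoPreviewLayer: previewLayer)
    if captureSession.canAddConnection(previewConnection) {
        // This connection's videoPreviewLayer is non-nil, but note that it is the
        // preview connection, not the connection later handed to the metadata delegate.
        captureSession.addConnection(previewConnection)
    }
}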
After that, I printed the connections array of the AVCaptureSession and saw the following:
(lldb) po captureSession.connections
[<AVCaptureConnection: 0x280d67980 (AVCaptureDeviceInput: 0x280d119a0 Back Camera) -> (AVCaptureVideoPreviewLayer: 0x280d6ba40) [type:vide][enabled:1][active:1]>, <AVCaptureConnection: 0x280d7bee0 (AVCaptureDeviceInput: 0x280d119a0 Back Camera) -> (AVCaptureMetadataOutput: 0x280d700e0) [type:mobj][enabled:1][active:1]>]
So, one AVCaptureConnection has been created with the Back Camera as input and an AVCaptureVideoPreviewLayer instance (presumably the one you created, the previewLayer property), and another one with the Back Camera as input and the AVCaptureMetadataOutput that you added to the AVCaptureSession.
The first one, as expected, does have a value in its videoPreviewLayer property:
(lldb) po captureSession.connections[0].videoPreviewLayer
▿ Optional<AVCaptureVideoPreviewLayer>
- some : <AVCaptureVideoPreviewLayer:0x280d6ba40; position = CGPoint (0 0); bounds = CGRect (0 0; 0 0); sublayers = (<CALayer: 0x280d6bc20>); masksToBounds = YES; allowsGroupOpacity = YES; inheritsTiming = NO; >
Apparently, the AVCaptureConnection instance that you get in the metadataOutput(_:didOutput:from:) function will always be the second one, the one associating the Back Camera with the AVCaptureMetadataOutput.
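As an illustration (this snippet is mine, not from the original post), you can confirm which connection the delegate receives by logging it inside the callback:

func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    // The connection delivered here links the camera input to the metadata output,
    // so its videoPreviewLayer is expected to print nil.
    print("output:", connection.output as Any)
    print("videoPreviewLayer:", connection.videoPreviewLayer as Any)
}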
I caught the bug.
In fact, even when you enable this line:
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
connection.videoPreviewLayer is still nil in the corresponding metadataOutput(_:didOutput:from:) of CaptureView. As the developer documentation says:
This property is set if you initialized the connection using init(inputPort:videoPreviewLayer:) or connectionWithInputPort:videoPreviewLayer:.
So either way, connection.videoPreviewLayer will be nil.
I've updated your code a little bit to make it work the way you want.
import UIKit
import AVFoundation
func printMetadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], previewLayer: AVCaptureVideoPreviewLayer) {
for object in metadataObjects {
let visualCodeObject = previewLayer.transformedMetadataObject(for: object)
guard let object = visualCodeObject, let barcode = object as? AVMetadataMachineReadableCodeObject else {
NSLog("Ignoring object that is not AVMetadataMachineReadableCodeObject")
continue
}
guard let barcodeString = barcode.stringValue else {
NSLog("Captured something that's not a string")
continue
}
NSLog("Captured string %#", barcodeString)
}
}
class CaptureView: UIView, AVCaptureMetadataOutputObjectsDelegate {
let previewLayer = AVCaptureVideoPreviewLayer()
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
if connection.videoPreviewLayer == nil {
print("connection.videoPreviewLayer was nil")
}
printMetadataOutput(output, didOutput: metadataObjects, previewLayer: self.previewLayer)
}
override func layoutSubviews() {
super.layoutSubviews()
self.previewLayer.frame = self.frame
}
init(frame: CGRect, delegate: AVCaptureMetadataOutputObjectsDelegate) {
guard let captureDevice = AVCaptureDevice.default(for: .video) else {
fatalError("Couldn't find default capture device")
}
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else {
super.init(frame: frame)
return
}
let captureSession = AVCaptureSession()
captureSession.addInput(captureDeviceInput)
self.previewLayer.session = captureSession
self.previewLayer.videoGravity = .resizeAspectFill
super.init(frame: frame)
self.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.frame
captureSession.startRunning()
let metadataOutput = AVCaptureMetadataOutput()
// metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
metadataOutput.setMetadataObjectsDelegate(delegate, queue: DispatchQueue.main)
metadataOutput.rectOfInterest = CGRect(x: 0, y: 0, width: 1, height: 1)
if captureSession.canAddOutput(metadataOutput) {
captureSession.addOutput(metadataOutput)
} else {
fatalError("Can't add metadata output to capture session")
}
metadataOutput.metadataObjectTypes = [.qr]
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
}
class MetadataDelegate: NSObject, AVCaptureMetadataOutputObjectsDelegate {
var previewLayer: AVCaptureVideoPreviewLayer?
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
guard let previewLayer = previewLayer else {
print("previewLayer was nil")
return
}
printMetadataOutput(output, didOutput: metadataObjects, previewLayer: previewLayer)
}
}
class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
private let metadataDelegate = MetadataDelegate()
override func viewDidLoad() {
let captureView = CaptureView(frame: CGRect(), delegate: self.metadataDelegate)
metadataDelegate.previewLayer = captureView.previewLayer
captureView.frame = self.view.frame
captureView.autoresizingMask = [.flexibleHeight, .flexibleWidth]
self.view.addSubview(captureView)
}
}
My Swift code should be able to take a snapshot of a video and then display that image in a UIImageView. Instead of using an online link, I just want the source to be the UIView (previewView) in my class. So the video URL should be previewView, not the https link that I have below. All the code below is in this class.
import UIKit
import AVFoundation
class ViewController: UIViewController, AVCapturePhotoCaptureDelegate {
@IBOutlet var previewView : UIView!
@IBOutlet var captureImageView : UIImageView!
var captureSession: AVCaptureSession!
var stillImageOutput: AVCapturePhotoOutput!
var videoPreviewLayer: AVCaptureVideoPreviewLayer!
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
// Setup your camera here...
captureSession = AVCaptureSession()
captureSession.sessionPreset = .medium
guard let backCamera = AVCaptureDevice.default(for: AVMediaType.video)
else {
print("Unable to access back camera!")
return
}
do {
let input = try AVCaptureDeviceInput(device: backCamera)
//Step 9
stillImageOutput = AVCapturePhotoOutput()
if captureSession.canAddInput(input) && captureSession.canAddOutput(stillImageOutput) {
captureSession.addInput(input)
captureSession.addOutput(stillImageOutput)
setupLivePreview()
}
}
catch let error {
print("Error Unable to initialize back camera: \(error.localizedDescription)")
}
}
func setupLivePreview() {
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
videoPreviewLayer.videoGravity = .resizeAspect
videoPreviewLayer.connection?.videoOrientation = .portrait
previewView.layer.addSublayer(videoPreviewLayer)
//Step12
DispatchQueue.global(qos: .userInitiated).async { //[weak self] in
self.captureSession.startRunning()
//Step 13
DispatchQueue.main.async {
self.videoPreviewLayer.frame = self.previewView.bounds
}
}
}
@IBAction func startRecord(_ sender: Any) {
}
@IBAction func Save(_ sender: Any) {
//what do I put in the 2 highlighted blocks
let videoURL = "https://www.youtube.com/watch?v=Txt25dw-lIk"
self.getThumbnailFromUrl(videoURL) { [weak self] (img) in
guard let _ = self else { return }
if let img = img {
self?.captureImageView.image = img
}
}
}
func getThumbnailFromUrl(_ url: String?, _ completion: @escaping ((_ image: UIImage?)->Void)) {
guard let url = URL(string: url ?? "") else { return }
DispatchQueue.main.async {
let asset = AVAsset(url: url)
let assetImgGenerate = AVAssetImageGenerator(asset: asset)
assetImgGenerate.appliesPreferredTrackTransform = true
let time = CMTimeMake(value: 2, timescale: 1)
do {
let img = try assetImgGenerate.copyCGImage(at: time, actualTime: nil)
let thumbnail = UIImage(cgImage: img)
completion(thumbnail)
} catch {
print("Error :: ", error.localizedDescription)
completion(nil)
}
}
}
@IBAction func didTakePhoto(_ sender: Any) {
let settings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
stillImageOutput.capturePhoto(with: settings, delegate: self)
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
guard let imageData = photo.fileDataRepresentation()
else { return }
let image = UIImage(data: imageData)
captureImageView.image = image
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
self.captureSession.stopRunning()
}
}
I would like to add a filter to each frame I record in real time and display the filtered image in a UIImageView; if anyone could help it would be nice.
But captureOutput is never called. Here is my code.
class Measurement: UIViewController , AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet weak var cameraPreview: UIView!
@IBOutlet weak var imageView: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
setupCameraSession()
toggleTorch(on: true)
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
view.layer.addSublayer(previewLayer)
cameraSession.startRunning()
}
lazy var cameraSession: AVCaptureSession = {
let s = AVCaptureSession()
s.sessionPreset = AVCaptureSession.Preset.low
return s
}()
lazy var previewLayer: AVCaptureVideoPreviewLayer = {
let preview = AVCaptureVideoPreviewLayer(session: self.cameraSession)
preview.position = CGPoint(x:182,y: 485)
preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
preview.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
preview.bounds = imageView.bounds
//preview.position = CGPoint(x:self.view.bounds.midX,y: self.view.bounds.midY)
imageView.layer.addSublayer(preview)
return preview
}()
func toggleTorch(on: Bool) {
guard let device = AVCaptureDevice.default(for: .video) else { return }
if device.hasTorch {
do {
try device.lockForConfiguration()
if on == true {
device.torchMode = .on
} else {
device.torchMode = .off
}
device.unlockForConfiguration()
} catch {
print("Torch could not be used")
}
} else {
print("Torch is not available")
}
}
func setupCameraSession() {
let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
do {
let deviceInput = try AVCaptureDeviceInput(device: captureDevice!)
cameraSession.beginConfiguration()
if (cameraSession.canAddInput(deviceInput) == true) {
cameraSession.addInput(deviceInput)
print("Processing Data.")
}
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString) : NSNumber(value: kCVPixelFormatType_32BGRA as UInt32)] as [String : AnyObject]
dataOutput.alwaysDiscardsLateVideoFrames = true
print("Processing Data.")
if (cameraSession.canAddOutput(dataOutput) == true) {
cameraSession.addOutput(dataOutput)
print("Processing Data.")
}
cameraSession.commitConfiguration()
let queue = DispatchQueue(label: "com.invasivecode.videoQueue")
dataOutput.setSampleBufferDelegate(self, queue: queue)
}
catch let error as NSError {
print("\(error), \(error.localizedDescription)")
}
}
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
print("Processing Data.")
guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
//let chromaKeyFilter = colorCubeFilterForChromaKey(hueAngle: 120)
let ciImage = CIImage(cvPixelBuffer: imageBuffer)
let context = CIContext()
guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return }
let image = UIImage(cgImage: cgImage)
if let chromaKeyFilter = CIFilter(name: "CISepiaTone") {
let beginImage = CIImage(image: image)
chromaKeyFilter.setValue(beginImage, forKey: kCIInputImageKey)
chromaKeyFilter.setValue(0.5, forKey: kCIInputIntensityKey)
if let output = chromaKeyFilter.outputImage {
if let cgimg = context.createCGImage(output, from: output.extent) {
let processedImage = UIImage(cgImage: cgimg)
// do something interesting with the processed image
imageView.image = processedImage
}
}
}
}
func captureOutput(_ captureOutput: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
// Here you can count how many frames are dopped
}
func startCapture() {
print("\(self.classForCoder)/" + #function)
if cameraSession.isRunning {
print("already running")
return
}
cameraSession.startRunning()
toggleTorch(on: true)
}
}
You need to set the sample buffer delegate on the video data output:
dataOutput.setSampleBufferDelegate(self, queue: queue)
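One additional thing worth checking (my assumption, beyond what the original answer says): in Swift 4 and later, captureOutput is only called if the delegate method matches the current AVCaptureVideoDataOutputSampleBufferDelegate signature exactly; the older didOutputSampleBuffer form with implicitly unwrapped optionals is treated as an unrelated method and is never invoked. The expected signature is:

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    // Frame-processing code goes here.
}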
I need to capture a barcode, but with my code the capture preview is full screen.
How can I give it a custom, smaller size?
Please share an idea or code for a custom size, thank you.
This is my code; the capture preview is full screen.
import UIKit
import AVFoundation
protocol BarcodeDelegate {
func barcodeReaded(barcode: String)
}
class barcodeCapViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
var delegate: BarcodeDelegate?
var captureSession: AVCaptureSession!
var code: String?
override func viewDidLoad() {
super.viewDidLoad()
self.captureSession = AVCaptureSession();
let videoCaptureDevice: AVCaptureDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
do {
let videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
if self.captureSession.canAddInput(videoInput) {
self.captureSession.addInput(videoInput)
} else {
print("Could not add video input")
}
let metadataOutput = AVCaptureMetadataOutput()
if self.captureSession.canAddOutput(metadataOutput) {
self.captureSession.addOutput(metadataOutput)
metadataOutput.setMetadataObjectsDelegate(self, queue: dispatch_get_main_queue())
metadataOutput.metadataObjectTypes = [AVMetadataObjectTypeQRCode, AVMetadataObjectTypeEAN13Code]
} else {
print("Could not add metadata output")
}
let previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
previewLayer.frame = self.view.layer.bounds
self.view.layer .addSublayer(previewLayer)
self.captureSession.startRunning()
} catch let error as NSError {
print("Error while creating vide input device: \(error.localizedDescription)")
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
func captureOutput(captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [AnyObject]!, fromConnection connection: AVCaptureConnection!) {
for metadata in metadataObjects {
let readableObject = metadata as! AVMetadataMachineReadableCodeObject
let code = readableObject.stringValue
if !code.isEmpty {
self.captureSession.stopRunning()
self.dismissViewControllerAnimated(true, completion: nil)
self.delegate?.barcodeReaded(code)
}
}
}
}
When I add CGRectMake(20, 40, 200, 50), it shows this:
When I add CGRectMake(20, 40, 500, 100), it shows this:
I don't know why the width and height don't follow the values in the code.
Change the frame size of your AVCaptureVideoPreviewLayer:
let previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
previewLayer.frame = CGRectMake(10, 20, 100, 50) // something else!
If you're using Auto Layout, you probably don't want to deal with CALayer frames, so you should create a UIView subclass, add your AVCaptureVideoPreviewLayer to it, and set the layer's frame in layoutSubviews:
override func layoutSubviews() {
super.layoutSubviews()
// The layer lives in this view's own coordinate space, so use bounds rather than frame
self.previewLayer.frame = self.bounds
}
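A minimal sketch of such a container view, assuming the capture session is configured elsewhere (the name PreviewContainerView and the session-injecting initializer are illustrative, not part of the original answer):

import UIKit
import AVFoundation

final class PreviewContainerView: UIView {
    let previewLayer = AVCaptureVideoPreviewLayer()

    init(session: AVCaptureSession) {
        super.init(frame: .zero)
        previewLayer.session = session
        previewLayer.videoGravity = .resizeAspectFill
        layer.addSublayer(previewLayer)
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    override func layoutSubviews() {
        super.layoutSubviews()
        // Keep the layer matched to this view's bounds; Auto Layout sizes the view itself.
        previewLayer.frame = bounds
    }
}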