I need to capture barcodes, but with my code the capture view is full screen.
How can I give it a custom, smaller size? Please share an idea or some code for a custom size, thank you.
This is my code, where the capture is full screen:
import UIKit
import AVFoundation

protocol BarcodeDelegate {
    func barcodeReaded(barcode: String)
}

class barcodeCapViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {

    var delegate: BarcodeDelegate?
    var captureSession: AVCaptureSession!
    var code: String?

    override func viewDidLoad() {
        super.viewDidLoad()

        self.captureSession = AVCaptureSession()
        let videoCaptureDevice: AVCaptureDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)

        do {
            let videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
            if self.captureSession.canAddInput(videoInput) {
                self.captureSession.addInput(videoInput)
            } else {
                print("Could not add video input")
            }

            let metadataOutput = AVCaptureMetadataOutput()
            if self.captureSession.canAddOutput(metadataOutput) {
                self.captureSession.addOutput(metadataOutput)
                metadataOutput.setMetadataObjectsDelegate(self, queue: dispatch_get_main_queue())
                metadataOutput.metadataObjectTypes = [AVMetadataObjectTypeQRCode, AVMetadataObjectTypeEAN13Code]
            } else {
                print("Could not add metadata output")
            }

            let previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
            previewLayer.frame = self.view.layer.bounds
            self.view.layer.addSublayer(previewLayer)
            self.captureSession.startRunning()
        } catch let error as NSError {
            print("Error while creating video input device: \(error.localizedDescription)")
        }
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    func captureOutput(captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [AnyObject]!, fromConnection connection: AVCaptureConnection!) {
        for metadata in metadataObjects {
            let readableObject = metadata as! AVMetadataMachineReadableCodeObject
            let code = readableObject.stringValue
            if !code.isEmpty {
                self.captureSession.stopRunning()
                self.dismissViewControllerAnimated(true, completion: nil)
                self.delegate?.barcodeReaded(code)
            }
        }
    }
}
When I add CGRectMake(20, 40, 200, 50) the preview shows one size, and when I add CGRectMake(20, 40, 500, 100) it shows another, but neither matches the values I pass.
I don't know why the width and height don't follow the code.
Change the frame size of your AVCaptureVideoPreviewLayer:
let previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
previewLayer.frame = CGRectMake(10, 20, 100, 50) // something else!
If you're using autolayout, you probably don't want to deal with CALayer frames so you should create a UIView subclass, add your AVCaptureVideoPreviewLayer to that and set the layer's frame in layoutSubviews:
override func layoutSubviews() {
    super.layoutSubviews()
    // Use bounds, not frame: the sublayer is positioned in the view's own coordinate space.
    self.previewLayer.frame = self.bounds
}
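To make that concrete, here is a minimal sketch of such a subclass (illustrative, not from the original answer): it owns the preview layer, attaches a session you create and configure elsewhere, and resizes the layer whenever the view is laid out, so it works with both manual frames and autolayout.
import UIKit
import AVFoundation

class PreviewView: UIView {
    let previewLayer: AVCaptureVideoPreviewLayer

    init(frame: CGRect, session: AVCaptureSession) {
        // The layer is created from the session you configured elsewhere.
        previewLayer = AVCaptureVideoPreviewLayer(session: session)
        super.init(frame: frame)
        layer.addSublayer(previewLayer)
    }

    required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    override func layoutSubviews() {
        super.layoutSubviews()
        // Keep the preview layer the same size as the view.
        previewLayer.frame = bounds
    }
}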
Hi, I am fairly new to coding and I ran into this error with AVCapturePhotoOutput. I am using Swift 5 and Xcode 12.5.1, running on an actual device, and I have been stuck on it for a while. The error only occurs when I try to capture a photo. Any help would be greatly appreciated, thank you :)
private let output = AVCapturePhotoOutput()
private var captureSession: AVCaptureSession?
private let previewLayer = AVCaptureVideoPreviewLayer()
private let cameraView = UIView()
private func setUpCamera() {
    let captureSession = AVCaptureSession()
    if let device = AVCaptureDevice.default(for: .video) {
        do {
            let input = try AVCaptureDeviceInput(device: device)
            if captureSession.canAddInput(input) {
                captureSession.addInput(input)
            }
            if captureSession.canAddOutput(output) {
                captureSession.addOutput(output)
            }
            // Layer
            previewLayer.session = captureSession
            previewLayer.videoGravity = .resizeAspectFill
            cameraView.layer.addSublayer(previewLayer)
            captureSession.startRunning()
            captureSession.sessionPreset = AVCaptureSession.Preset.high
            self.captureSession = captureSession
        }
        catch {
            print(error)
        }
    }
}
extension CameraViewController: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        guard let data = photo.fileDataRepresentation(),
              let image = UIImage(data: data) else {
            return
        }
        captureSession?.stopRunning()
        showEditPhoto(image: image)
    }

    private func showEditPhoto(image: UIImage) {
        guard let resizedImage = image.sd_resizedImage(
            with: CGSize(width: 640, height: 640),
            scaleMode: .aspectFill
        ) else {
            return
        }
        let vc = PostEditViewController(image: resizedImage)
        if #available(iOS 14.0, *) {
            vc.navigationItem.backButtonDisplayMode = .minimal
        }
        navigationController?.pushViewController(vc, animated: false)
    }
}
Try using cgImageRepresentation() instead of fileDataRepresentation(), and init a UIImage from the resulting CGImage.
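A minimal sketch of that suggestion, assuming the same delegate method as above (my adaptation, untested against the asker's project):
extension CameraViewController: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        // Build the UIImage from the CGImage representation instead of the file data.
        // On older SDKs cgImageRepresentation() is bridged as Unmanaged<CGImage>,
        // in which case you would call takeUnretainedValue() on the result.
        guard let cgImage = photo.cgImageRepresentation() else {
            return
        }
        // The CGImage carries no orientation, so supply one if your UI needs it.
        let image = UIImage(cgImage: cgImage)
        captureSession?.stopRunning()
        showEditPhoto(image: image)
    }
}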
The code below can run on an iPhone and, when aimed at a QR code, should print its contents (the one I tested with points to example.com).
The problem is that the delegate should supply the connection: AVCaptureConnection, and it does, but the connection's videoPreviewLayer property is nil.
The following code can be pasted into a new, empty Xcode project. If you disable the setMetadataObjectsDelegate call that passes the external delegate and enable the commented-out one that passes self, it works fine. But I want to place the delegate outside the CaptureView class. How do I set up the capturing so that the videoPreviewLayer property on the connection handed to the AVCaptureMetadataOutputObjectsDelegate is not nil?
import UIKit
import AVFoundation

func printMetadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], previewLayer: AVCaptureVideoPreviewLayer) {
    for object in metadataObjects {
        let visualCodeObject = previewLayer.transformedMetadataObject(for: object)
        guard let object = visualCodeObject, let barcode = object as? AVMetadataMachineReadableCodeObject else {
            NSLog("Ignoring object that is not AVMetadataMachineReadableCodeObject")
            continue
        }
        guard let barcodeString = barcode.stringValue else {
            NSLog("Captured something that's not a string")
            continue
        }
        NSLog("Captured string %@", barcodeString)
    }
}
class CaptureView: UIView, AVCaptureMetadataOutputObjectsDelegate {
    private let previewLayer = AVCaptureVideoPreviewLayer()

    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        printMetadataOutput(output, didOutput: metadataObjects, previewLayer: self.previewLayer)
    }

    override func layoutSubviews() {
        super.layoutSubviews()
        self.previewLayer.frame = self.frame
    }

    init(frame: CGRect, delegate: AVCaptureMetadataOutputObjectsDelegate) {
        guard let captureDevice = AVCaptureDevice.default(for: .video) else {
            fatalError("Couldn't find default capture device")
        }
        guard let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else {
            super.init(frame: frame)
            return
        }
        let captureSession = AVCaptureSession()
        captureSession.addInput(captureDeviceInput)
        self.previewLayer.session = captureSession
        self.previewLayer.videoGravity = .resizeAspectFill
        super.init(frame: frame)
        self.layer.addSublayer(self.previewLayer)
        self.previewLayer.frame = self.frame
        captureSession.startRunning()
        let metadataOutput = AVCaptureMetadataOutput()
        // metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        metadataOutput.setMetadataObjectsDelegate(delegate, queue: DispatchQueue.main)
        metadataOutput.rectOfInterest = CGRect(x: 0, y: 0, width: 1, height: 1)
        if captureSession.canAddOutput(metadataOutput) {
            captureSession.addOutput(metadataOutput)
        } else {
            fatalError("Can't add metadata output to capture session")
        }
        metadataOutput.metadataObjectTypes = [.qr]
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }
}

class MetadataDelegate: NSObject, AVCaptureMetadataOutputObjectsDelegate {
    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        guard let previewLayer = connection.videoPreviewLayer else {
            print("previewLayer was nil")
            return
        }
        printMetadataOutput(output, didOutput: metadataObjects, previewLayer: previewLayer)
    }
}

class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
    private let metadataDelegate = MetadataDelegate()

    override func viewDidLoad() {
        let captureView = CaptureView(frame: CGRect(), delegate: self.metadataDelegate)
        captureView.frame = self.view.frame
        captureView.autoresizingMask = [.flexibleHeight, .flexibleWidth]
        self.view.addSubview(captureView)
    }
}
As the videoPreviewLayer documentation states:
This property is set if you initialized the connection using init(inputPort:videoPreviewLayer:) or connectionWithInputPort:videoPreviewLayer:.
So in order to get a value in the videoPreviewLayer property, you have to set up the AVCaptureConnection object manually.
Instead, I would suggest hiding AVCaptureMetadataOutputObjectsDelegate behind a custom protocol that you declare:
protocol CaptureViewMetadataOutputObjectsDelegate {
    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection, previewLayer: AVCaptureVideoPreviewLayer)
}
Then implement the AVCaptureMetadataOutputObjectsDelegate protocol in your CaptureView and call your protocol's function, passing the required AVCaptureVideoPreviewLayer. Your code will look like this:
class CaptureView: UIView, AVCaptureMetadataOutputObjectsDelegate {
    private let previewLayer = AVCaptureVideoPreviewLayer()
    private let delegate: CaptureViewMetadataOutputObjectsDelegate

    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        delegate.metadataOutput(output, didOutput: metadataObjects, from: connection, previewLayer: previewLayer)
        // printMetadataOutput(output, didOutput: metadataObjects, previewLayer: self.previewLayer)
    }

    override func layoutSubviews() {
        super.layoutSubviews()
        self.previewLayer.frame = self.frame
    }

    init(frame: CGRect, delegate: CaptureViewMetadataOutputObjectsDelegate) {
        self.delegate = delegate
        guard let captureDevice = AVCaptureDevice.default(for: .video) else {
            fatalError("Couldn't find default capture device")
        }
        guard let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else {
            super.init(frame: frame)
            return
        }
        let captureSession = AVCaptureSession()
        captureSession.addInput(captureDeviceInput)
        self.previewLayer.session = captureSession
        self.previewLayer.videoGravity = .resizeAspectFill
        super.init(frame: frame)
        self.layer.addSublayer(self.previewLayer)
        self.previewLayer.frame = self.frame
        captureSession.startRunning()
        let metadataOutput = AVCaptureMetadataOutput()
        metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        // metadataOutput.setMetadataObjectsDelegate(delegate, queue: DispatchQueue.main)
        metadataOutput.rectOfInterest = CGRect(x: 0, y: 0, width: 1, height: 1)
        if captureSession.canAddOutput(metadataOutput) {
            captureSession.addOutput(metadataOutput)
        } else {
            fatalError("Can't add metadata output to capture session")
        }
        metadataOutput.metadataObjectTypes = [.qr]
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }
}

class MetadataDelegate: CaptureViewMetadataOutputObjectsDelegate {
    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection, previewLayer: AVCaptureVideoPreviewLayer) {
        printMetadataOutput(output, didOutput: metadataObjects, previewLayer: previewLayer)
    }
}

class ViewController: UIViewController {
    private let metadataDelegate = MetadataDelegate()

    override func viewDidLoad() {
        let captureView = CaptureView(frame: CGRect(), delegate: self.metadataDelegate)
        captureView.frame = self.view.frame
        captureView.autoresizingMask = [.flexibleHeight, .flexibleWidth]
        self.view.addSubview(captureView)
    }
}
Update: After some research I found this statement in the init(inputPort:videoPreviewLayer:) documentation:
When using addInput(_:) or addOutput(_:), connections are automatically formed between all compatible inputs and outputs. You do not need to manually create and add connections to the session unless you use the primitive addInputWithNoConnections(_:) and addOutputWithNoConnections(_:) methods.
That means that when you added the device camera as input and the AVCaptureMetadataOutput as output, all the compatible AVCaptureConnection objects were created automatically.
I tried to create another AVCaptureConnection using the following code:
if let port = captureDeviceInput.ports.first(where: { $0.mediaType == .video }) {
    let con = AVCaptureConnection(inputPort: port, videoPreviewLayer: self.previewLayer)
    if captureSession.canAddConnection(con) {
        captureSession.addConnection(con)
    }
}
but the canAddConnection(_:) function always returns false.
After that I printed the connections array of the AVCaptureSession and saw the following:
(lldb) po captureSession.connections
[<AVCaptureConnection: 0x280d67980 (AVCaptureDeviceInput: 0x280d119a0 Back Camera) -> (AVCaptureVideoPreviewLayer: 0x280d6ba40) [type:vide][enabled:1][active:1]>, <AVCaptureConnection: 0x280d7bee0 (AVCaptureDeviceInput: 0x280d119a0 Back Camera) -> (AVCaptureMetadataOutput: 0x280d700e0) [type:mobj][enabled:1][active:1]>]
So, an AVCaptureConnection has been created with the Back Camera as input and an AVCaptureVideoPreviewLayer instance (possibly the one you created, the previewLayer property), and another one with the Back Camera as input and the AVCaptureMetadataOutput that you passed to the AVCaptureSession.
The first one, obviously, does have a value in its videoPreviewLayer property:
(lldb) po captureSession.connections[0].videoPreviewLayer
▿ Optional<AVCaptureVideoPreviewLayer>
- some : <AVCaptureVideoPreviewLayer:0x280d6ba40; position = CGPoint (0 0); bounds = CGRect (0 0; 0 0); sublayers = (<CALayer: 0x280d6bc20>); masksToBounds = YES; allowsGroupOpacity = YES; inheritsTiming = NO; >
Apparently, the AVCaptureConnection instance that you get in the metadataOutput(_:didOutput:from:) function will always be the second one, the one associating the Back Camera with the AVCaptureMetadataOutput.
I tracked down the bug.
In fact, even if you enable this line:
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
then in the corresponding metadataOutput(_:didOutput:from:) of CaptureView, connection.videoPreviewLayer is still nil. As the developer documentation says:
This property is set if you initialized the connection using init(inputPort:videoPreviewLayer:) or connectionWithInputPort:videoPreviewLayer:.
So either way, connection.videoPreviewLayer will be nil.
I've updated your code a little to make it work the way you want.
import UIKit
import AVFoundation

func printMetadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], previewLayer: AVCaptureVideoPreviewLayer) {
    for object in metadataObjects {
        let visualCodeObject = previewLayer.transformedMetadataObject(for: object)
        guard let object = visualCodeObject, let barcode = object as? AVMetadataMachineReadableCodeObject else {
            NSLog("Ignoring object that is not AVMetadataMachineReadableCodeObject")
            continue
        }
        guard let barcodeString = barcode.stringValue else {
            NSLog("Captured something that's not a string")
            continue
        }
        NSLog("Captured string %@", barcodeString)
    }
}

class CaptureView: UIView, AVCaptureMetadataOutputObjectsDelegate {
    let previewLayer = AVCaptureVideoPreviewLayer()

    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        if connection.videoPreviewLayer == nil {
            print("connection.videoPreviewLayer was nil")
        }
        printMetadataOutput(output, didOutput: metadataObjects, previewLayer: self.previewLayer)
    }

    override func layoutSubviews() {
        super.layoutSubviews()
        self.previewLayer.frame = self.frame
    }

    init(frame: CGRect, delegate: AVCaptureMetadataOutputObjectsDelegate) {
        guard let captureDevice = AVCaptureDevice.default(for: .video) else {
            fatalError("Couldn't find default capture device")
        }
        guard let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else {
            super.init(frame: frame)
            return
        }
        let captureSession = AVCaptureSession()
        captureSession.addInput(captureDeviceInput)
        self.previewLayer.session = captureSession
        self.previewLayer.videoGravity = .resizeAspectFill
        super.init(frame: frame)
        self.layer.addSublayer(self.previewLayer)
        self.previewLayer.frame = self.frame
        captureSession.startRunning()
        let metadataOutput = AVCaptureMetadataOutput()
        // metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        metadataOutput.setMetadataObjectsDelegate(delegate, queue: DispatchQueue.main)
        metadataOutput.rectOfInterest = CGRect(x: 0, y: 0, width: 1, height: 1)
        if captureSession.canAddOutput(metadataOutput) {
            captureSession.addOutput(metadataOutput)
        } else {
            fatalError("Can't add metadata output to capture session")
        }
        metadataOutput.metadataObjectTypes = [.qr]
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }
}

class MetadataDelegate: NSObject, AVCaptureMetadataOutputObjectsDelegate {
    var previewLayer: AVCaptureVideoPreviewLayer?

    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        guard let previewLayer = previewLayer else {
            print("previewLayer was nil")
            return
        }
        printMetadataOutput(output, didOutput: metadataObjects, previewLayer: previewLayer)
    }
}

class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
    private let metadataDelegate = MetadataDelegate()

    override func viewDidLoad() {
        let captureView = CaptureView(frame: CGRect(), delegate: self.metadataDelegate)
        metadataDelegate.previewLayer = captureView.previewLayer
        captureView.frame = self.view.frame
        captureView.autoresizingMask = [.flexibleHeight, .flexibleWidth]
        self.view.addSubview(captureView)
    }
}
Here is my code:
import UIKit
import AVFoundation

class ViewController: UIViewController {

    @IBOutlet weak var cameraView: UIView!

    var image: UIImage!
    var captureSession = AVCaptureSession()
    var backCamera: AVCaptureDevice?
    var frontCamera: AVCaptureDevice?
    var currentCamera: AVCaptureDevice?
    var photoOutput: AVCapturePhotoOutput?
    var cameraPreviewLayer: AVCaptureVideoPreviewLayer?

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.
    }

    override func viewDidAppear(_ animated: Bool) {
        setupCaptureSession()
        setupDevice()
        setupInputOutput()
        setupPreviewLayer()
        startRunningCaptureSession()
    }

    @IBAction func cameraButton_Tab(_ sender: Any) {
        let settings = AVCapturePhotoSettings()
        // performSegue(withIdentifier: "showPhoto_Segue", sender: nil)
        photoOutput?.capturePhoto(with: settings, delegate: self)
    }

    func setupCaptureSession() {
        captureSession.sessionPreset = AVCaptureSession.Preset.photo
    }

    func setupDevice() {
        let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.unspecified)
        let devices = deviceDiscoverySession.devices
        for device in devices {
            if device.position == AVCaptureDevice.Position.back {
                backCamera = device
            } else if device.position == AVCaptureDevice.Position.front {
                frontCamera = device
            }
        }
        currentCamera = backCamera
    }

    func setupInputOutput() {
        do {
            let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
            captureSession.addInput(captureDeviceInput)
            photoOutput = AVCapturePhotoOutput()
            photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
            captureSession.addOutput(photoOutput!)
        } catch {
            print(error)
        }
    }

    func setupPreviewLayer() {
        cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
        cameraPreviewLayer!.frame = self.cameraView.bounds
        self.cameraView.layer.insertSublayer(cameraPreviewLayer!, at: 0)
    }

    func startRunningCaptureSession() {
        captureSession.startRunning()
    }
}

extension ViewController: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        if let imageData = photo.fileDataRepresentation() {
            image = UIImage(data: imageData)
        }
    }
}
See the image: I want to save only the area whose background color is yellow; I can see the camera through it.
But when I save the image, it seems the whole view is saved, not just the square.
If I make the UIImageView the same size as the yellow UIView and save the output, it captures the whole view and resizes it, squeezing the rectangle into a square.
How can I capture just the yellow area's size and save that?
This didFinishProcessingPhoto callback returns the complete image, i.e. what the camera sees. You won't directly get the image that is shown in your preview layer. So, in order to get a UIImage matching the shown preview layer, you can resize the captured image.
Resizing can be done in two ways: keeping the aspect ratio, or passing an exact size. I would recommend going with the aspect ratio, because it ensures that your image won't be squeezed or stretched, whereas passing a wrong exact size won't fulfil your requirement.
Resize UIImage passing new CGSize:
extension UIImage {
    func scaleImage(toSize newSize: CGSize) -> UIImage? {
        var newImage: UIImage?
        let newRect = CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height).integral
        UIGraphicsBeginImageContextWithOptions(newSize, false, 0)
        if let context = UIGraphicsGetCurrentContext(), let cgImage = self.cgImage {
            context.interpolationQuality = .high
            let flipVertical = CGAffineTransform(a: 1, b: 0, c: 0, d: -1, tx: 0, ty: newSize.height)
            context.concatenate(flipVertical)
            context.draw(cgImage, in: newRect)
            if let img = context.makeImage() {
                newImage = UIImage(cgImage: img)
            }
            UIGraphicsEndImageContext()
        }
        return newImage
    }
}
Usage: capturedImage.scaleImage(toSize: CGSize(width: 300, height: 300))
Resize UIImage keeping aspect ratio:
extension UIImage {
    func scaleImage(toWidth newWidth: CGFloat) -> UIImage {
        let scale = newWidth / self.size.width
        let newHeight = self.size.height * scale
        let newSize = CGSize(width: newWidth, height: newHeight)
        let renderer = UIGraphicsImageRenderer(size: newSize)
        let image = renderer.image { (context) in
            self.draw(in: CGRect(origin: CGPoint(x: 0, y: 0), size: newSize))
        }
        return image
    }
}
Usage: capturedImage.scaleImage(toWidth: 300)
Reference: Resize UIImage to 200x200pt/px
Update:
Keep the below method as it is in your code:
@IBAction func cameraButton_Tab(_ sender: Any) {
    let settings = AVCapturePhotoSettings()
    photoOutput?.capturePhoto(with: settings, delegate: self)
}
extension ViewController: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        if let imageData = photo.fileDataRepresentation(), let capturedImage = UIImage(data: imageData) {
            // Returns the image scaled to the width of the camera preview.
            let cropImage = capturedImage.scaleImage(toWidth: cameraPreviewLayer!.frame.size.width)
            // save or display cropImage here
        }
    }
}
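Note that scaling alone keeps the full camera frame. Because the preview layer uses .resizeAspectFill, what you see on screen is also cropped, so to match the yellow square you may want to center-crop before scaling. A rough sketch of that extra step (my addition, not part of the original answer; it ignores EXIF orientation for brevity):
extension UIImage {
    /// Center-crops the image to the given aspect ratio (width / height),
    /// approximating what an aspect-fill preview layer displays.
    func centerCropped(toAspectRatio ratio: CGFloat) -> UIImage? {
        guard let cgImage = self.cgImage else { return nil }
        let width = CGFloat(cgImage.width)
        let height = CGFloat(cgImage.height)
        var crop = CGRect(x: 0, y: 0, width: width, height: height)
        if width / height > ratio {
            // Wider than the target: trim the sides equally.
            crop.size.width = height * ratio
            crop.origin.x = (width - crop.size.width) / 2
        } else {
            // Taller than the target: trim top and bottom equally.
            crop.size.height = width / ratio
            crop.origin.y = (height - crop.size.height) / 2
        }
        guard let cropped = cgImage.cropping(to: crop) else { return nil }
        return UIImage(cgImage: cropped, scale: scale, orientation: imageOrientation)
    }
}
Usage (hypothetical): capturedImage.centerCropped(toAspectRatio: cameraPreviewLayer!.frame.width / cameraPreviewLayer!.frame.height), then scale the result as above.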
I would like to prevent lag when the app switches between video recording and photo taking, by using only AVCaptureMovieFileOutput and grabbing a snapshot from it when a photo is captured.
Just like Snapchat does.
Is it possible somehow? I haven't found any related articles about this.
I don't want to switch between outputs, because that lags.
The code:
@IBOutlet var cameraView: UIView!
@IBOutlet var cameraSwitchButton: UIButton!
@IBOutlet var captureButtonView: CaptureButton!
@IBOutlet var cameraFlashButton: UIButton!

var captureSession = AVCaptureSession()
let movieOutput = AVCaptureMovieFileOutput()
var activeInput: AVCaptureDeviceInput!
var previewLayer = AVCaptureVideoPreviewLayer()
var outputURL: URL!
var connection: AVCaptureConnection!

override func viewDidLoad() {
    if setupSession() {
        setupPreview()
        startSession()
        connection = movieOutput.connection(with: AVMediaType.video)
        if (connection?.isVideoStabilizationSupported)! {
            connection?.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.off
        }
    }
    let tapGesture = UITapGestureRecognizer(target: self, action: #selector(captureButtonTapped))
    let longGesture = UILongPressGestureRecognizer(target: self, action: #selector(captureButtonLongPressed))
    tapGesture.numberOfTapsRequired = 1
    captureButtonView.addGestureRecognizer(tapGesture)
    captureButtonView.addGestureRecognizer(longGesture)
}

@objc func captureButtonTapped() {
    // ?? TAKE PHOTO HERE ??
}

var isRecordingVideo: Bool = false

@objc func captureButtonLongPressed(sender: UILongPressGestureRecognizer) {
    if sender.state == .began {
        isRecordingVideo = true
        startRecording()
        captureButtonView.startTimer(duration: 10.0)
    }
    if sender.state == .ended || sender.state == .failed || sender.state == .cancelled {
        captureButtonView.clear()
        isRecordingVideo = false
        stopRecording()
    }
}

func setupPreview() {
    // Configure previewLayer
    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    previewLayer.frame = cameraView.bounds
    previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
    cameraView.layer.addSublayer(previewLayer)
}

//MARK:- Setup Camera

func setupSession() -> Bool {
    captureSession.sessionPreset = AVCaptureSession.Preset.high
    // Setup Camera
    let camera = AVCaptureDevice.default(for: AVMediaType.video)
    do {
        let input = try AVCaptureDeviceInput(device: camera!)
        if captureSession.canAddInput(input) {
            captureSession.addInput(input)
            activeInput = input
        }
    } catch {
        print("Error setting device video input: \(error)")
        return false
    }
    // Setup Microphone
    let microphone = AVCaptureDevice.default(for: AVMediaType.audio)
    do {
        let micInput = try AVCaptureDeviceInput(device: microphone!)
        if captureSession.canAddInput(micInput) {
            captureSession.addInput(micInput)
        }
    } catch {
        print("Error setting device audio input: \(error)")
        return false
    }
    // Movie output
    if captureSession.canAddOutput(movieOutput) {
        captureSession.addOutput(movieOutput)
    }
    return true
}

func setupCaptureMode(_ mode: Int) {
}

//MARK:- Camera Session

func startSession() {
    if !captureSession.isRunning {
        videoQueue().async {
            self.captureSession.startRunning()
        }
    }
}

func stopSession() {
    if captureSession.isRunning {
        videoQueue().async {
            self.captureSession.stopRunning()
        }
    }
}

func videoQueue() -> DispatchQueue {
    return DispatchQueue.main
}

func currentVideoOrientation() -> AVCaptureVideoOrientation {
    var orientation: AVCaptureVideoOrientation
    switch UIDevice.current.orientation {
    case .portrait:
        orientation = AVCaptureVideoOrientation.portrait
    case .landscapeRight:
        orientation = AVCaptureVideoOrientation.landscapeLeft
    case .portraitUpsideDown:
        orientation = AVCaptureVideoOrientation.portraitUpsideDown
    default:
        orientation = AVCaptureVideoOrientation.landscapeRight
    }
    return orientation
}

func startCapture() {
    startRecording()
}

func tempURL() -> URL? {
    let directory = NSTemporaryDirectory() as NSString
    if directory != "" {
        let path = directory.appendingPathComponent(NSUUID().uuidString + ".mp4")
        return URL(fileURLWithPath: path)
    }
    return nil
}

func startRecording() {
    if movieOutput.isRecording == false {
        if (connection?.isVideoOrientationSupported)! {
            connection?.videoOrientation = currentVideoOrientation()
        }
        let device = activeInput.device
        if (device.isSmoothAutoFocusSupported) {
            do {
                try device.lockForConfiguration()
                device.isSmoothAutoFocusEnabled = false
                device.unlockForConfiguration()
            } catch {
                print("Error setting configuration: \(error)")
            }
        }
        outputURL = tempURL()
        movieOutput.startRecording(to: outputURL, recordingDelegate: self)
    } else {
        stopRecording()
    }
}

func stopRecording() {
    if movieOutput.isRecording == true {
        movieOutput.stopRecording()
    }
}

func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {
    if (error != nil) {
        print("Error recording movie: \(error!.localizedDescription)")
    } else {
        UISaveVideoAtPathToSavedPhotosAlbum(outputURL.path, nil, nil, nil)
        _ = outputURL as URL
    }
    outputURL = nil
}
I wasn't able to find a way using only AVCaptureMovieFileOutput; however, you can add an additional photo output and trigger photos without having to switch between the outputs.
I'm short on time at the moment, but this should get you going until I can edit with more info.
(See EDIT below for the full implementation, with limited force unwrapping.)
First off, set up an additional var for a photo output in your view controller:
// declare an additional camera output var
var cameraOutput = AVCapturePhotoOutput()

// do this in your 'setupSession' func where you setup your movie output
cameraOutput.isHighResolutionCaptureEnabled = true
captureSession.addOutput(cameraOutput)
Declare a function to capture your photo using the cameraOutput:
func capturePhoto() {
    // create settings for your photo capture
    let settings = AVCapturePhotoSettings()
    let previewPixelType = settings.availablePreviewPhotoPixelFormatTypes.first!
    let previewFormat = [
        kCVPixelBufferPixelFormatTypeKey as String: previewPixelType,
        kCVPixelBufferWidthKey as String: UIScreen.main.bounds.size.width,
        kCVPixelBufferHeightKey as String: UIScreen.main.bounds.size.height
    ] as [String: Any]
    settings.previewPhotoFormat = previewFormat
    cameraOutput.capturePhoto(with: settings, delegate: self)
}
and conform to the AVCapturePhotoCaptureDelegate protocol.
I created a separate class called VideoFeed to manage the video capture session, so this sample is an extension of that class; I'll update with more info on this later.
The loadImage(data:) function calls a delegate with the image. You can ignore that call if you put this directly in your view controller, and save or do whatever you like with the generated photo:
extension VideoFeed: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
        guard error == nil else {
            print("Photo Error: \(String(describing: error))")
            return
        }
        guard let sampleBuffer = photoSampleBuffer,
              let previewBuffer = previewPhotoSampleBuffer,
              let outputData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: sampleBuffer, previewPhotoSampleBuffer: previewBuffer) else {
            print("Oops, unable to create jpeg image")
            return
        }
        print("captured photo...")
        loadImage(data: outputData)
    }

    func loadImage(data: Data) {
        let dataProvider = CGDataProvider(data: data as CFData)
        let cgImageRef: CGImage! = CGImage(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: .defaultIntent)
        let image = UIImage(cgImage: cgImageRef, scale: 1.0, orientation: UIImageOrientation.right)
        // do whatever you like with the generated image here...
        delegate?.processVideoSnapshot(image)
    }
}
EDIT:
Here's the complete implementation I used in my test project.
First I moved all the AVFoundation-specific code into its own VideoFeed class and created some callbacks to the view controller.
This separates concerns and limits the view controller's responsibilities to:
Adding the preview layer to the view
Triggering and handling the captured image/screenshot
Starting/stopping video file recording
Here's the ViewController implementation:
ViewController.swift
import UIKit
import AVFoundation

class ViewController: UIViewController, VideoFeedDelegate {

    @IBOutlet var cameraView: UIView!

    var videoFeed: VideoFeed?

    override func viewDidLoad() {
        super.viewDidLoad()
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        // end session
        videoFeed?.stopSession()
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        // request camera access
        AVCaptureDevice.requestAccess(for: AVMediaType.video) { [weak self] granted in
            guard granted else {
                // TODO: show UI stating camera cannot be used, update in settings app...
                print("Camera access denied")
                return
            }
            DispatchQueue.main.async {
                if self?.videoFeed == nil {
                    // video access was enabled so setup video feed
                    self?.videoFeed = VideoFeed(delegate: self)
                } else {
                    // video feed already available, restart session...
                    self?.videoFeed?.startSession()
                }
            }
        }
    }

    // MARK: VideoFeedDelegate

    func videoFeedSetup(with layer: AVCaptureVideoPreviewLayer) {
        // set the layer size
        layer.frame = cameraView.layer.bounds
        // add to view
        cameraView.layer.addSublayer(layer)
    }

    func processVideoSnapshot(_ image: UIImage?) {
        // validate
        guard let image = image else {
            return
        }
        // SAVE IMAGE HERE IF DESIRED
        // for now just showing in a lightbox/detail view controller
        let storyboard = UIStoryboard(name: "Main", bundle: Bundle(for: AppDelegate.self))
        let vc = storyboard.instantiateViewController(withIdentifier: "LightboxViewController") as! LightboxViewController
        vc.previewImage = image
        navigationController?.pushViewController(vc, animated: true)
    }

    @IBAction func captureButtonTapped(_ sender: Any) {
        // trigger photo capture from video feed...
        // this will trigger a callback to the function above with the captured image
        videoFeed?.capturePhoto()
    }
}
Here's the full implementation of the VideoFeed class.
Using this approach allows you to reuse the video functionality in other projects more easily without having it tightly coupled to the view controller.
VideoFeed.swift
import UIKit
import AVFoundation
/// Defines callbacks associated with the VideoFeed class. Notifies delegate of significant events.
protocol VideoFeedDelegate: class {

    /// Callback triggered when the preview layer for this class has been created and configured. Conforming objects should set and maintain a strong reference to this layer, otherwise it will be set to nil when the calling function finishes execution.
    ///
    /// - Parameter layer: The video preview layer associated with the active captureSession in the VideoFeed class.
    func videoFeedSetup(with layer: AVCaptureVideoPreviewLayer)

    /// Callback triggered when a snapshot of the video feed has been generated.
    ///
    /// - Parameter image: The snapshot image, or nil if image creation failed.
    func processVideoSnapshot(_ image: UIImage?)
}
class VideoFeed: NSObject {

    // MARK: Variables

    /// The capture session to be used in this class.
    var captureSession = AVCaptureSession()

    /// The preview layer associated with this session. This class has a
    /// weak reference to this layer, the delegate (usually a ViewController
    /// instance) should add this layer as a sublayer to its preview UIView.
    /// The delegate will have the strong reference to this preview layer.
    weak var previewLayer: AVCaptureVideoPreviewLayer?

    /// The output that handles saving the video stream to a file.
    var fileOutput: AVCaptureMovieFileOutput?

    /// A reference to the active video input
    var activeInput: AVCaptureDeviceInput?

    /// Output for capturing frame grabs of video feed
    var cameraOutput = AVCapturePhotoOutput()

    /// Delegate to receive callbacks about significant events triggered by this class.
    weak var delegate: VideoFeedDelegate?

    /// The capture connection associated with the fileOutput.
    /// Set when fileOutput is created.
    var connection: AVCaptureConnection?
    // MARK: Public accessors

    /// Public initializer. Accepts a delegate to receive callbacks with the preview layer and any snapshot images.
    ///
    /// - Parameter delegate: A reference to an object conforming to VideoFeedDelegate
    ///   to receive callbacks for significant events in this class.
    init(delegate: VideoFeedDelegate?) {
        self.delegate = delegate
        super.init()
        setupSession()
    }

    /// Public accessor to begin a capture session.
    public func startSession() {
        guard captureSession.isRunning == false else {
            return
        }
        captureSession.startRunning()
    }

    /// Public accessor to end the current capture session.
    public func stopSession() {
        // validate
        guard captureSession.isRunning else {
            return
        }
        // end file recording if the session ends and we're currently recording a video to file
        if let isRecording = fileOutput?.isRecording, isRecording {
            stopRecording()
        }
        captureSession.stopRunning()
    }

    /// Public accessor to begin file recording.
    public func startRecording() {
        guard fileOutput?.isRecording == false else {
            stopRecording()
            return
        }
        configureVideoOrientation()
        disableSmoothAutoFocus()
        guard let url = tempURL() else {
            print("Unable to start file recording, temp url generation failed.")
            return
        }
        fileOutput?.startRecording(to: url, recordingDelegate: self)
    }

    /// Public accessor to end file recording.
    public func stopRecording() {
        guard fileOutput?.isRecording == true else {
            return
        }
        fileOutput?.stopRecording()
    }

    /// Public accessor to trigger snapshot capture of video stream.
    public func capturePhoto() {
        // create settings object
        let settings = AVCapturePhotoSettings()
        // verify that we have a pixel format type available
        guard let previewPixelType = settings.availablePreviewPhotoPixelFormatTypes.first else {
            print("Unable to configure photo capture settings, 'availablePreviewPhotoPixelFormatTypes' has no available options.")
            return
        }
        let screensize = UIScreen.main.bounds.size
        // setup format configuration dictionary
        let previewFormat: [String: Any] = [
            kCVPixelBufferPixelFormatTypeKey as String: previewPixelType,
            kCVPixelBufferWidthKey as String: screensize.width,
            kCVPixelBufferHeightKey as String: screensize.height
        ]
        settings.previewPhotoFormat = previewFormat
        // trigger photo capture
        cameraOutput.capturePhoto(with: settings, delegate: self)
    }
    // MARK: Setup functions

    /// Handles configuration and setup of the session, inputs, video preview layer and outputs.
    /// If all are setup and configured it starts the session.
    internal func setupSession() {
        captureSession.sessionPreset = AVCaptureSession.Preset.high
        guard setupInputs() else {
            return
        }
        setupOutputs()
        setupVideoLayer()
        startSession()
    }

    /// Sets up capture inputs for this session.
    ///
    /// - Returns: Returns true if inputs are successfully setup, else false.
    internal func setupInputs() -> Bool {
        // only need access to this functionality within this function, so declare as sub-function
        func addInput(input: AVCaptureInput) {
            guard captureSession.canAddInput(input) else {
                return
            }
            captureSession.addInput(input)
        }
        do {
            if let camera = AVCaptureDevice.default(for: AVMediaType.video) {
                let input = try AVCaptureDeviceInput(device: camera)
                addInput(input: input)
                activeInput = input
            }
            // Setup Microphone
            if let microphone = AVCaptureDevice.default(for: AVMediaType.audio) {
                let micInput = try AVCaptureDeviceInput(device: microphone)
                addInput(input: micInput)
            }
            return true
        } catch {
            print("Error setting device video input: \(error)")
            return false
        }
    }

    internal func setupOutputs() {
        // only need access to this functionality within this function, so declare as sub-function
        func addOutput(output: AVCaptureOutput) {
            if captureSession.canAddOutput(output) {
                captureSession.addOutput(output)
            }
        }
        // file output
        let fileOutput = AVCaptureMovieFileOutput()
        addOutput(output: fileOutput)
        // keep a reference on the property so startRecording()/stopRecording() can reach it
        self.fileOutput = fileOutput
        if let connection = fileOutput.connection(with: .video), connection.isVideoStabilizationSupported {
            connection.preferredVideoStabilizationMode = .off
            self.connection = connection
        }
        cameraOutput.isHighResolutionCaptureEnabled = true
        addOutput(output: cameraOutput)
    }

    internal func setupVideoLayer() {
        let layer = AVCaptureVideoPreviewLayer(session: captureSession)
        layer.videoGravity = AVLayerVideoGravity.resizeAspectFill
        delegate?.videoFeedSetup(with: layer)
        previewLayer = layer
    }
    // MARK: Helper functions

    /// Creates a url in the temporary directory for file recording.
    ///
    /// - Returns: A file url if successful, else nil.
    internal func tempURL() -> URL? {
        let directory = NSTemporaryDirectory() as NSString
        if directory != "" {
            let path = directory.appendingPathComponent(NSUUID().uuidString + ".mp4")
            return URL(fileURLWithPath: path)
        }
        return nil
    }

    /// Disables smooth autofocus functionality on the active device,
    /// if the active device is set and 'isSmoothAutoFocusSupported'
    /// is supported for the currently set active device.
    internal func disableSmoothAutoFocus() {
        guard let device = activeInput?.device, device.isSmoothAutoFocusSupported else {
            return
        }
        do {
            try device.lockForConfiguration()
            device.isSmoothAutoFocusEnabled = false
            device.unlockForConfiguration()
        } catch {
            print("Error disabling smooth autofocus: \(error)")
        }
    }

    /// Sets the current AVCaptureVideoOrientation on the currently active connection if it's supported.
    internal func configureVideoOrientation() {
        guard let connection = connection, connection.isVideoOrientationSupported,
              let currentOrientation = AVCaptureVideoOrientation(rawValue: UIApplication.shared.statusBarOrientation.rawValue) else {
            return
        }
        connection.videoOrientation = currentOrientation
    }
}
// MARK: AVCapturePhotoCaptureDelegate
extension VideoFeed: AVCapturePhotoCaptureDelegate {

    // iOS 11+ processing
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        guard error == nil, let outputData = photo.fileDataRepresentation() else {
            print("Photo Error: \(String(describing: error))")
            return
        }
        print("captured photo...")
        loadImage(data: outputData)
    }

    // iOS < 11 processing
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
        if #available(iOS 11.0, *) {
            // nothing to do here as iOS 11 uses the callback above
        } else {
            guard error == nil else {
                print("Photo Error: \(String(describing: error))")
                return
            }
            guard let sampleBuffer = photoSampleBuffer,
                  let previewBuffer = previewPhotoSampleBuffer,
                  let outputData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: sampleBuffer, previewPhotoSampleBuffer: previewBuffer) else {
                print("Image creation from sample buffer/preview buffer failed.")
                return
            }
            print("captured photo...")
            loadImage(data: outputData)
        }
    }

    /// Creates a UIImage from Data object received from AVCapturePhotoOutput
    /// delegate callback and sends to the VideoFeedDelegate for handling.
    ///
    /// - Parameter data: Image data.
    internal func loadImage(data: Data) {
        guard let dataProvider = CGDataProvider(data: data as CFData), let cgImageRef: CGImage = CGImage(jpegDataProviderSource: dataProvider, decode: nil, shouldInterpolate: true, intent: .defaultIntent) else {
            return
        }
        let image = UIImage(cgImage: cgImageRef, scale: 1.0, orientation: UIImageOrientation.right)
        delegate?.processVideoSnapshot(image)
    }
}

extension VideoFeed: AVCaptureFileOutputRecordingDelegate {
    func fileOutput(_ output: AVCaptureFileOutput, didStartRecordingTo fileURL: URL, from connections: [AVCaptureConnection]) {
        print("Video recording started: \(fileURL.absoluteString)")
    }

    func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {
        guard error == nil else {
            print("Error recording movie: \(String(describing: error))")
            return
        }
        UISaveVideoAtPathToSavedPhotosAlbum(outputFileURL.path, nil, nil, nil)
    }
}
For anyone else making use of this, don't forget to add permissions to your info.plist for access to the camera, photo library and microphone.
<key>NSCameraUsageDescription</key>
<string>Let us use your camera</string>
<key>NSPhotoLibraryAddUsageDescription</key>
<string>save to images</string>
<key>NSMicrophoneUsageDescription</key>
<string>for sound in video</string>
I'm trying to capture the screen on a Mac with AVCaptureScreenInput, but the AVCaptureVideoDataOutput delegate method captureOutput is never called, and I'm not sure why. I do get a notification saying the capture session was started.
import Cocoa
import AVFoundation

class ViewController: NSViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    var captureSession: AVCaptureSession!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view.
    }

    override func viewWillAppear() {
        super.viewWillAppear()
        NSNotificationCenter.defaultCenter().addObserver(self, selector: #selector(ViewController.errorNotif), name: AVCaptureSessionRuntimeErrorNotification, object: nil)
        NSNotificationCenter.defaultCenter().addObserver(self, selector: #selector(ViewController.startedNotif), name: AVCaptureSessionDidStartRunningNotification, object: nil)
        startScreenCapture()
    }

    override func viewWillDisappear() {
        super.viewWillDisappear()
        NSNotificationCenter.defaultCenter().removeObserver(self)
    }

    func captureOutput(captureOutput: AVCaptureOutput!, didDropSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
        print("ignore frame, add code to handle later")
    }

    func startScreenCapture() {
        let displayId = CGMainDisplayID()
        captureSession = AVCaptureSession()
        if captureSession.canSetSessionPreset(AVCaptureSessionPresetHigh) {
            captureSession.sessionPreset = AVCaptureSessionPresetHigh
        }
        let captureScreenInput = AVCaptureScreenInput(displayID: displayId)
        if captureSession.canAddInput(captureScreenInput) {
            captureSession.addInput(captureScreenInput)
        } else {
            print("Could not add main display to capture input")
        }
        let output = AVCaptureVideoDataOutput()
        let queue = dispatch_queue_create("myQueue", DISPATCH_QUEUE_SERIAL)
        output.setSampleBufferDelegate(self, queue: queue)
        output.alwaysDiscardsLateVideoFrames = true
        output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as NSString: NSNumber(unsignedInt: kCVPixelFormatType_32BGRA)]
        captureSession.addOutput(output)
        captureSession.startRunning()
    }

    func errorNotif() {
        print("error starting capture")
    }

    func startedNotif() {
        print("started screen capture")
    }
}
I added a basic example using AVCaptureVideoDataOutputSampleBufferDelegate:
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    let videoQueue = DispatchQueue(label: "VIDEO_QUEUE")

    override func viewDidLoad() {
        super.viewDidLoad()
        let captureSession = AVCaptureSession()
        guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
        guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
        captureSession.addInput(input)
        captureSession.startRunning()
        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        view.layer.addSublayer(previewLayer)
        previewLayer.frame = view.frame
        let dataOutput = AVCaptureVideoDataOutput()
        dataOutput.setSampleBufferDelegate(self, queue: videoQueue)
        captureSession.addOutput(dataOutput)
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        print("Camera was able to capture a frame:", Date())
    }
}
You need to define the didOutputSampleBuffer delegate callback to actually receive the captured frames:
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
    print("captured \(sampleBuffer)")
}
p.s. I'm not sure about macOS, but viewWillAppear may not be a good place to do initialisation because on iOS at least it can be called multiple times.
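If it is called more than once, a simple guard (an illustrative sketch in the question's own Swift 2 style, not a confirmed fix for the missing frames) avoids building a second session:
override func viewWillAppear() {
    super.viewWillAppear()
    // Only build and start the capture session the first time the view appears;
    // viewWillAppear can run again when the window reappears.
    if captureSession == nil {
        startScreenCapture()
    }
}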