QR scanner in Swift 5

I built a QR scanner in Swift 5. It recognizes and scans the QR code, but it only displays the URL embedded in the code. Does anyone have any advice on how to make the link tappable so it opens in a browser?
This is the code I have for the scanner:
import UIKit
import AVFoundation
extension QRScannerController: AVCaptureMetadataOutputObjectsDelegate {
}
class QRScannerController: UIViewController {
var captureSession = AVCaptureSession()
var videoPreviewLayer: AVCaptureVideoPreviewLayer?
var qrcodeFrameView: UIView?
@IBOutlet var messageLabel: UILabel!
@IBOutlet var topBar: UIView!
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
// Check that the metadataObjects array contains at least one object
if metadataObjects.count == 0 {
qrcodeFrameView?.frame = CGRect.zero
messageLabel.text = "No QR Code is detected"
return
}
//Get the metadata object
let metadataObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
if metadataObj.type == AVMetadataObject.ObjectType.qr {
// If the found metadata is equal to the QR code metadata then update the status label's text and set the bounds
let barCodeObject = videoPreviewLayer?.transformedMetadataObject(for: metadataObj)
qrcodeFrameView?.frame = barCodeObject!.bounds
if metadataObj.stringValue != nil {
messageLabel.text = metadataObj.stringValue
}
}
}
override func viewDidLoad() {
super.viewDidLoad()
// get back camera for capture
guard let captureDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back) else {
print("Failed to get camera device")
return
}
do {
// Get an instance of the AVCaptureDeviceInput class using the previous device object
let input = try AVCaptureDeviceInput(device: captureDevice)
//Set the input device on the capture session
captureSession.addInput(input)
//Initialize an AVCaptureMetadataOutput object and set it as the output device for the capture session
let captureMetadataOutput = AVCaptureMetadataOutput()
captureSession.addOutput(captureMetadataOutput)
//Set delegate and use the default dispatch queue to execute the call back
captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
captureMetadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.qr]
// Initialize the video preview layer and add it as a sublayer to the viewPreview view's layer
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
videoPreviewLayer?.frame = view.layer.bounds
view.layer.addSublayer(videoPreviewLayer!)
//Start video capture
captureSession.startRunning()
// Move the message label and top bar to the front
view.bringSubviewToFront(messageLabel)
view.bringSubviewToFront(topBar)
// Initialize QR Code Frame to highlight the QR Code
qrcodeFrameView = UIView()
if let qrcodeFrameView = qrcodeFrameView {
qrcodeFrameView.layer.borderColor = UIColor.yellow.cgColor
qrcodeFrameView.layer.borderWidth = 2
view.addSubview(qrcodeFrameView)
view.bringSubviewToFront(qrcodeFrameView)
}
} catch {
print(error)
return
}
}
}
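
One way to make the scanned link open in a browser (a minimal sketch, not from the original thread) is to validate the scanned string with URL(string:) and hand it to UIApplication.shared.open inside the existing delegate callback. This assumes the QR payload is a well-formed URL and opens it as soon as the code is recognized:

if metadataObj.type == AVMetadataObject.ObjectType.qr {
    let barCodeObject = videoPreviewLayer?.transformedMetadataObject(for: metadataObj)
    qrcodeFrameView?.frame = barCodeObject?.bounds ?? .zero
    if let stringValue = metadataObj.stringValue {
        messageLabel.text = stringValue
        if let url = URL(string: stringValue) {
            // Stop the session so the same code isn't opened repeatedly.
            captureSession.stopRunning()
            // The delegate was registered on the main queue, so open(_:) is safe here.
            UIApplication.shared.open(url, options: [:], completionHandler: nil)
        }
    }
}

If you would rather let the user decide, present the URL in a UIAlertController action instead of opening it immediately.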


"Initializer for conditional binding...." after I fix, I get error "use of unresolved indentifier"

I've created a function called prepareCamera. It checks for device type, media type, and camera position.
When I use if let on my object availableDevices, I get the error "Initializer for conditional binding must have optional type". After going through Stack Overflow, I realized that my object is not an optional and that if let should only be used with optional types. When I remove the if let, I get the error "use of unresolved identifier".
My code is below. This is my first time using Stack Overflow, so bear with me, haha. Any help would be greatly appreciated.
import UIKit
import AVFoundation
class goliveViewController: UIViewController {
let captureSession = AVCaptureSession()
var previewLayer:CALayer!
var captureDevice:AVCaptureDevice!
override func viewDidLoad() {
super.viewDidLoad()
prepareCamera()
// Do any additional setup after loading the view.
}
func prepareCamera () {
captureSession.sessionPreset = AVCaptureSession.Preset.photo
if let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera] , mediaType: AVMediaType.video, position: .front).devices {
captureDevice = availableDevices.first
beginSession()
}
}
func beginSession () {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
captureSession.addInput(captureDeviceInput)
}catch {
print(error.localizedDescription)
}
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
self.previewLayer = previewLayer
self.view.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.view.layer.frame
captureSession.startRunning()
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString):NSNumber(value:kCVPixelFormatType_32BGRA)]
dataOutput.alwaysDiscardsLateVideoFrames = true
if captureSession.canAddOutput(dataOutput) {
captureSession.addOutput(dataOutput)
}
captureSession.commitConfiguration()
}
}
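
The devices property of AVCaptureDevice.DiscoverySession is a non-optional [AVCaptureDevice]; its first element is the optional you want to bind. Also, the { after AVCaptureVideoPreviewLayer(session:) starts a trailing closure, which is the source of the follow-on errors. Here is a sketch of how the two functions could look (an illustration, not tested on a device):

func prepareCamera() {
    captureSession.sessionPreset = AVCaptureSession.Preset.photo
    // .devices is [AVCaptureDevice] (not optional); bind .first, which is optional
    let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera],
                                                            mediaType: .video,
                                                            position: .front)
    if let device = discoverySession.devices.first {
        captureDevice = device
        beginSession()
    }
}

func beginSession() {
    captureSession.beginConfiguration()
    do {
        let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
        captureSession.addInput(captureDeviceInput)
    } catch {
        print(error.localizedDescription)
    }
    let dataOutput = AVCaptureVideoDataOutput()
    dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)] as [String: Any]
    dataOutput.alwaysDiscardsLateVideoFrames = true
    if captureSession.canAddOutput(dataOutput) {
        captureSession.addOutput(dataOutput)
    }
    captureSession.commitConfiguration()
    // Assign directly; no trailing closure after the initializer.
    let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    self.previewLayer = previewLayer
    view.layer.addSublayer(previewLayer)
    previewLayer.frame = view.layer.frame
    captureSession.startRunning()
}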

Initializer error in Camera App for Xcode in Swift

I am building an app similar to a camera app in Xcode 10.1 using Swift. To do this, I have imported AVFoundation and am close to finishing my code. However, on this line of code
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
which is in this block of code
func beginSession () {
do {
let captureDeviceInput = try AVCaptureDeviceInput( device: captureDevice!)
captureSession.addInput(captureDeviceInput)
} catch {
print(error.localizedDescription)
}
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
self.previewLayer = self.previewLayer
self.view.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.view.layer.frame
captureSession.startRunning()
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)] as [String : Any]
dataOutput.alwaysDiscardsLateVideoFrames = true
if captureSession.canAddOutput(dataOutput) {
captureSession.addOutput(dataOutput)
}
There appears an error that reads "Cannot invoke initializer for type 'AVCaptureVideoPreviewLayer' with an argument list of type '(session: AVCaptureSession, () -> ())'"
I don't exactly know what this means or how to fix it as I am relatively new to programming.
Where have you initialized captureSession? Also, the { on the line after the initializer starts a trailing closure, which is why the compiler sees an argument list of type '(session: AVCaptureSession, () -> ())'. AVCaptureVideoPreviewLayer(session:) takes no closure, so remove that brace and assign the layer directly.
Try something like this in your UIViewController:
var captureSession = AVCaptureSession()
var videoPreviewLayer: AVCaptureVideoPreviewLayer?
override func viewDidLoad() {
super.viewDidLoad()
beginSession()
}
func beginSession() {
// Get an instance of the AVCaptureDevice class to initialize a device object, providing video as the media type parameter.
if let captureDevice = AVCaptureDevice.default(for: AVMediaType.video) {
do {
// Get an instance of the AVCaptureDeviceInput class using the previous device object.
let input = try AVCaptureDeviceInput(device: captureDevice)
// Set the input device on the capture session.
captureSession.addInput(input)
// Initialize an AVCaptureVideoDataOutput object and set it as the output device for the capture session.
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)] as [String : Any]
dataOutput.alwaysDiscardsLateVideoFrames = true
if captureSession.canAddOutput(dataOutput) {
captureSession.addOutput(dataOutput)
}
// Initialize the video preview layer and add it as a sublayer to the viewPreview view's layer.
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
videoPreviewLayer?.frame = self.view.layer.bounds // It may be best to setup an UIView outlet instead of using self.view
self.view.layer.addSublayer(videoPreviewLayer!)
// Start video capture.
captureSession.startRunning()
} catch {
// If any error occurs, simply print it out and don't continue any more.
print(error)
return
}
}
}
Hope it helps!

Getting image from AVCaptureMovieFileOutput without switching

I would like to prevent the lag when the app switches between video recording and photo taking by using only AVCaptureMovieFileOutput and grabbing a snapshot from it when an image is captured.
Just like Snapchat does.
Is it possible somehow? I haven't found any related articles about this.
I don't want to switch between outputs, because switching lags.
The code:
@IBOutlet var cameraView: UIView!
@IBOutlet var cameraSwitchButton: UIButton!
@IBOutlet var captureButtonView: CaptureButton!
@IBOutlet var cameraFlashButton: UIButton!
var captureSession = AVCaptureSession()
let movieOutput = AVCaptureMovieFileOutput()
var activeInput: AVCaptureDeviceInput!
var previewLayer = AVCaptureVideoPreviewLayer()
var outputURL: URL!
var connection : AVCaptureConnection!
override func viewDidLoad() {
if setupSession() {
setupPreview()
startSession()
connection = movieOutput.connection(with: AVMediaType.video)
if (connection?.isVideoStabilizationSupported)! {
connection?.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.off
}
}
let tapGesture = UITapGestureRecognizer(target: self, action: #selector(captureButtonTapped))
let longGesture = UILongPressGestureRecognizer(target: self, action: #selector(captureButtonLongPressed))
tapGesture.numberOfTapsRequired = 1
captureButtonView.addGestureRecognizer(tapGesture)
captureButtonView.addGestureRecognizer(longGesture)
}
@objc func captureButtonTapped(){
?? TAKE PHOTO HERE ??
}
var isRecordingVideo : Bool = false
@objc func captureButtonLongPressed(sender : UILongPressGestureRecognizer){
if sender.state == .began {
isRecordingVideo = true
startRecording()
captureButtonView.startTimer(duration: 10.0)
}
if sender.state == .ended || sender.state == .failed || sender.state == .cancelled {
captureButtonView.clear()
isRecordingVideo = false
stopRecording()
}
}
func setupPreview() {
// Configure previewLayer
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.frame = cameraView.bounds
previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraView.layer.addSublayer(previewLayer)
}
//MARK:- Setup Camera
func setupSession() -> Bool {
captureSession.sessionPreset = AVCaptureSession.Preset.high
// Setup Camera
let camera = AVCaptureDevice.default(for: AVMediaType.video)
do {
let input = try AVCaptureDeviceInput(device: camera!)
if captureSession.canAddInput(input) {
captureSession.addInput(input)
activeInput = input
}
} catch {
print("Error setting device video input: \(error)")
return false
}
// Setup Microphone
let microphone = AVCaptureDevice.default(for: AVMediaType.audio)
do {
let micInput = try AVCaptureDeviceInput(device: microphone!)
if captureSession.canAddInput(micInput) {
captureSession.addInput(micInput)
}
} catch {
print("Error setting device audio input: \(error)")
return false
}
// Movie output
if captureSession.canAddOutput(movieOutput) {
captureSession.addOutput(movieOutput)
}
return true
}
func setupCaptureMode(_ mode: Int) {
}
//MARK:- Camera Session
func startSession() {
if !captureSession.isRunning {
videoQueue().async {
self.captureSession.startRunning()
}
}
}
func stopSession() {
if captureSession.isRunning {
videoQueue().async {
self.captureSession.stopRunning()
}
}
}
func videoQueue() -> DispatchQueue {
return DispatchQueue.main
}
func currentVideoOrientation() -> AVCaptureVideoOrientation {
var orientation: AVCaptureVideoOrientation
switch UIDevice.current.orientation {
case .portrait:
orientation = AVCaptureVideoOrientation.portrait
case .landscapeRight:
orientation = AVCaptureVideoOrientation.landscapeLeft
case .portraitUpsideDown:
orientation = AVCaptureVideoOrientation.portraitUpsideDown
default:
orientation = AVCaptureVideoOrientation.landscapeRight
}
return orientation
}
func startCapture() {
startRecording()
}
func tempURL() -> URL? {
let directory = NSTemporaryDirectory() as NSString
if directory != "" {
let path = directory.appendingPathComponent(NSUUID().uuidString + ".mp4")
return URL(fileURLWithPath: path)
}
return nil
}
func startRecording() {
if movieOutput.isRecording == false {
if (connection?.isVideoOrientationSupported)! {
connection?.videoOrientation = currentVideoOrientation()
}
let device = activeInput.device
if (device.isSmoothAutoFocusSupported) {
do {
try device.lockForConfiguration()
device.isSmoothAutoFocusEnabled = false
device.unlockForConfiguration()
} catch {
print("Error setting configuration: \(error)")
}
}
outputURL = tempURL()
movieOutput.startRecording(to: outputURL, recordingDelegate: self)
}
else {
stopRecording()
}
}
func stopRecording() {
if movieOutput.isRecording == true {
movieOutput.stopRecording()
}
}
func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {
if (error != nil) {
print("Error recording movie: \(error!.localizedDescription)")
} else {
UISaveVideoAtPathToSavedPhotosAlbum(outputURL.path, nil, nil, nil)
_ = outputURL as URL
}
outputURL = nil
}
I wasn't able to find a way using only AVCaptureMovieFileOutput, however you can add an additional photo output and trigger photos without having to switch between the outputs.
I'm short on time at the moment but this should get you going till I can edit with more info.
(See EDIT with full implementation below, and limited force unwrapping)
First off setup an additional var for a photo output in your view controller
// declare an additional camera output var
var cameraOutput = AVCapturePhotoOutput()
// do this in your 'setupSession' func where you setup your movie output
cameraOutput.isHighResolutionCaptureEnabled = true
captureSession.addOutput(cameraOutput)
Declare a function to capture your photo using the cameraOutput:
func capturePhoto() {
// create settings for your photo capture
let settings = AVCapturePhotoSettings()
let previewPixelType = settings.availablePreviewPhotoPixelFormatTypes.first!
let previewFormat = [
kCVPixelBufferPixelFormatTypeKey as String: previewPixelType,
kCVPixelBufferWidthKey as String: UIScreen.main.bounds.size.width,
kCVPixelBufferHeightKey as String: UIScreen.main.bounds.size.height
] as [String : Any]
settings.previewPhotoFormat = previewFormat
cameraOutput.capturePhoto(with: settings, delegate: self)
}
and conform to the AVCapturePhotoCaptureDelegate.
I created a separate class called VideoFeed to manage the video capture session, so this sample is an extension of that class. I'll update with more info on this later.
The loadImage(data: Data) function calls a delegate with the image. You can ignore that call if you put this directly in your view controller, and save or do whatever you like with the generated photo:
extension VideoFeed: AVCapturePhotoCaptureDelegate {
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
guard error == nil else {
print("Photo Error: \(String(describing: error))")
return
}
guard let sampleBuffer = photoSampleBuffer,
let previewBuffer = previewPhotoSampleBuffer,
let outputData = AVCapturePhotoOutput
.jpegPhotoDataRepresentation(forJPEGSampleBuffer: sampleBuffer, previewPhotoSampleBuffer: previewBuffer) else {
print("Oops, unable to create jpeg image")
return
}
print("captured photo...")
loadImage(data: outputData)
}
func loadImage(data: Data) {
let dataProvider = CGDataProvider(data: data as CFData)
let cgImageRef: CGImage! = CGImage(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: .defaultIntent)
let image = UIImage(cgImage: cgImageRef, scale: 1.0, orientation: UIImageOrientation.right)
// do whatever you like with the generated image here...
delegate?.processVideoSnapshot(image)
}
}
EDIT:
Here's the complete implementation I used in my test project.
First I moved all the AVFoundation-specific code into its own VideoFeed class and created some callbacks to the view controller.
This separates concerns and limits the view controller's responsibilities to:
Adding the preview layer to the view
Triggering and handling the captured image/screenshot
Starting/stopping video file recording.
Here's the ViewController implementation:
ViewController.swift
import UIKit
import AVFoundation
class ViewController: UIViewController, VideoFeedDelegate {
#IBOutlet var cameraView: UIView!
var videoFeed: VideoFeed?
override func viewDidLoad() {
super.viewDidLoad()
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
// end session
videoFeed?.stopSession()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
// request camera access
AVCaptureDevice.requestAccess(for: AVMediaType.video) { [weak self] granted in
guard granted != false else {
// TODO: show UI stating camera cannot be used, update in settings app...
print("Camera access denied")
return
}
DispatchQueue.main.async {
if self?.videoFeed == nil {
// video access was enabled so setup video feed
self?.videoFeed = VideoFeed(delegate: self)
} else {
// video feed already available, restart session...
self?.videoFeed?.startSession()
}
}
}
}
// MARK: VideoFeedDelegate
func videoFeedSetup(with layer: AVCaptureVideoPreviewLayer) {
// set the layer size
layer.frame = cameraView.layer.bounds
// add to view
cameraView.layer.addSublayer(layer)
}
func processVideoSnapshot(_ image: UIImage?) {
// validate
guard let image = image else {
return
}
// SAVE IMAGE HERE IF DESIRED
// for now just showing in a lightbox/detail view controller
let storyboard = UIStoryboard(name: "Main", bundle: Bundle(for: AppDelegate.self))
let vc = storyboard.instantiateViewController(withIdentifier: "LightboxViewController") as! LightboxViewController
vc.previewImage = image
navigationController?.pushViewController(vc, animated: true)
}
#IBAction func captureButtonTapped(_ sender: Any){
// trigger photo capture from video feed...
// this will trigger a callback to the function above with the captured image
videoFeed?.capturePhoto()
}
}
Here's the full implementation of the VideoFeed class.
Using this approach allows you to reuse the video functionality in other projects more easily without having it tightly coupled to the view controller.
VideoFeed.swift
import UIKit
import AVFoundation
/// Defines callbacks associated with the VideoFeed class. Notifies delegate of significant events.
protocol VideoFeedDelegate: class {
/// Callback triggered when the preview layer for this class has been created and configured. Conforming objects should set and maintain a strong reference to this layer otherwise it will be set to nil when the calling function finishes execution.
///
/// - Parameter layer: The video preview layer associated with the active captureSession in the VideoFeed class.
func videoFeedSetup(with layer: AVCaptureVideoPreviewLayer)
/// Callback triggered when a snapshot of the video feed has been generated.
///
/// - Parameter image: The snapshot image generated from the video feed, or nil if it could not be created.
func processVideoSnapshot(_ image: UIImage?)
}
class VideoFeed: NSObject {
// MARK: Variables
/// The capture session to be used in this class.
var captureSession = AVCaptureSession()
/// The preview layer associated with this session. This class has a
/// weak reference to this layer, the delegate (usually a ViewController
/// instance) should add this layer as a sublayer to its preview UIView.
/// The delegate will have the strong reference to this preview layer.
weak var previewLayer: AVCaptureVideoPreviewLayer?
/// The output that handles saving the video stream to a file.
var fileOutput: AVCaptureMovieFileOutput?
/// A reference to the active video input
var activeInput: AVCaptureDeviceInput?
/// Output for capturing frame grabs of video feed
var cameraOutput = AVCapturePhotoOutput()
/// Delegate to receive callbacks about significant events triggered by this class.
weak var delegate: VideoFeedDelegate?
/// The capture connection associated with the fileOutput.
/// Set when fileOutput is created.
var connection : AVCaptureConnection?
// MARK: Public accessors
/// Public initializer. Accepts a delegate to receive callbacks with the preview layer and any snapshot images.
///
/// - Parameter delegate: A reference to an object conforming to VideoFeedDelegate
/// to receive callbacks for significant events in this class.
init(delegate: VideoFeedDelegate?) {
self.delegate = delegate
super.init()
setupSession()
}
/// Public accessor to begin a capture session.
public func startSession() {
guard captureSession.isRunning == false else {
return
}
captureSession.startRunning()
}
/// Public accessor to end the current capture session.
public func stopSession() {
// validate
guard captureSession.isRunning else {
return
}
// end file recording if the session ends and we're currently recording a video to file
if let isRecording = fileOutput?.isRecording, isRecording {
stopRecording()
}
captureSession.stopRunning()
}
/// Public accessor to begin file recording.
public func startRecording() {
guard fileOutput?.isRecording == false else {
stopRecording()
return
}
configureVideoOrientation()
disableSmoothAutoFocus()
guard let url = tempURL() else {
print("Unable to start file recording, temp url generation failed.")
return
}
fileOutput?.startRecording(to: url, recordingDelegate: self)
}
/// Public accessor to end file recording.
public func stopRecording() {
guard fileOutput?.isRecording == true else {
return
}
fileOutput?.stopRecording()
}
/// Public accessor to trigger snapshot capture of video stream.
public func capturePhoto() {
// create settings object
let settings = AVCapturePhotoSettings()
// verify that we have a pixel format type available
guard let previewPixelType = settings.availablePreviewPhotoPixelFormatTypes.first else {
print("Unable to configure photo capture settings, 'availablePreviewPhotoPixelFormatTypes' has no available options.")
return
}
let screensize = UIScreen.main.bounds.size
// setup format configuration dictionary
let previewFormat: [String : Any] = [
kCVPixelBufferPixelFormatTypeKey as String: previewPixelType,
kCVPixelBufferWidthKey as String: screensize.width,
kCVPixelBufferHeightKey as String: screensize.height
]
settings.previewPhotoFormat = previewFormat
// trigger photo capture
cameraOutput.capturePhoto(with: settings, delegate: self)
}
// MARK: Setup functions
/// Handles configuration and setup of the session, inputs, video preview layer and outputs.
/// If all are setup and configured it starts the session.
internal func setupSession() {
captureSession.sessionPreset = AVCaptureSession.Preset.high
guard setupInputs() else {
return
}
setupOutputs()
setupVideoLayer()
startSession()
}
/// Sets up capture inputs for this session.
///
/// - Returns: Returns true if inputs are successfully setup, else false.
internal func setupInputs() -> Bool {
// only need access to this functionality within this function, so declare as sub-function
func addInput(input: AVCaptureInput) {
guard captureSession.canAddInput(input) else {
return
}
captureSession.addInput(input)
}
do {
if let camera = AVCaptureDevice.default(for: AVMediaType.video) {
let input = try AVCaptureDeviceInput(device: camera)
addInput(input: input)
activeInput = input
}
// Setup Microphone
if let microphone = AVCaptureDevice.default(for: AVMediaType.audio) {
let micInput = try AVCaptureDeviceInput(device: microphone)
addInput(input: micInput)
}
return true
} catch {
print("Error setting device video input: \(error)")
return false
}
}
internal func setupOutputs() {
// only need access to this functionality within this function, so declare as sub-function
func addOutput(output: AVCaptureOutput) {
if captureSession.canAddOutput(output) {
captureSession.addOutput(output)
}
}
// file output
let fileOutput = AVCaptureMovieFileOutput()
addOutput(output: fileOutput)
// keep a reference to the output so startRecording/stopRecording can use it
self.fileOutput = fileOutput
if let connection = fileOutput.connection(with: .video), connection.isVideoStabilizationSupported {
connection.preferredVideoStabilizationMode = .off
self.connection = connection
}
cameraOutput.isHighResolutionCaptureEnabled = true
addOutput(output: cameraOutput)
}
internal func setupVideoLayer() {
let layer = AVCaptureVideoPreviewLayer(session: captureSession)
layer.videoGravity = AVLayerVideoGravity.resizeAspectFill
delegate?.videoFeedSetup(with: layer)
previewLayer = layer
}
// MARK: Helper functions
/// Creates a url in the temporary directory for file recording.
///
/// - Returns: A file url if successful, else nil.
internal func tempURL() -> URL? {
let directory = NSTemporaryDirectory() as NSString
if directory != "" {
let path = directory.appendingPathComponent(NSUUID().uuidString + ".mp4")
return URL(fileURLWithPath: path)
}
return nil
}
/// Disables smooth autofocus functionality on the active device,
/// if the active device is set and 'isSmoothAutoFocusSupported'
/// is supported for the currently set active device.
internal func disableSmoothAutoFocus() {
guard let device = activeInput?.device, device.isSmoothAutoFocusSupported else {
return
}
do {
try device.lockForConfiguration()
device.isSmoothAutoFocusEnabled = false
device.unlockForConfiguration()
} catch {
print("Error disabling smooth autofocus: \(error)")
}
}
/// Sets the current AVCaptureVideoOrientation on the currently active connection if it's supported.
internal func configureVideoOrientation() {
guard let connection = connection, connection.isVideoOrientationSupported,
let currentOrientation = AVCaptureVideoOrientation(rawValue: UIApplication.shared.statusBarOrientation.rawValue) else {
return
}
connection.videoOrientation = currentOrientation
}
}
// MARK: AVCapturePhotoCaptureDelegate
extension VideoFeed: AVCapturePhotoCaptureDelegate {
// iOS 11+ processing
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
guard error == nil, let outputData = photo.fileDataRepresentation() else {
print("Photo Error: \(String(describing: error))")
return
}
print("captured photo...")
loadImage(data: outputData)
}
// iOS < 11 processing
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
if #available(iOS 11.0, *) {
// use iOS 11-only feature
// nothing to do here as iOS 11 uses the callback above
} else {
guard error == nil else {
print("Photo Error: \(String(describing: error))")
return
}
guard let sampleBuffer = photoSampleBuffer,
let previewBuffer = previewPhotoSampleBuffer,
let outputData = AVCapturePhotoOutput
.jpegPhotoDataRepresentation(forJPEGSampleBuffer: sampleBuffer, previewPhotoSampleBuffer: previewBuffer) else {
print("Image creation from sample buffer/preview buffer failed.")
return
}
print("captured photo...")
loadImage(data: outputData)
}
}
/// Creates a UIImage from Data object received from AVCapturePhotoOutput
/// delegate callback and sends to the VideoFeedDelegate for handling.
///
/// - Parameter data: Image data.
internal func loadImage(data: Data) {
guard let dataProvider = CGDataProvider(data: data as CFData), let cgImageRef: CGImage = CGImage(jpegDataProviderSource: dataProvider, decode: nil, shouldInterpolate: true, intent: .defaultIntent) else {
return
}
let image = UIImage(cgImage: cgImageRef, scale: 1.0, orientation: UIImageOrientation.right)
delegate?.processVideoSnapshot(image)
}
}
extension VideoFeed: AVCaptureFileOutputRecordingDelegate {
func fileOutput(_ output: AVCaptureFileOutput, didStartRecordingTo fileURL: URL, from connections: [AVCaptureConnection]) {
print("Video recording started: \(fileURL.absoluteString)")
}
func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {
guard error == nil else {
print("Error recording movie: \(String(describing: error))")
return
}
UISaveVideoAtPathToSavedPhotosAlbum(outputFileURL.path, nil, nil, nil)
}
}
For anyone else making use of this, don't forget to add permissions to your Info.plist for access to the camera, photo library, and microphone.
<key>NSCameraUsageDescription</key>
<string>Let us use your camera</string>
<key>NSPhotoLibraryAddUsageDescription</key>
<string>save to images</string>
<key>NSMicrophoneUsageDescription</key>
<string>for sound in video</string>

AVFoundation PDF417 scanner doesn't always work

I am creating an app using Swift 4 and Xcode 9 that scans PDF417 barcodes using AVFoundation. The scanner works with some codes but doesn't recognize the PDF417 barcode that you would find on the front of a CA Lottery scratchers ticket for example.
Is there anything I am missing to make it work? Below is my code:
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: AVMediaType.video, position: .back)
guard let captureDevice = deviceDiscoverySession.devices.first else {
print("Failed to get the camera device")
return
}
do {
captureSession = AVCaptureSession()
let input = try AVCaptureDeviceInput(device: captureDevice)
captureSession!.addInput(input)
let captureMetadataOutput = AVCaptureMetadataOutput()
captureSession!.addOutput(captureMetadataOutput)
captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
captureMetadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.pdf417]
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
videoPreviewLayer?.frame = view.layer.bounds
view.layer.addSublayer(videoPreviewLayer!)
captureSession?.startRunning()
} catch {
print(error)
return
}
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
//Get the metadata object
let metadataObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
if scanType.contains(metadataObj.type) {
let barCodeObj = videoPreviewLayer?.transformedMetadataObject(for: metadataObj)
if(metadataObj.stringValue != nil) {
callDelegate(metadataObj.stringValue)
captureSession?.stopRunning()
AudioServicesPlayAlertSound(SystemSoundID(kSystemSoundID_Vibrate))
navigationController?.popViewController(animated: true)
}
}
}
Thanks!
Replace your initialization code for the scanner with the following code either in your viewDidLoad or some method that you'd like it to be in
// Global vars used in init below
var captureSession: AVCaptureSession!
var previewLayer: AVCaptureVideoPreviewLayer!
func setupCaptureInputDevice() {
captureSession = AVCaptureSession()
// get the default video capture device
guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else {
// if there is an error then something is wrong, so dismiss
dismiss(animated: true, completion: nil)
return
}
let videoInput: AVCaptureDeviceInput
// create a capture input for the above device input that was created
do {
videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
} catch {
return
}
// check that the session can accept the input before adding it;
// adding an input without this check can raise an exception
if (captureSession.canAddInput(videoInput)) {
captureSession.addInput(videoInput)
} else {
// dismiss or display error
return
}
// get ready to capture output somewhere
let metadataOutput = AVCaptureMetadataOutput()
// again check to make sure that we can do this operation before doing it
if (captureSession.canAddOutput(metadataOutput)) {
captureSession.addOutput(metadataOutput)
// setting the metadataOutput's delegate to be self and then requesting it run on the main thread
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
// specify your code type
metadataOutput.metadataObjectTypes = [.pdf417]
} else {
// dismiss or display error
return
}
// create a preview layer for the capture session
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
// just add it to the screen
previewLayer.frame = view.layer.bounds
previewLayer.videoGravity = .resizeAspectFill
view.layer.addSublayer(previewLayer)
// and start the session
captureSession.startRunning()
}
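
If the scanner still misses some PDF417 codes after this change, one further tweak worth trying (a suggestion beyond the original answer, using standard AVFoundation API) is to narrow the metadata output's scan region so the detector concentrates on a band of the preview. Note that rectOfInterest is expressed in the output's normalized coordinate space, so convert from layer coordinates, and do it after startRunning() so the conversion has valid geometry:

// A horizontal band across the middle of the preview (layer coordinates).
let scanRect = CGRect(x: 0, y: view.bounds.midY - 100, width: view.bounds.width, height: 200)
// Convert to the normalized, rotated coordinate space the metadata output expects.
metadataOutput.rectOfInterest = previewLayer.metadataOutputRectConverted(fromLayerRect: scanRect)

Here metadataOutput refers to the output created in setupCaptureInputDevice; if you set the rect outside that function, keep the output in a property.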

AVCapturePhoto: Why is my photo always nil?

I'm trying to implement a custom camera similar to Snapchat's. I cannot see why my image is always nil during the segue. Maybe a fresh pair of eyes will help, as I have been working on this for two days now...
When I tap the take-photo button (which runs livePhotoTapped), the app crashes with "fatal error: unexpectedly found nil while unwrapping an Optional value", referring to the image being nil.
Any help would be nice :)
@IBOutlet weak var cameraView: UIView!
//session to capture data
var captureSession = AVCaptureSession()
//which camera to use
var backFacingCamera: AVCaptureDevice?
var frontFacingCamera: AVCaptureDevice?
var currentDevice: AVCaptureDevice?
var stillImageOutput: AVCaptureStillImageOutput?
var stillImage: UIImage?
//camera preview layer
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
func setupCaptureSessionCamera() {
//this makes sure to get full res of camera
captureSession.sessionPreset = AVCaptureSession.Preset.photo
var devices = AVCaptureDevice.devices(for: .video)
//query available devices
for device in devices {
if device.position == .front {
frontFacingCamera = device
} else if device.position == .back {
backFacingCamera = device
}
}//end iteration
//set a default device
currentDevice = backFacingCamera
//configure session w output for capturing still img
stillImageOutput = AVCaptureStillImageOutput()
stillImageOutput?.outputSettings = [AVVideoCodecKey : AVVideoCodecType.jpeg]
do {
//capture the data from whichever camera we are using; think of it as a buffer that translates the data into a real-world image
let captureDeviceInput = try AVCaptureDeviceInput(device: currentDevice!)
captureSession.addInput(captureDeviceInput)
captureSession.addOutput(stillImageOutput!)
//setup camera preview layer
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
//add the preview to our specified view in the UI
view.layer.addSublayer(cameraPreviewLayer!)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraPreviewLayer?.frame = cameraView.frame
captureSession.startRunning()
} catch let error {
print(error)
}//end do
}
@IBAction func livePhotoTapped(_ sender: UIButton) {
let videoConnection = stillImageOutput?.connection(with: .video)
//capture still image async
stillImageOutput?.captureStillImageAsynchronously(from: videoConnection!, completionHandler: { (imageDataBuffer, error) in
if let imageData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: imageDataBuffer!, previewPhotoSampleBuffer: imageDataBuffer!) {
self.stillImage = UIImage(data: imageData)
self.performSegue(withIdentifier: "toPreviewPhoto", sender: self)
}
})
}
Use it like this:
For CMSampleBufferIsValid, check this link
@IBAction func livePhotoTapped(_ sender: UIButton) {
let videoConnection = stillImageOutput?.connection(with: .video)
//capture still image async
stillImageOutput?.captureStillImageAsynchronously(from: videoConnection!, completionHandler: { (imageDataBuffer, error) in
if error != nil {
print("error \(error)")
} else {
if let imageBuffer = imageDataBuffer, CMSampleBufferIsValid(imageBuffer) {
if let imageData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: imageBuffer, previewPhotoSampleBuffer: imageBuffer) {
self.stillImage = UIImage(data: imageData)
self.performSegue(withIdentifier: "toPreviewPhoto", sender: self)
}
} else {
print("imageDataBuffer is nil or not valid")
}
}
})
}
You are force unwrapping a nil object.
var currentDevice: AVCaptureDevice?
It looks like this method has been deprecated:
+ (NSArray<AVCaptureDevice *> *)devices NS_DEPRECATED(10_7, NA, 4_0, 10_0, "Use AVCaptureDeviceDiscoverySession instead.");
Have you tried "AVCaptureDeviceDiscoverySession"?
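
For example, a sketch of what that replacement could look like (it assumes the built-in wide-angle cameras are sufficient, and mirrors the original front/back lookup):

// Replaces the deprecated AVCaptureDevice.devices(for:) query.
let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera],
                                                        mediaType: .video,
                                                        position: .unspecified)
for device in discoverySession.devices {
    if device.position == .front {
        frontFacingCamera = device
    } else if device.position == .back {
        backFacingCamera = device
    }
}
// set a default device, as before
currentDevice = backFacingCamera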