AVCaptureSession for barcode scanning is not working - Swift

I am trying to build a barcode scanner. I adapted parts of this tutorial. The video capture session is working, but it is not detecting any barcodes. I have gone through the code multiple times and still cannot find the problem. Here is my code for detecting the barcode:
class ScanController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {

    var captureSession: AVCaptureSession?
    var videoPreviewLayer: AVCaptureVideoPreviewLayer?
    var qrCodeFrameView: UIView?

    let supportedCodeTypes = [AVMetadataObject.ObjectType.upce,
                              AVMetadataObject.ObjectType.code39,
                              AVMetadataObject.ObjectType.qr]

    override func viewDidLoad() {
        super.viewDidLoad()

        // Get an instance of the AVCaptureDevice class to initialize a device object and provide video as the media type parameter
        let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)

        do {
            // Get an instance of the AVCaptureDeviceInput class using the previous device object.
            let input = try AVCaptureDeviceInput(device: captureDevice!)

            // Initialize the captureSession object.
            captureSession = AVCaptureSession()

            // Set the input device on the capture session.
            captureSession?.addInput(input)

            let captureMetadataOutput = AVCaptureMetadataOutput()
            captureSession?.addOutput(captureMetadataOutput)

            // Set the delegate and use the default dispatch queue to execute the callback
            captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
            captureMetadataOutput.metadataObjectTypes = supportedCodeTypes

            // Initialize the video preview layer and add it as a sublayer to the viewPreview view's layer.
            videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
            videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
            videoPreviewLayer?.frame = view.layer.bounds
            view.layer.addSublayer(videoPreviewLayer!)

            // Start video capture.
            captureSession?.startRunning()

            // Add the message label
            self.view.addSubview(messageLabel)

            // Initialize the QR code frame to highlight the QR code
            qrCodeFrameView = UIView()

            if let qrCodeFrameView = qrCodeFrameView {
                qrCodeFrameView.layer.borderColor = UIColor.green.cgColor
                qrCodeFrameView.layer.borderWidth = 2
                view.addSubview(qrCodeFrameView)
                view.bringSubview(toFront: qrCodeFrameView)
            }
        } catch {
            // If any error occurs, simply print it out and don't continue any more.
            print("THERE IS A PROBLEM WITH THE CAPTURE SESSION *****************")
            print(error)
            return
        }
    }
}
What am I missing?

Maybe you are missing the delegate method? In the tutorial, the delegate method is:
optional func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection)
under the section "Decoding the QR Code".
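For reference, here is a minimal sketch of what that delegate method could look like in your ScanController (the print call is just a placeholder; it assumes the qrCodeFrameView, videoPreviewLayer and supportedCodeTypes properties from your code):
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    // Nothing recognized: hide the highlight frame
    guard let metadataObj = metadataObjects.first as? AVMetadataMachineReadableCodeObject,
          supportedCodeTypes.contains(metadataObj.type) else {
        qrCodeFrameView?.frame = CGRect.zero
        return
    }
    // Map the detected object's bounds into the preview layer's coordinate space
    if let barCodeObject = videoPreviewLayer?.transformedMetadataObject(for: metadataObj) {
        qrCodeFrameView?.frame = barCodeObject.bounds
    }
    if let codeString = metadataObj.stringValue {
        print("Detected code: \(codeString)")
    }
}
Without this method the session still runs, but the detected metadata has nowhere to go.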

Related

MTAudioProcessingTap - prepare{} and process{} callbacks are not executing

I am working on loading a (local) movie into AVPlayer and applying processing to the audio track with an audioTapProcessor. So far I've found great GitHub examples here, here, and here. I'm using the "tap cookie" approach used in the last link and in an answer to this previous question.
Audio and video playback are working fine. However, my tapPrepare and tapProcess callbacks are not being called, but Init and Finalize are. So I'm doing something both right and wrong. The relevant code is attached; any help is appreciated!
import Foundation
import AVFoundation
import AudioToolbox
import MediaToolbox
import CoreAudioTypes

class PlayerViewController: UIViewController {

    class TapCookie {
        weak var content: PlayerViewController?
        deinit {
            print("TapCookie deinit") // appears after tapFinalize
        }
    }

    // MARK: Properties
    var playerAsset: AVURLAsset?
    var playerItem: AVPlayerItem! = nil
    var audioProcessingFormat: AudioStreamBasicDescription?
    private var tracksObserver: NSKeyValueObservation?

    // MARK: Button to trigger actions
    @IBAction func selectVideo(_ sender: Any) {
        // starts doing stuff:
        // - select a video file from device, extract movieURL string ...
        playerAsset = AVURLAsset(url: movieURL)
        playerItem = AVPlayerItem(url: movieURL)
        // ... then send asset to AVPlayer (not shown)

        // set up audioProcessingTap
        tracksObserver = playerItem.observe(\AVPlayerItem.tracks, options: [.initial, .new]) {
            [unowned self] item, change in
            installTap(playerItem: playerItem)
        }
    }

    func installTap(playerItem: AVPlayerItem) {
        let cookie = TapCookie()
        cookie.content = self

        var callbacks = MTAudioProcessingTapCallbacks(
            version: kMTAudioProcessingTapCallbacksVersion_0,
            clientInfo: UnsafeMutableRawPointer(Unmanaged.passRetained(cookie).toOpaque()),
            init: tapInit,
            finalize: tapFinalize,
            prepare: tapPrepare,
            unprepare: tapUnprepare,
            process: tapProcess)

        var tap: Unmanaged<MTAudioProcessingTap>?
        let err = MTAudioProcessingTapCreate(kCFAllocatorDefault, &callbacks, kMTAudioProcessingTapCreationFlag_PostEffects, &tap)
        assert(noErr == err)
        // tapInit successfully called after MTAudioProcessingTapCreate

        let audioMix = AVMutableAudioMix()
        let audioTrack = playerItem.asset.tracks(withMediaType: AVMediaType.audio).first! // use first audio track
        let inputParams = AVMutableAudioMixInputParameters(track: audioTrack)
        inputParams.audioTapProcessor = tap?.takeRetainedValue()
        audioMix.inputParameters = [inputParams]
        playerItem.audioMix = audioMix
    }

    // MARK: install tap callbacks
    let tapInit: MTAudioProcessingTapInitCallback = {
        (tap, clientInfo, tapStorageOut) in
        tapStorageOut.pointee = clientInfo
        print("tapInit tap: \(tap)\n clientInfo: \(String(describing: clientInfo))\n tapStorageOut: \(tapStorageOut)\n")
    }

    // tapPrepare not called !!
    let tapPrepare: MTAudioProcessingTapPrepareCallback = {
        (tap, maxFrames, processingFormat) in
        print("tapPrepare tap: \(tap), maxFrames: \(maxFrames)\n processingFormat: \(processingFormat)")
        let cookie = Unmanaged<TapCookie>.fromOpaque(MTAudioProcessingTapGetStorage(tap)).takeUnretainedValue()
        cookie.content!.audioProcessingFormat = AudioStreamBasicDescription(mSampleRate: processingFormat.pointee.mSampleRate,
                                                                            mFormatID: processingFormat.pointee.mFormatID,
                                                                            mFormatFlags: processingFormat.pointee.mFormatFlags,
                                                                            mBytesPerPacket: processingFormat.pointee.mBytesPerPacket,
                                                                            mFramesPerPacket: processingFormat.pointee.mFramesPerPacket,
                                                                            mBytesPerFrame: processingFormat.pointee.mBytesPerFrame,
                                                                            mChannelsPerFrame: processingFormat.pointee.mChannelsPerFrame,
                                                                            mBitsPerChannel: processingFormat.pointee.mBitsPerChannel,
                                                                            mReserved: processingFormat.pointee.mReserved)
    }

    let tapUnprepare: MTAudioProcessingTapUnprepareCallback = {
        (tap) in
        print("tapUnprepare \(tap)")
    }

    // tapProcess not called !!
    let tapProcess: MTAudioProcessingTapProcessCallback = {
        (tap, numberFrames, flags, bufferListInOut, numberFramesOut, flagsOut) in
        print("tapProcess \(tap)\n \(numberFrames)\n \(flags)\n \(bufferListInOut)\n \(numberFramesOut)\n \(flagsOut)\n")

        let status = MTAudioProcessingTapGetSourceAudio(tap, numberFrames, bufferListInOut, flagsOut, nil, numberFramesOut)
        if noErr != status {
            print("get audio: \(status)")
        }

        let cookie = Unmanaged<TapCookie>.fromOpaque(MTAudioProcessingTapGetStorage(tap)).takeUnretainedValue()
        guard let cookieContent = cookie.content else {
            print("Tap callback: cookie content was deallocated!")
            return
        }
        // process audio here...
    }

    let tapFinalize: MTAudioProcessingTapFinalizeCallback = {
        (tap) in
        print("tapFinalize \(tap)")
        // release cookie
        Unmanaged<TapCookie>.fromOpaque(MTAudioProcessingTapGetStorage(tap)).release()
    }
}
You need to create an AVPlayer
player = AVPlayer(playerItem: playerItem)
and then at some point start it playing:
player.play()
Then the prepare and process callbacks will be called.
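A minimal sketch of that, assuming player is a property on your view controller (so it is retained) and playerView is whatever view hosts the playback layer (a hypothetical name, not in the original code):
player = AVPlayer(playerItem: playerItem)
let playerLayer = AVPlayerLayer(player: player)
playerLayer.frame = playerView.bounds
playerView.layer.addSublayer(playerLayer)
// tapPrepare and tapProcess start firing once playback begins
player.play()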

Swift - Recorded Video is Mirrored on Front Camera - How to flip?

I'm trying to mirror the recorded video from a capture session. The video preview for the front-facing camera shows a mirrored version; however, when I save the file and play it back, the captured video is actually mirrored. I'm using Apple's AVCam demo as a reference and can't seem to figure this out! Please help.
I've tried creating an AVCaptureConnection and setting its .isVideoMirrored property. However, I get this error:
cannot be added to the session because the source and destination media types are incompatible'
I would have thought mirroring the video would be much easier. I think I may be creating my connection incorrectly. The code below never actually adds the connection, because the .canAddConnection check fails.
var captureSession: AVCaptureSession!

override func viewDidAppear(_ animated: Bool) {
    super.viewDidAppear(animated)
    captureSession = AVCaptureSession()

    // Setup Camera
    if let dualCameraDevice = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .front) {
        defaultVideoDevice = dualCameraDevice
    } else if let frontCameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front) {
        // If the dual camera isn't available, default to the front wide angle camera.
        defaultVideoDevice = frontCameraDevice
    }

    guard let videoDevice = defaultVideoDevice else {
        print("Default video device is unavailable.")
        // setupResult = .configurationFailed
        captureSession.commitConfiguration()
        return
    }

    let videoDeviceInput = try AVCaptureDeviceInput(device: videoDevice)
    if captureSession.canAddInput(videoDeviceInput) {
        captureSession.addInput(videoDeviceInput)
    }

    let movieOutput = AVCaptureMovieFileOutput()

    // Video input ports for the AVCaptureConnection
    let videoInput: [AVCaptureInput.Port] = videoDeviceInput.ports

    if captureSession.canAddOutput(movieOutput) {
        captureSession.beginConfiguration()
        captureSession.addOutput(movieOutput)
        captureSession.sessionPreset = .medium
Then I try to set up the AVCaptureConnection and set the parameters for mirroring. Please tell me if there is an easier way to mirror the output or the playback.
        avCaptureConnection = AVCaptureConnection(inputPorts: videoInput, output: movieOutput)
        avCaptureConnection.isEnabled = true

        // Mirror the capture connection?
        avCaptureConnection.automaticallyAdjustsVideoMirroring = false
        avCaptureConnection.isVideoMirrored = false

        // Check if we can add a connection
        if captureSession.canAddConnection(avCaptureConnection) {
            // Add the connection
            captureSession.addConnection(avCaptureConnection)
        }

        captureSession.commitConfiguration()
        self.movieOutput = movieOutput
        setupLivePreview()
    }
}
Somewhere else in the code, connected to an IBAction, I initialize the recording:
// Start recording video to a temporary file.
let outputFileName = NSUUID().uuidString
let outputFilePath = (NSTemporaryDirectory() as NSString).appendingPathComponent((outputFileName as NSString).appendingPathExtension("mov")!)
print("Recording in tap function")
movieOutput.startRecording(to: URL(fileURLWithPath: outputFilePath), recordingDelegate: self)
I think I'm using AVCaptureConnection incorrectly, especially given the error stating that the media types are incompatible. If there is a proper way to implement this, please let me know. I'm also open to suggestions for an easier way to mirror the output or the playback. Thank you!
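Not a full answer, but one approach that is often suggested is to skip building an AVCaptureConnection by hand and instead flip the connection that AVCaptureMovieFileOutput already gets when it is added to the session. A rough sketch, untested against this exact setup, using the movieOutput from the code above:
captureSession.beginConfiguration()
if captureSession.canAddOutput(movieOutput) {
    captureSession.addOutput(movieOutput)
}
// The movie output now has an implicit video connection; mirror that one.
if let connection = movieOutput.connection(with: .video), connection.isVideoMirroringSupported {
    connection.automaticallyAdjustsVideoMirroring = false
    connection.isVideoMirrored = true
}
captureSession.commitConfiguration()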

What is wrong with my custom camera view?

I followed this video: https://www.youtube.com/watch?v=7TqXrMnfJy8&t=45s to the T. But when I open the camera view, all I see is a black screen and a white button. I get no error messages when I load the camera view. Can someone please assist me with what I'm doing wrong?
My code is below:
import UIKit
import AVFoundation

class CameraViewController: UIViewController {

    var captureSession = AVCaptureSession()
    var backCamera: AVCaptureDevice?
    var currentCamera: AVCaptureDevice?
    var photoOutput: AVCapturePhotoOutput?
    var cameraPreviewLayer: AVCaptureVideoPreviewLayer?

    override func viewDidLoad() {
        super.viewDidLoad()
        setupCaptureSession()
        setupDevice()
        setupInputOutput()
        setupPreviewLayer()
        startRunningCaptureSession()
    }

    func setupCaptureSession() {
        captureSession.sessionPreset = AVCaptureSession.Preset.photo
    }

    func setupDevice() {
        let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.unspecified)
        let devices = deviceDiscoverySession.devices
        for device in devices {
            if device.position == AVCaptureDevice.Position.back {
                backCamera = device
            }
        }
        currentCamera = backCamera
    }

    func setupInputOutput() {
        do {
            let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
            captureSession.addInput(captureDeviceInput)
            photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
        } catch {
            print(error)
        }
    }

    func setupPreviewLayer() {
        cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
        cameraPreviewLayer?.frame = self.view.frame
        self.view.layer.insertSublayer(cameraPreviewLayer!, at: 1)
    }

    func startRunningCaptureSession() {
        captureSession.startRunning()
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated
    }
}
I ran your code and it worked perfectly fine — almost! The only problem is that I had to add a Privacy — Camera Usage Description entry to the app's Info.plist. Otherwise the app crashes.
Once I did that and ran your code, I saw the live camera view on my device.
So why isn't it working for you? Let's think of some possible reasons. You didn't give enough info to know for sure (seeing as the code itself works just fine), but here are some possibilities:
You don't have the Privacy — Camera Usage Description entry in the app's Info.plist.
You are testing on the Simulator. Maybe this code works only on a device.
There is something in your interface in front of the sublayer that you add when you say insertSublayer. To test this, try saying addSublayer instead; this will make the camera layer the frontmost layer (this is just for testing purposes, remember).
Maybe your code never runs at all? Perhaps we never actually go to this view controller. To test that theory, put a print statement in your viewDidLoad and see if it actually prints to the console.
Maybe your code runs too soon? To test that theory, move all those calls out of viewDidLoad and into something later, such as viewDidAppear. Remember, this is just for testing purposes.
Hopefully one of those will help you figure out what the problem is.
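If you want to rule the permission issue in or out in code (the first possibility above), a small diagnostic like this, run before the session is configured, can help (just a sketch for testing, not part of the tutorial):
// Diagnostic sketch: confirm the app actually has camera permission.
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .authorized:
    print("Camera access granted")
case .notDetermined:
    AVCaptureDevice.requestAccess(for: .video) { granted in
        print("Camera access granted: \(granted)")
    }
default:
    print("Camera access denied or restricted - check Settings and the Info.plist usage description")
}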

How to capture depth data from camera in iOS 11 and Swift 4?

I'm trying to get depth data from the camera in iOS 11 with AVDepthData, though when I set up a photoOutput with the AVCapturePhotoCaptureDelegate, photo.depthData is nil.
So I tried setting up the AVCaptureDepthDataOutputDelegate with an AVCaptureDepthDataOutput, though I don't know how to capture the depth photo.
Has anyone ever got an image from AVDepthData?
Edit:
Here's the code I tried:
// delegates: AVCapturePhotoCaptureDelegate & AVCaptureDepthDataOutputDelegate
// delegates: AVCapturePhotoCaptureDelegate & AVCaptureDepthDataOutputDelegate

@IBOutlet var image_view: UIImageView!
@IBOutlet var capture_button: UIButton!

var captureSession: AVCaptureSession?
var sessionOutput: AVCapturePhotoOutput?
var depthOutput: AVCaptureDepthDataOutput?
var previewLayer: AVCaptureVideoPreviewLayer?

@IBAction func capture(_ sender: Any) {
    self.sessionOutput?.capturePhoto(with: AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg]), delegate: self)
}

func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
    self.previewLayer?.removeFromSuperlayer()
    self.image_view.image = UIImage(data: photo.fileDataRepresentation()!)
    let depth_map = photo.depthData?.depthDataMap
    print("depth_map:", depth_map) // is nil
}

func depthDataOutput(_ output: AVCaptureDepthDataOutput, didOutput depthData: AVDepthData, timestamp: CMTime, connection: AVCaptureConnection) {
    print("depth data") // never called
}

override func viewDidLoad() {
    super.viewDidLoad()

    self.captureSession = AVCaptureSession()
    self.captureSession?.sessionPreset = .photo

    self.sessionOutput = AVCapturePhotoOutput()
    self.depthOutput = AVCaptureDepthDataOutput()
    self.depthOutput?.setDelegate(self, callbackQueue: DispatchQueue(label: "depth queue"))

    do {
        let device = AVCaptureDevice.default(for: .video)
        let input = try AVCaptureDeviceInput(device: device!)
        if (self.captureSession?.canAddInput(input))! {
            self.captureSession?.addInput(input)
            if (self.captureSession?.canAddOutput(self.sessionOutput!))! {
                self.captureSession?.addOutput(self.sessionOutput!)
                if (self.captureSession?.canAddOutput(self.depthOutput!))! {
                    self.captureSession?.addOutput(self.depthOutput!)
                    self.previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession!)
                    self.previewLayer?.frame = self.image_view.bounds
                    self.previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
                    self.previewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
                    self.image_view.layer.addSublayer(self.previewLayer!)
                }
            }
        }
    } catch {}

    self.captureSession?.startRunning()
}
I'm trying two things: one where the depth data is nil, and one where I'm trying to get the depth delegate method called. Does anyone know what I'm missing?
First, you need to use the dual camera, otherwise you won't get any depth data.
let device = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .back)
And keep a reference to your queue
let dataOutputQueue = DispatchQueue(label: "data queue", qos: .userInitiated, attributes: [], autoreleaseFrequency: .workItem)
You'll also probably want to synchronize the video and depth data
var outputSynchronizer: AVCaptureDataOutputSynchronizer?
Then you can synchronize the two outputs in your viewDidLoad() method like this
if sessionOutput?.isDepthDataDeliverySupported == true {
    sessionOutput?.isDepthDataDeliveryEnabled = true
    depthDataOutput?.connection(with: .depthData)!.isEnabled = true
    depthDataOutput?.isFilteringEnabled = true
    outputSynchronizer = AVCaptureDataOutputSynchronizer(dataOutputs: [sessionOutput!, depthDataOutput!])
    outputSynchronizer!.setDelegate(self, queue: self.dataOutputQueue)
}
I would recommend watching WWDC session 507 - they also provide a full sample app that does exactly what you want.
https://developer.apple.com/videos/play/wwdc2017/507/
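If you do use the synchronizer, your class also needs to adopt AVCaptureDataOutputSynchronizerDelegate and implement its callback; a minimal sketch (using the depthDataOutput property named above) looks something like this:
func dataOutputSynchronizer(_ synchronizer: AVCaptureDataOutputSynchronizer,
                            didOutput synchronizedDataCollection: AVCaptureSynchronizedDataCollection) {
    // Pull the depth sample that belongs to this sync point; it may have been dropped.
    guard let syncedDepth = synchronizedDataCollection.synchronizedData(for: depthDataOutput!) as? AVCaptureSynchronizedDepthData,
          !syncedDepth.depthDataWasDropped else { return }
    print("depth map: \(syncedDepth.depthData.depthDataMap)")
}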
To give more details to @klinger's answer, here is what you need to do to get the depth data for each pixel. I wrote some comments, hope it helps!
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {

    //## Convert Disparity to Depth ##
    let depthData = photo.depthData!.converting(toDepthDataType: kCVPixelFormatType_DepthFloat32)
    let depthDataMap = depthData.depthDataMap // AVDepthData -> CVPixelBuffer

    //## Data Analysis ##

    // Useful data
    let width = CVPixelBufferGetWidth(depthDataMap)   // 768 on an iPhone 7+
    let height = CVPixelBufferGetHeight(depthDataMap) // 576 on an iPhone 7+
    CVPixelBufferLockBaseAddress(depthDataMap, CVPixelBufferLockFlags(rawValue: 0))

    // Convert the base address to a safe pointer of the appropriate type
    let floatBuffer = unsafeBitCast(CVPixelBufferGetBaseAddress(depthDataMap), to: UnsafeMutablePointer<Float32>.self)

    // Read the data (returns a Float) for the pixel at column x, row y
    // Valid indices run row by row, from 0 to width * height - 1
    let distanceAtXYPoint = floatBuffer[y * width + x]
}
There are two ways to do this, and you are trying to do both at once:
Capture depth data along with the image. This is done by using the photo.depthData object from photoOutput(_:didFinishProcessingPhoto:error:). I explain why this did not work for you below.
Use a AVCaptureDepthDataOutput and implement depthDataOutput(_:didOutput:timestamp:connection:). I am not sure why this did not work for you, but implementing depthDataOutput(_:didOutput:timestamp:connection:) might help you figure out why.
I think that #1 is a better option, because it pairs the depth data with the image. Here's how you would do that:
@IBAction func capture(_ sender: Any) {
    let settings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
    settings.isDepthDataDeliveryEnabled = true
    self.sessionOutput?.capturePhoto(with: settings, delegate: self)
}

// ...

override func viewDidLoad() {
    // ...
    self.sessionOutput = AVCapturePhotoOutput()
    self.sessionOutput?.isDepthDataDeliveryEnabled = true
    // ...
}
Then, depth_map shouldn't be nil. Make sure to read both this and this (separate but similar pages) for more information about obtaining depth data.
For #2, I'm not quite sure why depthDataOutput(_:didOutput:timestamp:connection:) isn't being called, but you should implement depthDataOutput(_:didDrop:timestamp:connection:reason:) to see if depth data is being dropped for some reason.
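For reference, a minimal sketch of that drop callback, which just logs why frames are being discarded:
func depthDataOutput(_ output: AVCaptureDepthDataOutput, didDrop depthData: AVDepthData,
                     timestamp: CMTime, connection: AVCaptureConnection,
                     reason: AVCaptureOutput.DataDroppedReason) {
    // If this fires, the output is running but frames are being discarded (e.g. late or out of buffers).
    print("depth data dropped at \(timestamp), reason: \(reason.rawValue)")
}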
The way you initialize your capture device is not right.
You should use the dual camera.
In Objective-C it looks like this:
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithDeviceType:AVCaptureDeviceTypeBuiltInDualCamera mediaType:AVMediaTypeVideo position:AVCaptureDevicePositionBack];

Camera feed of dimensions one pixel by one pixel

This is a rather strange request, but I am looking to build an app that has a live camera feed taking up the whole screen. However, instead of displaying the image at its normal resolution, the whole screen would be one color. In particular, I want to take the color of what would normally be the middle pixel on the screen and make it fill the entire screen. It needs to be done live and fast.
I attempted to write a function that saved the capture session output as a UIImage and then read the pixel data from that; however, it proved too slow for real time. Any suggestions?
Assuming you have an AVCaptureSession set up, you need to add an AVCaptureVideoDataOutput and then set its sample buffer delegate. The delegate class should implement captureOutput(_:didOutput:from:). Within this function you can get access to the pixel buffer and sample your centre point. You could do it as below; I've left the actual sampling of the centre point to you.
class MyClass: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {

    func addVideoOutput() {
        // Add video data output.
        if session.canAddOutput(videoDataOutput) {
            videoDataOutput.setSampleBufferDelegate(self, queue: sessionQueue)
            videoDataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
            videoDataOutput.alwaysDiscardsLateVideoFrames = true
            session.addOutput(videoDataOutput)
        }
    }

    // AVCaptureVideoDataOutputSampleBufferDelegate
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        if let buffer = CMSampleBufferGetImageBuffer(sampleBuffer) {
            process(pixelBuffer: buffer)
        }
    }

    func process(pixelBuffer: CVPixelBuffer) {
        let sourceRowBytes = CVPixelBufferGetBytesPerRow(pixelBuffer)
        let width = CVPixelBufferGetWidth(pixelBuffer)
        let height = CVPixelBufferGetHeight(pixelBuffer)
        let rt = CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
        if (rt == kCVReturnSuccess) {
            // ... Do your processing of the pixel data here ...
            CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly)
        }
    }

    private let session = AVCaptureSession()
    private let sessionQueue = DispatchQueue(label: "session queue", attributes: [], target: nil) // Communicate with the session
    private let videoDataOutput = AVCaptureVideoDataOutput()
}
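To fill in the part that was left out, here is a rough sketch of sampling the centre pixel of the locked 32BGRA buffer; it belongs inside the if (rt == kCVReturnSuccess) block, before the unlock call, and assumes UIKit is imported for UIColor. What you then do with the colour (for example tinting a full-screen view) is up to you:
// Sketch: read the BGRA bytes of the centre pixel while the base address is locked.
if let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer) {
    let bytes = baseAddress.assumingMemoryBound(to: UInt8.self)
    let centreOffset = (height / 2) * sourceRowBytes + (width / 2) * 4 // 4 bytes per 32BGRA pixel
    let blue  = CGFloat(bytes[centreOffset]) / 255.0
    let green = CGFloat(bytes[centreOffset + 1]) / 255.0
    let red   = CGFloat(bytes[centreOffset + 2]) / 255.0
    let centreColor = UIColor(red: red, green: green, blue: blue, alpha: 1.0)
    DispatchQueue.main.async {
        // e.g. set your full-screen view's backgroundColor to centreColor
        print("centre colour: \(centreColor)")
    }
}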