How can I stop my frames from constantly being reloaded in Swift?

I have code that detects objects and transcribes them to speech. However, if several objects are detected in the same frame, the voice output gets garbled and speaks all of the object names at once, which makes no sense to the user. I'm developing this application for the visually impaired, so I want to announce one object's name, finish speaking it, and only then move on to the next object.
Here is the code:
import UIKit
import AVKit
import Vision
import CoreML
import AVFoundation
class ViewController: UIViewController , AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet weak var innerView: UIView!
@IBOutlet weak var viewLable: UILabel!
var previewLayer: AVCaptureVideoPreviewLayer?
override func viewDidLoad() {
super.viewDidLoad()
updateLable(newLable: "new lable")
//Start the Camera
let captureSession = AVCaptureSession()
captureSession.sessionPreset = .photo
// get back camera as Video Capture Device
guard let captureDevice = AVCaptureDevice.default(for: .video)
else { self.quickErr(myLine: #line,inputStr: "") ; return }
try? captureDevice.lockForConfiguration()
captureDevice.activeVideoMinFrameDuration = CMTimeMake(1, 2)
captureDevice.activeVideoMaxFrameDuration = CMTimeMake(1, 2)
captureDevice.unlockForConfiguration()
guard let input = try? AVCaptureDeviceInput(device: captureDevice)
else { self.quickErr(myLine: #line,inputStr: "") ; return }
captureSession.addInput(input)
captureSession.startRunning()
self.previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.previewLayer?.frame.size = self.innerView.frame.size
self.previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.innerView.layer.addSublayer(self.previewLayer!)
self.previewLayer?.frame = view.frame
// let label = UILabel(frame: CGRect(x: 0, y: 0, width: 200, height: 21))
// label.center = CGPoint(x: 160, y: 285)
// label.textAlignment = .center
// label.text = "I'am a test label"
// self.view.addSubview(label)
// label.text = ""
//get access to video frames
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "VideoQueue"))
captureSession.addOutput(dataOutput)
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
self.previewLayer?.frame.size = self.innerView.frame.size
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
//print("Camera was able to capture a frame ", Date())
guard let pixcelBuffer:CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
else { self.quickErr(myLine: #line,inputStr: "") ; return }
guard let model = try? VNCoreMLModel(for: Resnet50().model)
else { self.quickErr(myLine: #line,inputStr: "") ; return }
let request = VNCoreMLRequest(model: model) { (finishedReq, err) in
//check err
//print(finishedReq.results)
guard let results = finishedReq.results as? [VNClassificationObservation]
else { self.quickErr(myLine: #line,inputStr: "") ; return }
guard let firstObservation = results.first
else { self.quickErr(myLine: #line,inputStr: "") ; return }
var myMessage = ""
var myConfident = 0
if (firstObservation.confidence > 0.2 ) {
myConfident = Int ( firstObservation.confidence * 100 )
let myIdentifier = firstObservation.identifier.split(separator: ",")
myMessage = "I am \(myConfident) % confidence this object is : \(myIdentifier[0]) "
} else {
myMessage = "I am not confidence to detect this object"
}
print(myMessage)
self.updateLable(newLable: myMessage)
if ( myConfident >= 70 ){
self.readyMe(myText: myMessage, myLang: "en_EN")
}
}
// Analyze image
try? VNImageRequestHandler(cvPixelBuffer: pixcelBuffer, options: [:]).perform([request])
}
func readyMe(myText :String , myLang : String ) {
let uttrace = AVSpeechUtterance(string: myText )
uttrace.voice = AVSpeechSynthesisVoice(language: myLang)
uttrace.rate = 0.5
let synthesizer = AVSpeechSynthesizer()
synthesizer.speak(uttrace)
}
func quickErr(myLine: Int , inputStr : String = "" ) {
print("===> Guard Error \(inputStr) :\n    file:\(#file)\n    line:\(myLine)\n    function:\(#function) ")
}
func updateLable(newLable: String){
DispatchQueue.main.async { // Correct
self.viewLable?.text = "[ " + newLable + " ]"
}
}
}
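One approach worth noting here (a sketch under assumptions, not taken from the code above): keep a single, long-lived AVSpeechSynthesizer and skip new announcements while it is still speaking, instead of creating a new synthesizer inside readyMe on every frame. A locally created synthesizer can be deallocated mid-utterance, and several utterances can end up overlapping. The lastSpokenIdentifier property below is an assumed helper for illustration.
// Minimal sketch: one shared synthesizer plus a simple "busy" gate.
let synthesizer = AVSpeechSynthesizer()   // keep one instance alive as a property
var lastSpokenIdentifier: String?         // assumed: remembers the last announced object
func speakIfIdle(_ text: String, identifier: String, language: String = "en-US") {
    // Ignore new results while the previous announcement is still playing,
    // and do not repeat the same object name back to back.
    guard !synthesizer.isSpeaking, identifier != lastSpokenIdentifier else { return }
    lastSpokenIdentifier = identifier
    let utterance = AVSpeechUtterance(string: text)
    utterance.voice = AVSpeechSynthesisVoice(language: language)
    utterance.rate = AVSpeechUtteranceDefaultSpeechRate
    synthesizer.speak(utterance)
}
Loading the VNCoreMLModel once (for example in viewDidLoad) instead of on every captured frame also cuts down the per-frame work considerably.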

Related

Memory leak when using AVCaptureSession to AVAssetWriter using AVAssetWriterDelegate for HLS

We have found that AVAssetWriter leaks memory when using the AVAssetWriterDelegate that is required to create HLS fMP4 video, even before the given segment data is ever processed or stored.
When we release the memory by hand (which feels wrong), the leak seems to disappear.
Even in a minimal setup the memory grows rapidly.
import Cocoa
import AVFoundation
class ViewController: NSViewController {
override func viewDidLoad() {
super.viewDidLoad()
AVCaptureDevice.requestAccess(for: .video, completionHandler: {
_ in
})
}
private var fileWriter: AVAssetWriter!
private var videoInput: AVAssetWriterInput!
private var bufferAdaptor: AVAssetWriterInputPixelBufferAdaptor!
private var captureSession: AVCaptureSession!
internal let recordingQueue = DispatchQueue(label: "RecordingQueue", qos: .userInitiated)
internal let writerQueue = DispatchQueue(label: "WriterQueue", qos: .userInitiated)
@IBAction func startCapture(_ sender: NSButton) {
self.writerQueue.async {
let device = AVCaptureDevice.default(for: .video)!
try! device.lockForConfiguration()
device.activeFormat = device.formats.last!
device.activeVideoMaxFrameDuration = CMTime(value: 1, timescale: 25)
device.activeVideoMinFrameDuration = CMTime(value: 1, timescale: 25)
device.unlockForConfiguration()
self.fileWriter = AVAssetWriter(contentType: .mpeg4Movie)
self.fileWriter.preferredOutputSegmentInterval = CMTime(seconds: 0.2, preferredTimescale: 60000)
self.fileWriter.outputFileTypeProfile = .mpeg4AppleHLS
self.fileWriter.initialSegmentStartTime = .zero
let videoOutputSettings: [String: Any] = [
AVVideoWidthKey: 1920,
AVVideoHeightKey: 1080,
AVVideoCodecKey: AVVideoCodecType.h264,
AVVideoCompressionPropertiesKey: [
AVVideoProfileLevelKey: AVVideoProfileLevelH264HighAutoLevel,
AVVideoAverageBitRateKey: 6000 * 1024
]
]
self.videoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoOutputSettings)
self.fileWriter.movieTimeScale = CMTimeScale(exactly: 25)!
self.videoInput.mediaTimeScale = CMTimeScale(exactly: 25)!
self.videoInput.expectsMediaDataInRealTime = true
self.videoInput.performsMultiPassEncodingIfSupported = false
let sourcePixelBufferAttributes:[String:Any] = [
kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32ARGB,
kCVPixelBufferMetalCompatibilityKey as String: true,
]
self.bufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: self.videoInput, sourcePixelBufferAttributes: sourcePixelBufferAttributes)
if self.fileWriter.canAdd(self.videoInput) {
self.fileWriter.add(self.videoInput)
} else {
return
}
self.fileWriter.delegate = self
self.captureSession = AVCaptureSession()
self.captureSession?.beginConfiguration()
let videoInput = try! AVCaptureDeviceInput(device: device)
if self.captureSession?.canAddInput(videoInput) ?? false {
self.captureSession?.addInput(videoInput)
} else {
return
}
self.captureSession?.sessionPreset = AVCaptureSession.Preset.high
let videoDataOutput = AVCaptureVideoDataOutput()
videoDataOutput.setSampleBufferDelegate(self, queue: self.recordingQueue)
videoDataOutput.alwaysDiscardsLateVideoFrames = true
videoDataOutput.videoSettings = [
kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
kCVPixelBufferMetalCompatibilityKey as String: true
]
if self.captureSession?.canAddOutput(videoDataOutput) ?? false {
self.captureSession?.addOutput(videoDataOutput)
} else {
return
}
self.captureSession?.commitConfiguration()
try! device.lockForConfiguration()
self.captureSession?.startRunning()
device.unlockForConfiguration()
self.fileWriter.startWriting()
self.fileWriter.startSession(atSourceTime: CMTime.zero)
}
}
func write(sample: CMSampleBuffer) {
if self.videoInput.isReadyForMoreMediaData {
self.videoInput.append(sample)
}
}
}
extension ViewController: AVAssetWriterDelegate {
func assetWriter(_ writer: AVAssetWriter, didOutputSegmentData segmentData: Data, segmentType: AVAssetSegmentType, segmentReport: AVAssetSegmentReport?) {
print(segmentData.count)
// let _ = segmentData.withUnsafeBytes {
// raw in
// raw.baseAddress?.deallocate()
// }
}
}
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
self.write(sample: sampleBuffer)
}
}
Run this small example in a new project and you will see the memory grow. Uncomment the lines in the delegate and it behaves as expected.
What are we missing? Or did we discover a bug? (We have already reported it to Apple.)
Any ideas to get this leak closed are welcome.
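One mitigation that is sometimes suggested for writer-related memory growth (a sketch under assumptions, not a confirmed fix for this specific issue) is to wrap the per-sample work in an explicit autoreleasepool so that any autoreleased temporaries created while appending are drained promptly:
func write(sample: CMSampleBuffer) {
    // Minimal sketch: drain autoreleased temporaries after each appended buffer.
    autoreleasepool {
        if self.videoInput.isReadyForMoreMediaData {
            self.videoInput.append(sample)
        }
    }
}
If memory still grows with this in place, that points more strongly at the writer/delegate path itself.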

AVCaptureDevice builtInWideAngleCamera image does not match preview

I have a Swift project where I use a UIImageView to show a live preview capture and 'freeze' the image whenever the user taps a 'Take photo' button, showing the frozen frame in another UIImageView with identical dimensions and position.
This works great on devices that can use builtInDualCamera (such as an iPhone X), but on devices that fall back to builtInWideAngleCamera (such as a 6th-generation iPad mini), the captured image appears cropped/zoomed in compared to the preview.
Can someone explain whether it is possible, and how, to get an image identical to the one shown in the preview, as I already get with builtInDualCamera?
A minimal reproducible example is below (simply create a storyboard with two buttons and two UIImageViews and hook them up).
import UIKit
import AVFoundation
class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate, AVCapturePhotoCaptureDelegate {
@IBOutlet weak var cameraImageView: UIImageView!
@IBOutlet weak var userImageView: UIImageView!
var captureSession: AVCaptureSession? = AVCaptureSession()
var currentDevice: AVCaptureDevice?
var videoFileOutput: AVCaptureMovieFileOutput?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
var cameraOutput : AVCapturePhotoOutput?
func setupCamSession(){
if #available(iOS 10.0, *) {
if let device = AVCaptureDevice.default(.builtInDualCamera, for: AVMediaType.video, position:.front) {
currentDevice = device
} else if let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: AVMediaType.video, position: .front) {
currentDevice = device
}
} else {
// Fallback on earlier versions
let devices = AVCaptureDevice.devices().filter{ ($0 as AnyObject).hasMediaType(AVMediaType.video) && ($0 as AnyObject).position == AVCaptureDevice.Position.front }
if let captureDevice = devices.first {
currentDevice = captureDevice
}
}
if(currentDevice==nil)
{
print("failed")
return
}
else
{
captureSession?.sessionPreset = AVCaptureSession.Preset.medium
}
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: currentDevice!) else {
return
}
if (captureSession?.canAddInput(captureDeviceInput))! {
captureSession?.addInput(captureDeviceInput)
cameraOutput = AVCapturePhotoOutput()
if (captureSession?.canAddOutput(cameraOutput!))! {
captureSession?.addOutput(cameraOutput!)
}
}
else
{
print("failed")
return
}
}
func startCamSession()
{
if (captureSession==nil)
{
print("Warning: no captureSession detected")
return
}
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspect
cameraPreviewLayer?.frame = cameraImageView.bounds
cameraImageView.layer.addSublayer(cameraPreviewLayer!)
if let connection = cameraPreviewLayer?.connection {
let previewLayerConnection : AVCaptureConnection = connection
if previewLayerConnection.isVideoOrientationSupported {
previewLayerConnection.videoOrientation = .portrait
cameraPreviewLayer?.frame = cameraImageView.bounds
}
captureSession?.startRunning()
}
}
func stopCamSession()
{
captureSession?.stopRunning()
}
func photoOutput(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
if let error = error {
print(error.localizedDescription)
}
if let sampleBuffer = photoSampleBuffer, let previewBuffer = previewPhotoSampleBuffer {
let imageData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: sampleBuffer, previewPhotoSampleBuffer: previewBuffer)
let dataProvider = CGDataProvider(data: imageData! as CFData)
let cgImageRef = CGImage(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: CGColorRenderingIntent.absoluteColorimetric)
let image = UIImage(cgImage: cgImageRef!, scale: 1.0, orientation: UIImage.Orientation.leftMirrored)
self.userImageView.contentMode = .scaleAspectFit
self.userImageView.image = image
cameraPreviewLayer?.removeFromSuperlayer()
self.stopCamSession()
} else {
}
}
override func viewDidLoad() {
super.viewDidLoad()
setupCamSession()
}
@IBAction func startPressed(_ sender: Any) {
startCamSession()
}
@IBAction func takePhotoPressed(_ sender: Any) {
let settings = AVCapturePhotoSettings()
let previewPixelType = settings.availablePreviewPhotoPixelFormatTypes.first!
let previewFormat = [
kCVPixelBufferPixelFormatTypeKey as String : previewPixelType,
kCVPixelBufferWidthKey as String : 640,
kCVPixelBufferHeightKey as String : 480
]
settings.previewPhotoFormat = previewFormat
cameraOutput?.capturePhoto(with: settings, delegate: self)
}
}
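One way to make the frozen photo match what the preview layer actually showed (a sketch under assumptions, not part of the question) is to ask the preview layer which normalized region of the capture it is displaying and crop the captured CGImage to that region. Note that metadata rects are expressed in the sensor's landscape coordinate space, so depending on orientation you may need to swap width and height.
// Minimal sketch: crop a captured CGImage to the region visible in the preview layer.
func cropToPreview(_ cgImage: CGImage, previewLayer: AVCaptureVideoPreviewLayer) -> CGImage? {
    // Map the visible layer bounds into normalized (0...1) image coordinates.
    let visibleRect = previewLayer.metadataOutputRectConverted(fromLayerRect: previewLayer.bounds)
    let cropRect = CGRect(x: visibleRect.origin.x * CGFloat(cgImage.width),
                          y: visibleRect.origin.y * CGFloat(cgImage.height),
                          width: visibleRect.size.width * CGFloat(cgImage.width),
                          height: visibleRect.size.height * CGFloat(cgImage.height))
    return cgImage.cropping(to: cropRect)
}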

How can I record AVDepthData video and save it to the gallery?

I am developing an application to record RGB-D sequences on the iPhone using either the dual rear camera or the TrueDepth camera. I can capture and visualize the RGB and depth frames, and I have a version that compresses this data and saves it to the iPhone's internal files. Nevertheless, my goal is to save both sequences (RGB and depth-map) to the gallery, and I am having problems using AVAssetWriter to create a depth-map video.
I am using an iPhone X, Xcode 10.2.1 and Swift 5.
import UIKit
import AVFoundation
import AssetsLibrary
var noMoreSpace = false
class ViewController: UIViewController{
@IBOutlet weak var previewView: UIImageView!
@IBOutlet weak var timeLabel: UILabel!
@IBOutlet weak var previewModeControl: UISegmentedControl!
let session = AVCaptureSession()
let dataOutputQueue = DispatchQueue(label: "video data queue")
let videoOutput = AVCaptureVideoDataOutput()
let movieOutput = AVCaptureMovieFileOutput()
let depthOutput = AVCaptureDepthDataOutput()
let depthCapture = DepthCapture()
var previewLayer = AVCaptureVideoPreviewLayer()
var inputDevice: AVCaptureDeviceInput!
let videoDeviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera, .builtInTrueDepthCamera], mediaType: .video, position: .unspecified)
var Timestamp: String {
let currentDate = NSDate()
let dateFormatter = DateFormatter()
dateFormatter.dateFormat = "ddMM_HHmmss"
return "\(dateFormatter.string(from: currentDate as Date))"
}
var isRecording = false
var time = 0
var timer = Timer()
enum PreviewMode: Int {
case original
case depth
}
var previewMode = PreviewMode.original
var depthMap: CIImage?
var scale: CGFloat = 0.0
//let sessionQueue = DispatchQueue(label: "session queue")
override func viewDidLoad() {
super.viewDidLoad()
timeLabel.isHidden = true //TODO: Disable the rest of the UI
previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
configureCaptureSession()
session.startRunning()
}
func configureCaptureSession() {
session.beginConfiguration()
let camera = AVCaptureDevice.default(.builtInTrueDepthCamera, for: .video, position: .unspecified)!
do {
let cameraInput = try AVCaptureDeviceInput(device: camera)
if session.canAddInput(cameraInput){
session.sessionPreset = .vga640x480
session.addInput(cameraInput)
self.inputDevice = cameraInput
}
if session.canAddOutput(videoOutput){
videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
session.addOutput(videoOutput)
let videoConnection = videoOutput.connection(with: .video)
videoConnection?.videoOrientation = .portrait
//previewLayer = AVCaptureVideoPreviewLayer(session: session)
//previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
//previewLayer.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
//previewView.layer.addSublayer(previewLayer)
//previewLayer.position = CGPoint(x: self.previewView.frame.width / 2, y: self.previewView.frame.height / 2)
//previewLayer.bounds = previewView.frame
}
//Add Depth output to the session
if session.canAddOutput(depthOutput){
session.addOutput(depthOutput)
depthOutput.setDelegate(self, callbackQueue: dataOutputQueue)
depthOutput.isFilteringEnabled = true
let depthConnection = depthOutput.connection(with: .depthData)
depthConnection?.videoOrientation = .portrait
}
/*if session.canAddOutput(movieOutput){
session.addOutput(movieOutput)
}*/
} catch {
print("Error")
}
let outputRect = CGRect(x: 0, y: 0, width: 1, height: 1)
let videoRect = videoOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
let depthRect = depthOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
// Calculate the scaling factor between videoRect and depthRect
scale = max(videoRect.width, videoRect.height) / max(depthRect.width, depthRect.height)
// Change the AVCaptureDevice configuration, so you need to lock it
do{
try camera.lockForConfiguration()
// Set the AVCaptureDevice‘s minimum frame duration (which is the inverse of the maximum frame rate) to be equal to the supported frame rate of the depth data
if let frameDuration = camera.activeDepthDataFormat?.videoSupportedFrameRateRanges.first?.minFrameDuration{
camera.activeVideoMinFrameDuration = frameDuration
}
// Unlock the configuration you locked
camera.unlockForConfiguration()
}catch{
fatalError(error.localizedDescription)
}
session.commitConfiguration()
}
@IBAction func startStopRecording(_ sender: Any) {
if isRecording{
stopRecording()
} else {
startRecording()
}
}
func startRecording(){
timeLabel.isHidden = false
timer = Timer.scheduledTimer(timeInterval: 1, target: self, selector: #selector(ViewController.timerAction), userInfo: nil, repeats: true)
let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
let flagTime = Timestamp
let auxStr = flagTime+"_output.mp4"
let fileUrl = paths[0].appendingPathComponent(auxStr)
depthCapture.prepareForRecording(timeFlag: flagTime)
movieOutput.startRecording(to: fileUrl, recordingDelegate: self)
print(fileUrl.absoluteString)
print("Recording started")
self.isRecording = true
}
func stopRecording(){
timeLabel.isHidden = true
timer.invalidate()
time = 0
timeLabel.text = "0"
movieOutput.stopRecording()
print("Stopped recording!")
self.isRecording = false
do {
try depthCapture.finishRecording(success: { (url: URL) -> Void in
print(url.absoluteString)
})
} catch {
print("Error while finishing depth capture.")
}
}
@objc func timerAction() {
time += 1
timeLabel.text = String(time)
}
@IBAction func previeModeChanged(_ sender: UISegmentedControl) {
previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
}
@IBAction func switchCamera(_ sender: Any) {
let currentDevice = self.inputDevice.device
let currentPosition = currentDevice.position
let preferredPosition: AVCaptureDevice.Position
let preferredDeviceType: AVCaptureDevice.DeviceType
let devices = self.videoDeviceDiscoverySession.devices
var newVideoDevice: AVCaptureDevice? = nil
switch currentPosition {
case .unspecified, .front:
preferredPosition = .back
preferredDeviceType = .builtInDualCamera
case .back:
preferredPosition = .front
preferredDeviceType = .builtInTrueDepthCamera
@unknown default:
preferredPosition = .back
preferredDeviceType = .builtInDualCamera
}
// First, seek a device with both the preferred position and device type. Otherwise, seek a device with only the preferred position. (TODO: understand these conditions better)
if let device = devices.first(where: { $0.position == preferredPosition && $0.deviceType == preferredDeviceType }) {
newVideoDevice = device
} else if let device = devices.first(where: { $0.position == preferredPosition }) {
newVideoDevice = device
}
if let videoDevice = newVideoDevice {
do {
let cameraInput = try AVCaptureDeviceInput(device: videoDevice)
self.session.beginConfiguration()
self.session.removeInput(self.inputDevice)
if self.session.canAddInput(cameraInput) {
session.sessionPreset = .vga640x480
self.session.addInput(cameraInput)
self.inputDevice = cameraInput
}else {
self.session.addInput(self.inputDevice)
}
self.session.commitConfiguration()
} catch{
print("Error occurred while creating video device input: \(error)")
}
}
}
}
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate{
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
let image = CIImage(cvPixelBuffer: pixelBuffer!)
let previewImage: CIImage
switch previewMode {
case .original:
previewImage = image
case .depth:
previewImage = depthMap ?? image
}
let displayImage = UIImage(ciImage: previewImage)
DispatchQueue.main.async {[weak self] in self?.previewView.image = displayImage}
}
}
extension ViewController: AVCaptureDepthDataOutputDelegate{
func depthDataOutput(_ output: AVCaptureDepthDataOutput, didOutput depthData: AVDepthData, timestamp: CMTime, connection: AVCaptureConnection) {
var convertedDepth: AVDepthData
// Ensure the depth data is the format you need: 32 bit FP disparity.???
if depthData.depthDataType != kCVPixelFormatType_DepthFloat16{
convertedDepth = depthData.converting(toDepthDataType: kCVPixelFormatType_DepthFloat32)
}else{
convertedDepth = depthData
}
// You save the depth data map from the AVDepthData object as a CVPixelBuffer
let pixelBuffer = convertedDepth.depthDataMap
//Using an extension, you then clamp the pixels in the pixel buffer to keep them between 0.0 and 1.0.
pixelBuffer.clamp()
// Convert the pixel buffer into a CIImage
let depthMap = CIImage(cvPixelBuffer: pixelBuffer)
// You store depthMap in a class variable for later use
DispatchQueue.main.async {
[weak self] in self?.depthMap = depthMap
}
}
}
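For the depth side, one possible direction (a sketch under assumptions, not taken from the question) is to append each depth frame to an AVAssetWriterInputPixelBufferAdaptor and, once finished, export the file to the Photos library with PHPhotoLibrary. The names DepthVideoRecorder, depthWriter, depthWriterInput, depthAdaptor and startTime below are placeholders for illustration.
import AVFoundation
import Photos
// Sketch only: append depth frames to an asset writer and save the result to the gallery.
final class DepthVideoRecorder {
    private var depthWriter: AVAssetWriter?
    private var depthWriterInput: AVAssetWriterInput?
    private var depthAdaptor: AVAssetWriterInputPixelBufferAdaptor?
    private var startTime: CMTime?

    func start(outputURL: URL, width: Int, height: Int) throws {
        let writer = try AVAssetWriter(outputURL: outputURL, fileType: .mp4)
        let input = AVAssetWriterInput(mediaType: .video, outputSettings: [
            AVVideoCodecKey: AVVideoCodecType.h264,
            AVVideoWidthKey: width,
            AVVideoHeightKey: height
        ])
        input.expectsMediaDataInRealTime = true
        let adaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: input,
                                                           sourcePixelBufferAttributes: nil)
        writer.add(input)
        writer.startWriting()
        depthWriter = writer; depthWriterInput = input; depthAdaptor = adaptor
    }

    // Call from depthDataOutput(_:didOutput:timestamp:connection:).
    func append(depthData: AVDepthData, timestamp: CMTime) {
        guard let writer = depthWriter, let input = depthWriterInput,
              let adaptor = depthAdaptor, input.isReadyForMoreMediaData else { return }
        if startTime == nil {
            startTime = timestamp
            writer.startSession(atSourceTime: timestamp)
        }
        // NOTE: a DepthFloat32 buffer is not directly encodable as H.264; in practice
        // you would first render it into a BGRA/grayscale pixel buffer (e.g. via
        // CIContext) before appending. Omitted here for brevity.
        adaptor.append(depthData.depthDataMap, withPresentationTime: timestamp)
    }

    func finish(completion: @escaping () -> Void) {
        depthWriterInput?.markAsFinished()
        depthWriter?.finishWriting {
            guard let url = self.depthWriter?.outputURL else { return }
            PHPhotoLibrary.shared().performChanges({
                PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: url)
            }, completionHandler: { _, _ in completion() })
        }
    }
}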

Object tracking using the Vision framework in iOS 11

I want to detect an object and track it using the Vision framework. I have object detection working, and tracking partially working, but the tracking accuracy is poor: the tracker frequently loses the object between frames.
I want much better accuracy across frames.
Please check the code below for detecting and tracking objects:
import UIKit
import AVFoundation
import Vision
class ViewController: UIViewController {
private lazy var captureSession: AVCaptureSession = {
let session = AVCaptureSession()
session.sessionPreset = AVCaptureSession.Preset.photo
guard let backCamera = AVCaptureDevice.default(for: .video),
let input = try? AVCaptureDeviceInput(device: backCamera) else
{
return session
}
session.addInput(input)
return session
}()
private lazy var cameraLayer: AVCaptureVideoPreviewLayer =
AVCaptureVideoPreviewLayer(session: self.captureSession)
private let handler = VNSequenceRequestHandler()
fileprivate var lastObservation: VNDetectedObjectObservation?
lazy var highlightView: UIView = {
let view = UIView()
view.layer.borderColor = UIColor.red.cgColor
view.layer.borderWidth = 4
view.backgroundColor = .clear
return view
}()
override func viewDidLoad() {
super.viewDidLoad()
view.layer.addSublayer(cameraLayer)
view.addSubview(highlightView)
let output = AVCaptureVideoDataOutput()
output.setSampleBufferDelegate(self, queue: DispatchQueue(label:
"queue"))
captureSession.addOutput(output)
captureSession.startRunning()
let tapGestureRecognizer = UITapGestureRecognizer(target: self,
action: #selector(tapAction))
view.addGestureRecognizer(tapGestureRecognizer)
}
override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
cameraLayer.frame = view.bounds
}
// MARK: - Actions
@objc private func tapAction(recognizer: UITapGestureRecognizer) {
highlightView.frame.size = CGSize(width: 120, height: 120)
highlightView.center = recognizer.location(in: view)
let originalRect = highlightView.frame
var convertedRect =
cameraLayer.metadataOutputRectConverted(fromLayerRect:
originalRect)
convertedRect.origin.y = 1 - convertedRect.origin.y
lastObservation = VNDetectedObjectObservation(boundingBox:
convertedRect)
}
fileprivate func handle(_ request: VNRequest, error: Error?) {
DispatchQueue.main.async {
guard let newObservation = request.results?.first as?
VNDetectedObjectObservation else {
return
}
self.lastObservation = newObservation
var transformedRect = newObservation.boundingBox
transformedRect.origin.y = 1 - transformedRect.origin.y
let convertedRect =
self.cameraLayer.layerRectConverted(fromMetadataOutputRect:
transformedRect)
self.highlightView.frame = convertedRect
}
}
}
extension ViewController:
AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer:
CMSampleBuffer, from connection: AVCaptureConnection) {
guard let pixelBuffer =
CMSampleBufferGetImageBuffer(sampleBuffer),
let observation = lastObservation else {
return
}
let request = VNTrackObjectRequest(detectedObjectObservation:
observation) { [unowned self] request, error in
self.handle(request, error: error)
}
request.trackingLevel = .accurate
do {
try handler.perform([request], on: pixelBuffer)
}
catch {
print(error)
}
}
}
Any help will be appreciated!!
Thanks.
I am not so good at Vision and Core ML, but your code looks fine. One thing you can do is handle the case where Vision no longer gets a good track from the buffer: mark the request's isLastFrame property true when the tracking request's confidence value falls too low.
if !trackingRequest.isLastFrame {
if observation.confidence > 0.7 {
trackingRequest.inputObservation = observation
} else {
trackingRequest.isLastFrame = true
}
newTrackingRequests.append(trackingRequest)
}
This way it is easy to tell whether the Vision tracking request has lost the tracked object or is just tracking the wrong object.
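To make that concrete, here is a minimal sketch under assumptions (the names sequenceHandler, trackingRequest and the 0.3 threshold are not from the original code): reuse one VNTrackObjectRequest across frames, feed each new observation back in, and stop the request when confidence collapses.
// Sketch only: reuse a single VNTrackObjectRequest across frames instead of
// recreating it, and drop it when the track is lost.
private let sequenceHandler = VNSequenceRequestHandler()
private var trackingRequest: VNTrackObjectRequest?

func track(in pixelBuffer: CVPixelBuffer) {
    guard let request = trackingRequest else { return }
    request.trackingLevel = .accurate
    try? sequenceHandler.perform([request], on: pixelBuffer)

    guard let observation = request.results?.first as? VNDetectedObjectObservation else { return }
    if observation.confidence > 0.3 {          // threshold chosen for illustration
        // Feed the latest observation back in for the next frame.
        request.inputObservation = observation
    } else {
        // Tell Vision this is the last frame for this request and stop tracking.
        request.isLastFrame = true
        trackingRequest = nil
    }
}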

Swift 3: How do I enable flash on a custom AVFoundation camera?

I have a very basic AVFoundation camera with a captureButton that takes a photo and sends it to secondCameraController to be displayed. My problem is that there is a lot of iOS 10 deprecation and I'm not sure how to add a flash when I press the captureButton. Any help will be highly appreciated. My code is below. Thank you.
class CameraController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
let captureSession = AVCaptureSession()
var previewLayer: CALayer!
var captureDevice: AVCaptureDevice!
var takePhoto: Bool = false
override func viewDidLoad() {
super.viewDidLoad()
view.backgroundColor = .white
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
prepareCamera()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
navigationController?.setNavigationBarHidden(true, animated: true)
}
let cameraView: UIView = {
let view = UIView()
view.backgroundColor = .red
return view
}()
func prepareCamera() {
captureSession.sessionPreset = AVCaptureSessionPresetPhoto
if let availableDevices = AVCaptureDeviceDiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: .back).devices {
captureDevice = availableDevices.first
beginSession()
}
}
func beginSession() {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
captureSession.addInput(captureDeviceInput)
} catch {
print(error.localizedDescription)
}
if let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
self.previewLayer = previewLayer
self.view.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = CGRect(x: 0, y: 0, width: view.frame.width, height: view.frame.height)
previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
self.view.addSubview(captureButton)
let width: CGFloat = 85
captureButton.frame = CGRect(x: (previewLayer.frame.width / 2) - width / 2, y: (previewLayer.frame.height) - width - 25, width: width, height: 85)
captureSession.startRunning()
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)]
dataOutput.alwaysDiscardsLateVideoFrames = true
if captureSession.canAddOutput(dataOutput) {
captureSession.addOutput(dataOutput)
}
captureSession.commitConfiguration()
let queue = DispatchQueue(label: "com.cheekylabsltd.camera")
dataOutput.setSampleBufferDelegate(self, queue: queue)
}
}
func handleCapture() {
takePhoto = true
}
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
if takePhoto {
takePhoto = false
if let image = self.getImageFromSampleBuffer(buffer: sampleBuffer) {
let secondController = SecondCameraController()
secondController.takenPhoto = image
DispatchQueue.main.async {
self.present(secondController, animated: true, completion: {
self.stopCaptureSession()
})
}
}
}
}
func getImageFromSampleBuffer(buffer: CMSampleBuffer) -> UIImage? {
if let pixelBuffer = CMSampleBufferGetImageBuffer(buffer) {
let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
let context = CIContext()
let imageRect = CGRect(x: 0, y: 0, width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer))
if let image = context.createCGImage(ciImage, from: imageRect) {
return UIImage(cgImage: image, scale: UIScreen.main.scale, orientation: .right)
}
}
return nil
}
func stopCaptureSession() {
self.captureSession.stopRunning()
if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
for input in inputs {
self.captureSession.removeInput(input)
}
}
}
lazy var captureButton: UIButton = {
let button = UIButton(type: .system)
button.backgroundColor = .white
button.layer.cornerRadius = 42.5
button.clipsToBounds = true
button.alpha = 0.40
button.layer.borderWidth = 4
button.layer.borderColor = greenColor.cgColor
button.addTarget(self, action: #selector(handleCapture), for: .touchUpInside)
return button
}()
}
Try this code :
Swift v3.0
private func flashOn(device:AVCaptureDevice)
{
do{
if (device.hasTorch)
{
try device.lockForConfiguration()
device.torchMode = .on
device.flashMode = .on
device.unlockForConfiguration()
}
}catch{
//DISABLE FLASH BUTTON HERE IF ERROR
print("Device torch/flash error");
}
}
//FOR FLASH OFF CODE
private func flashOff(device:AVCaptureDevice)
{
do{
if (device.hasTorch){
try device.lockForConfiguration()
device.torchMode = .off
device.flashMode = .off
device.unlockForConfiguration()
}
}catch{
//DISABLE FLASH BUTTON HERE IF ERROR
print("Device torch/flash error");
}
}
// METHOD
//private let session = AVCaptureSession()
//MARK: FLASH UITLITY METHODS
func toggleFlash() {
var device : AVCaptureDevice!
if #available(iOS 10.0, *) {
let videoDeviceDiscoverySession = AVCaptureDeviceDiscoverySession(deviceTypes: [.builtInWideAngleCamera, .builtInDuoCamera], mediaType: AVMediaTypeVideo, position: .unspecified)!
let devices = videoDeviceDiscoverySession.devices!
device = devices.first!
} else {
// Fallback on earlier versions
device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
}
if ((device as AnyObject).hasMediaType(AVMediaTypeVideo))
{
if (device.hasTorch)
{
self.session.beginConfiguration()
//self.objOverlayView.disableCenterCameraBtn();
if device.isTorchActive == false {
self.flashOn(device: device)
} else {
self.flashOff(device: device);
}
//self.objOverlayView.enableCenterCameraBtn();
self.session.commitConfiguration()
}
}
}
Swift 4
So there are two different behaviors to choose from in AVFoundation. One is a capture-device torch switch. Connect the torchSwitch action to some view, and be sure to change CameraManager.shared.backDevice to your instance of the front or back device that provides the current input.
@IBAction func torchSwitch(_ sender: Any) {
guard let device = CameraManager.shared.backDevice else { return }
guard device.isTorchAvailable else { return }
do {
try device.lockForConfiguration()
device.torchMode = device.torchMode == .on ? .off : .on
if device.torchMode == .on {
try device.setTorchModeOn(level: 0.7)
}
device.unlockForConfiguration()
} catch {
debugPrint(error)
}
}
AVFoundation has deprecated device.flashMode
Now, to set the flash, declare a variable on your camera object or view controller. The value here will be the default.
var flash: AVCaptureDevice.FlashMode = .off
Connect this action to some view
@IBAction func torchSwitch(_ sender: Any) { flash = (flash == .on) ? .off : .on }
Then when you want to capture an image, use AVCapturePhotoOutput and prepare the photo settings. stillCameraOutput is an instance of AVCapturePhotoOutput.
let settings = AVCapturePhotoSettings()
settings.flashMode = flash
stillCameraOutput.capturePhoto(with: settings, delegate: self)
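To round this out, here is an assumed delegate sketch (the class name CameraViewController and the imageView property are placeholders, not from the answer above) showing where the flash-enabled capture result arrives:
import UIKit
import AVFoundation
// Sketch only: receive the capture started with stillCameraOutput.capturePhoto(...).
extension CameraViewController: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput,
                     didFinishProcessingPhoto photo: AVCapturePhoto,
                     error: Error?) {
        guard error == nil,
              let data = photo.fileDataRepresentation(),
              let image = UIImage(data: data) else { return }
        // Display the captured image (imageView is an assumed outlet).
        imageView.image = image
    }
}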
Swift 4:
The following code works fine for me:
private enum FlashPhotoMode {
case on
case off
}
@IBOutlet weak var flashPhotoModeButton: UIButton!
@IBAction func toggleFlashPhotoMode(_ flashPhotoModeButton: UIButton ) {
sessionQueue.async {
self.flashPhotoMode = (self.flashPhotoMode == .on) ? .off : .on
let flashPhotoMode = self.flashPhotoMode
DispatchQueue.main.async {
if flashPhotoMode == .on {
self.flashPhotoModeButton.setBackgroundImage(UIImage(named: "flashON"), for: .normal)
print("flashON")
} else {
self.flashPhotoModeButton.setBackgroundImage(UIImage(named: "flashOFF"), for: .normal)
print("flashOFF")
}
}
}
}
@IBAction private func capturePhoto(_ photoButton: UIButton) {
................
.......................
if self.videoDeviceInput.device.isFlashAvailable {
if self.flashPhotoMode == .on {
photoSettings.flashMode = .on
print("FLASH ON ")
} else {
print("FLASH OFF ")
photoSettings.flashMode = .off
}
}
}
Thanks!