Support all orientations in AVCaptureSession - Swift

I have a simple camera app, and I want it to rotate the camera view when I rotate the iPhone.
Currently, I set up the AVCaptureSession like this:
func setupCaptureSession(camera: AVCaptureDevice.Position = .front) {
    guard let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: camera) else {
        fatalError("Error getting AVCaptureDevice.")
    }
    guard let input = try? AVCaptureDeviceInput(device: device) else {
        fatalError("Error getting AVCaptureDeviceInput")
    }
    DispatchQueue.global(qos: .userInitiated).async { [weak self] in
        guard let self = self else { return }
        self.session = AVCaptureSession()
        self.session?.sessionPreset = .hd1280x720
        self.session?.addInput(input)
        let output = AVCaptureVideoDataOutput()
        output.alwaysDiscardsLateVideoFrames = true
        output.setSampleBufferDelegate(self, queue: .main)
        self.session?.addOutput(output)
        output.connections.first?.videoOrientation = .portrait
        self.session?.startRunning()
    }
}
Currently it supports portrait, and if I remove output.connections.first?.videoOrientation = .portrait it supports only landscape. I want it to support all orientations, with the camera view rotating when I rotate the phone. Your help will be appreciated.

Use orientationDidChangeNotification to detect when the device rotates. Add the observer in viewDidLoad:
NotificationCenter.default.addObserver(self, selector: #selector(deviceOrientationDidChange), name: UIDevice.orientationDidChangeNotification, object: nil)
and add the handler:
@objc private func deviceOrientationDidChange() {
    switch UIDevice.current.orientation {
    case .portrait:
        self.videoDataOutput.connections.first?.videoOrientation = .portrait
    case .portraitUpsideDown:
        self.videoDataOutput.connections.first?.videoOrientation = .portraitUpsideDown
    // Note the swap: device landscapeLeft corresponds to video landscapeRight, and vice versa
    case .landscapeLeft:
        self.videoDataOutput.connections.first?.videoOrientation = .landscapeRight
    case .landscapeRight:
        self.videoDataOutput.connections.first?.videoOrientation = .landscapeLeft
    default: break
    }
}
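
One detail worth noting: UIDevice only posts orientationDidChangeNotification while orientation-notification generation is enabled. A minimal sketch of the surrounding lifecycle code, assuming a UIViewController subclass with the videoDataOutput property used above:

override func viewDidLoad() {
    super.viewDidLoad()
    // UIDevice only posts the notification while generation is enabled
    UIDevice.current.beginGeneratingDeviceOrientationNotifications()
    NotificationCenter.default.addObserver(self, selector: #selector(deviceOrientationDidChange), name: UIDevice.orientationDidChangeNotification, object: nil)
}

deinit {
    NotificationCenter.default.removeObserver(self)
    UIDevice.current.endGeneratingDeviceOrientationNotifications()
}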

Related

Having trouble with flipping camera in swiftui / avfoundation / AVCaptureDeviceInput

I am coding a camera with SwiftUI using AVFoundation and was able to get the setup to work as intended. However, as I implement a flip-camera functionality, I'm running into an error where, after flipping, it just goes to a black screen. I'm assuming the input gets removed but the correct flipped input doesn't get added.
Here is my code:
class CameraViewModel: NSObject, ObservableObject, AVCaptureFileOutputRecordingDelegate, AVCapturePhotoCaptureDelegate {
    ...
    @Published var session = AVCaptureSession()
    @objc dynamic var videoDeviceInput: AVCaptureDeviceInput!
    private let sessionQueue = DispatchQueue(label: "session queue")

    func setUp() {
        do {
            self.session.beginConfiguration()
            let cameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front)
            let videoInput = try AVCaptureDeviceInput(device: cameraDevice!)
            let audioDevice = AVCaptureDevice.default(for: .audio)
            let audioInput = try AVCaptureDeviceInput(device: audioDevice!)
            // MARK: Audio Input
            if self.session.canAddInput(videoInput) && self.session.canAddInput(audioInput) {
                self.session.addInput(videoInput)
                self.session.addInput(audioInput)
                self.videoDeviceInput = videoInput
            }
            if self.session.canAddOutput(self.output) {
                self.session.addOutput(self.output)
            }
            if self.session.canAddOutput(self.photoOutput) {
                self.session.addOutput(self.photoOutput)
            }
            self.session.commitConfiguration()
        } catch {
            print(error.localizedDescription)
        }
    }

    func changeCamera() {
        sessionQueue.async {
            if self.videoDeviceInput != nil {
                let currentVideoDevice = self.videoDeviceInput.device
                let currentPosition = currentVideoDevice.position
                let preferredPosition: AVCaptureDevice.Position
                switch currentPosition {
                case .unspecified, .front:
                    preferredPosition = .back
                case .back:
                    preferredPosition = .front
                @unknown default:
                    print("Unknown capture position. Defaulting to back, dual-camera.")
                    preferredPosition = .back
                }
                print("current pos is \(currentPosition.rawValue) and preferred position is \(preferredPosition.rawValue)")
                do {
                    self.session.beginConfiguration()
                    // remove device as needed
                    self.session.removeInput(self.videoDeviceInput)
                    let newCameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: preferredPosition)
                    let newVideoInput = try AVCaptureDeviceInput(device: newCameraDevice!)
                    let newAudioDevice = AVCaptureDevice.default(for: .audio)
                    let newAudioInput = try AVCaptureDeviceInput(device: newAudioDevice!)
                    // MARK: Audio Input
                    if self.session.canAddInput(newVideoInput) && self.session.canAddInput(newAudioInput) {
                        self.session.addInput(newVideoInput)
                        self.session.addInput(newAudioInput)
                        self.videoDeviceInput = newVideoInput
                    }
                    self.session.commitConfiguration()
                } catch {
                    print(error.localizedDescription)
                }
            }
        }
    }
}
I'm not sure what I'm doing wrong. I've looked up previous Stack Overflow threads and online resources, and all they say is to get the device input position, change it, and remove the old input before committing the configuration. Any help will be greatly appreciated!
Edit: I found the solution was to get rid of the audio-input code in changeCamera, as the audio is still captured without it.
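In other words, leave the audio input that was attached in setUp alone and only swap the video input. A minimal sketch of the corrected configuration block, assuming the same properties as in the question:

self.session.beginConfiguration()
// Swap only the video input; the audio input added in setUp() stays attached
self.session.removeInput(self.videoDeviceInput)
if let newCameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: preferredPosition),
   let newVideoInput = try? AVCaptureDeviceInput(device: newCameraDevice),
   self.session.canAddInput(newVideoInput) {
    self.session.addInput(newVideoInput)
    self.videoDeviceInput = newVideoInput
}
self.session.commitConfiguration()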
The problem is in the code you are not showing.
When you are displaying the Image in your SwiftUI view, the orientation depends on the camera in use.
For the front camera, it is .upMirrored.
If you switch to use the back camera, you need to use .up.
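A minimal sketch of that orientation handling, assuming the view model exposes the latest frame as a CGImage and the current camera position (frame and cameraPosition are hypothetical names, not part of the question's code):

import SwiftUI
import AVFoundation

struct CameraPreview: View {
    @ObservedObject var model: CameraViewModel

    var body: some View {
        // `frame` and `cameraPosition` are assumed properties on the view model
        if let cgImage = model.frame {
            Image(decorative: cgImage, scale: 1.0,
                  // The front camera needs mirroring; the back camera does not
                  orientation: model.cameraPosition == .front ? .upMirrored : .up)
                .resizable()
                .aspectRatio(contentMode: .fill)
        }
    }
}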

Why does adding an AVCapturePhotoOutput to my AVCaptureSession cause the AVCaptureVideoPreviewLayer to break on iPhone X?

After configuring my video layer like so:
public class VideoLayerView: UIView {
    override public class var layerClass: Swift.AnyClass {
        return AVCaptureVideoPreviewLayer.self
    }

    public override func awakeFromNib() {
        super.awakeFromNib()
        self.clipsToBounds = true
    }

    public func configureCaptureLayer(session: AVCaptureSession?) {
        guard let captureLayer = self.layer as? AVCaptureVideoPreviewLayer else { return }
        captureLayer.session = session
        captureLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
    }
}
And setting up the session:
func initializeCamera() {
    guard !captureSession.isRunning else {
        print("Capture session already running")
        return
    }
    guard Permissions.shared.isCameraAuthorized else {
        print("Requesting Camera Permission")
        Permissions.shared.requestCamera { _ in
            DispatchQueue.main.async {
                self.initializeCamera()
            }
        }
        return
    }
    captureSession.beginConfiguration()
    captureSession.sessionPreset = .photo
    if let captureDevice = self.captureDevice {
        if let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice), captureSession.canAddInput(captureDeviceInput) {
            captureSession.addInput(captureDeviceInput)
        } else {
            print("Failed to add capture device input.")
        }
    }
    photoOutput.maxPhotoQualityPrioritization = .quality
    if captureSession.canAddOutput(photoOutput) {
        photoOutput.isHighResolutionCaptureEnabled = true
        captureSession.addOutput(photoOutput)
    } else {
        print("Failed to add photo output")
    }
    captureSession.commitConfiguration()
    if let connection = photoOutput.connection(with: .video) {
        connection.preferredVideoStabilizationMode = .standard
    }
    videolayerView.configureCaptureLayer(session: captureSession)
    sessionQueue.async { [weak self] in
        self?.captureSession.startRunning()
    }
}
the preview layer displays black on my iPhone X. However, it continues to function on other test devices. Removing:
if captureSession.canAddOutput(photoOutput) {
    photoOutput.isHighResolutionCaptureEnabled = true
    captureSession.addOutput(photoOutput)
} else {
    print("Failed to add photo output")
}
resolves the issue, but this code is necessary to capture photos. Why is this not working?
This is due to a camera-state bug in iOS caused by code written previously (and since removed) that incorrectly configured the camera. Restart your device and the camera will resume functioning.

Firebase text detection not working after updating to Xcode 11 and iOS 13

Hello everyone. I have updated my Xcode to 11 and my iPhone to iOS 13. I am using Firebase ML text detection with the live camera to detect text, and I have updated the Firebase libraries. The problem is that this code no longer works and shows me a strange error (see the error image).
This error did not appear before the update. I have tried many ways to solve this problem, with no luck so far. I hope someone can help me.
// MARK: - Camera setup view
func cameraSetUp() {
    // MARK: Input
    captureSession.sessionPreset = .photo
    guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
    guard let input = try? AVCaptureDeviceInput.init(device: captureDevice) else { return }
    captureSession.addInput(input)
    captureSession.startRunning()
    // MARK: Output
    let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    cameraview.layer.addSublayer(previewLayer)
    previewLayer.frame = cameraview.layer.bounds
    let dataoutput = AVCaptureVideoDataOutput()
    let queue = DispatchQueue(label: "VideoQueue")
    dataoutput.setSampleBufferDelegate(self, queue: queue)
    captureSession.addOutput(dataoutput)
}

// MARK: - Camera captureOutput
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    self.cameraOperation(buffer: sampleBuffer)
}

// MARK: - Camera operation
func cameraOperation(buffer: CMSampleBuffer) {
    let textRecognizer = vision.onDeviceTextRecognizer()
    let metadata = VisionImageMetadata() // not in the original snippet; metadata was used undeclared, so presumably created here or held as a property
    Using_back_facing_camera(metadata: metadata)
    let image = VisionImage(buffer: buffer)
    image.metadata = metadata
    // Using back-facing camera
    self.detectText(image: image, textRecognizer: textRecognizer)
}

func Using_back_facing_camera(metadata: VisionImageMetadata) {
    // Using back-facing camera
    let devicePosition: AVCaptureDevice.Position = .back
    let deviceOrientation = UIDevice.current.orientation
    switch deviceOrientation {
    case .portrait:
        metadata.orientation = devicePosition == .front ? .leftTop : .rightTop
    case .landscapeLeft:
        metadata.orientation = devicePosition == .front ? .bottomLeft : .topLeft
    case .portraitUpsideDown:
        metadata.orientation = devicePosition == .front ? .rightBottom : .leftBottom
    case .landscapeRight:
        metadata.orientation = devicePosition == .front ? .topRight : .bottomRight
    case .faceDown, .faceUp, .unknown:
        metadata.orientation = .leftTop
    }
}

// MARK: - This func is responsible for detecting the text
func detectText(image: VisionImage, textRecognizer: VisionTextRecognizer) {
    // let imageTest = VisionImage(image: UIImage(named: "test1")!)
    textRecognizer.process(image) { [weak self] result, error in
        guard let self = self else { return }
        guard error == nil else { return }
        guard let result = result else { return }
        if self.passToAnotherPage == true {
            print(result.text)
            self.KSSView.text = result.text
            self.resultText = result.text
            self.filterCamera(resultText: self.resultText)
        } else {
            self.resultText = ""
        }
    }
}
The error highlighted in the error image is harmless. There are multiple other errors in the log that may indicate issue(s) in the app's code.
Please try out the text recognition examples in ML Kit's QuickStart (mlvision) sample app, available at:
https://github.com/firebase/quickstart-ios.git
It works fine on iOS 13.1 with Xcode 11.0.
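Independent of the log noise, note that cameraOperation above creates a new recognizer for every frame and processes every frame it receives. Reusing one recognizer and dropping frames while a request is in flight is a common pattern for live detection; a minimal sketch using only the APIs already shown in the question:

// Reuse one recognizer instead of creating one per frame
lazy var textRecognizer = vision.onDeviceTextRecognizer()
// Simple back-pressure flag: drop frames while a request is in flight
var isProcessingFrame = false

func cameraOperation(buffer: CMSampleBuffer) {
    guard !isProcessingFrame else { return }
    isProcessingFrame = true
    let metadata = VisionImageMetadata()
    Using_back_facing_camera(metadata: metadata)
    let image = VisionImage(buffer: buffer)
    image.metadata = metadata
    textRecognizer.process(image) { [weak self] result, error in
        self?.isProcessingFrame = false
        // handle result/error as in detectText above
    }
}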

AVFoundation PDF417 scanner doesn't always work

I am creating an app using Swift 4 and Xcode 9 that scans PDF417 barcodes using AVFoundation. The scanner works with some codes, but it doesn't recognize the PDF417 barcode that you would find on the front of a CA Lottery Scratchers ticket, for example.
Is there anything I am missing to make it work? Below is my code:
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: AVMediaType.video, position: .back)
guard let captureDevice = deviceDiscoverySession.devices.first else {
    print("Failed to get the camera device")
    return
}
do {
    captureSession = AVCaptureSession()
    let input = try AVCaptureDeviceInput(device: captureDevice)
    captureSession!.addInput(input)
    let captureMetadataOutput = AVCaptureMetadataOutput()
    captureSession!.addOutput(captureMetadataOutput)
    captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
    captureMetadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.pdf417]
    videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
    videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    videoPreviewLayer?.frame = view.layer.bounds
    view.layer.addSublayer(videoPreviewLayer!)
    captureSession?.startRunning()
} catch {
    print(error)
    return
}

func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    // Get the metadata object
    let metadataObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
    if scanType.contains(metadataObj.type) {
        let barCodeObj = videoPreviewLayer?.transformedMetadataObject(for: metadataObj)
        if metadataObj.stringValue != nil {
            callDelegate(metadataObj.stringValue)
            captureSession?.stopRunning()
            AudioServicesPlayAlertSound(SystemSoundID(kSystemSoundID_Vibrate))
            navigationController?.popViewController(animated: true)
        }
    }
}
Thanks!
Replace your initialization code for the scanner with the following, either in viewDidLoad or in whichever method you'd like it to live:
// Global vars used in init below
var captureSession: AVCaptureSession!
var previewLayer: AVCaptureVideoPreviewLayer!

func setupCaptureInputDevice() {
    let cameraMediaType = AVMediaType.video
    captureSession = AVCaptureSession()

    // get the video capture device, which should be of type video
    guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else {
        // if there is an error then something is wrong, so dismiss
        dismiss(animated: true, completion: nil)
        return
    }

    let videoInput: AVCaptureDeviceInput
    // create a capture input for the device obtained above
    do {
        videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
    } catch {
        return
    }

    // it is important to check whether we can add the input,
    // because adding it without checking could cause a crash
    if captureSession.canAddInput(videoInput) {
        captureSession.addInput(videoInput)
    } else {
        // dismiss or display error
        return
    }

    // get ready to capture output somewhere
    let metadataOutput = AVCaptureMetadataOutput()
    // again, check that the operation is possible before doing it
    if captureSession.canAddOutput(metadataOutput) {
        captureSession.addOutput(metadataOutput)
        // set the metadataOutput's delegate to self and run callbacks on the main thread
        metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        // specify your code type
        metadataOutput.metadataObjectTypes = [.pdf417]
    } else {
        // dismiss or display error
        return
    }

    // the preview layer now displays the capture session
    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    // just add it to the screen
    previewLayer.frame = view.layer.bounds
    previewLayer.videoGravity = .resizeAspectFill
    view.layer.addSublayer(previewLayer)
    // and begin capture
    captureSession.startRunning()
}

Couple of issues with custom camera

I currently have a custom camera implemented in my application. I am running into two small issues.
1) When I switch between the camera views (front and back), the audio input dies and it only records video.
2) My method for deciding which camera (front or back) is which is deprecated, and I don't know exactly how to go about resolving it. The deprecated part is the variable the devices are stored in. Xcode is telling me: "Use AVCaptureDeviceDiscoverySession instead."
let devices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo) as! [AVCaptureDevice]
// Get the front and back-facing camera for taking photos
for device in devices {
    if device.position == AVCaptureDevicePosition.back {
        backFacingCamera = device
    } else if device.position == AVCaptureDevicePosition.front {
        frontFacingCamera = device
    }
}
currentDevice = backFacingCamera
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: currentDevice) else {
    return
}
As for the general camera recording, here is the code:
My Variables:
let captureSession = AVCaptureSession()
var currentDevice: AVCaptureDevice?
var backFacingCamera: AVCaptureDevice?
var frontFacingCamera: AVCaptureDevice?
var videoFileOutput: AVCaptureMovieFileOutput?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
@IBOutlet weak var recordingView: UIView!
Switching cameras:
var device = AVCaptureDevice.defaultDevice(withDeviceType: .builtInWideAngleCamera, mediaType: AVMediaTypeVideo, position: .back)

func switchCameras() {
    captureSession.beginConfiguration()
    // Change the device based on the current camera
    let newDevice = (currentDevice?.position == AVCaptureDevicePosition.back) ? frontFacingCamera : backFacingCamera
    // Remove all inputs from the session
    for input in captureSession.inputs {
        captureSession.removeInput(input as! AVCaptureDeviceInput)
    }
    // Change to the new input
    let cameraInput: AVCaptureDeviceInput
    do {
        cameraInput = try AVCaptureDeviceInput(device: newDevice)
    } catch {
        print(error)
        return
    }
    if captureSession.canAddInput(cameraInput) {
        captureSession.addInput(cameraInput)
    }
    currentDevice = newDevice
    captureSession.commitConfiguration()
    if currentDevice?.position == .front {
        flashButton.isHidden = true
        flashButton.isEnabled = false
    } else if currentDevice?.position == .back {
        flashButton.isHidden = false
        flashButton.isEnabled = true
    }
}
And in my viewWillAppear:
mediaViewCapture.frame = CGRect(x: self.view.frame.size.width * 0, y: self.view.frame.size.height * 0, width: self.view.frame.size.width, height: self.view.frame.size.height)
self.view.addSubview(mediaViewCapture)
captureSession.sessionPreset = AVCaptureSessionPresetHigh
let devices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo) as! [AVCaptureDevice]
// Get the front and back-facing camera for taking photos
for device in devices {
    if device.position == AVCaptureDevicePosition.back {
        backFacingCamera = device
    } else if device.position == AVCaptureDevicePosition.front {
        frontFacingCamera = device
    }
}
currentDevice = backFacingCamera
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: currentDevice) else {
    return
}
let audioInputDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeAudio)
do {
    let audioInput = try AVCaptureDeviceInput(device: audioInputDevice)
    // Add Audio Input
    if captureSession.canAddInput(audioInput) {
        captureSession.addInput(audioInput)
    } else {
        NSLog("Can't Add Audio Input")
    }
} catch let error {
    NSLog("Error Getting Input Device: \(error)")
}
videoFileOutput = AVCaptureMovieFileOutput()
captureSession.addInput(captureDeviceInput)
captureSession.addOutput(videoFileOutput)
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(cameraPreviewLayer!)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
cameraPreviewLayer?.frame = mediaViewCapture.layer.frame
captureSession.startRunning()
And finally, my capture delegate:
func capture(_ captureOutput: AVCaptureFileOutput!, didFinishRecordingToOutputFileAt outputFileURL: URL!, fromConnections connections: [Any]!, error: Error!) {
    if error == nil {
        turnFlashOff()
        let videoVC = VideoPreviewVC()
        videoVC.url = outputFileURL
        self.navigationController?.pushViewController(videoVC, animated: false)
    } else {
        print("Error saving the video \(error)")
    }
}
You can use AVCaptureDeviceDiscoverySession instead of AVCaptureDevice, as the latter API is deprecated. Following is the code for it:
let deviceDiscovery = AVCaptureDeviceDiscoverySession(deviceTypes: [AVCaptureDeviceType.builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: .back)
let devices = deviceDiscovery?.devices
for device in devices! {
    if device.hasMediaType(AVMediaTypeVideo) {
        captureDevice = device
    }
}
AVCaptureDeviceType has the following types: builtInMicrophone, builtInWideAngleCamera, builtInTelephotoCamera, builtInDualCamera, and builtInDuoCamera.
The audio-input issue when the camera is switched still needs looking into.
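One likely cause, judging from the switchCameras code in the question: the loop removes every input from the session, including the audio input, but only the new video input is added back. A minimal sketch of a fix, assuming the same properties as in the question but written against the current AVFoundation API names, is to remove only the video input so the audio input survives the switch:

func switchCameras() {
    captureSession.beginConfiguration()
    let newDevice = (currentDevice?.position == .back) ? frontFacingCamera : backFacingCamera
    // Remove only the current *video* input; the audio input stays attached
    for input in captureSession.inputs {
        if let deviceInput = input as? AVCaptureDeviceInput, deviceInput.device.hasMediaType(.video) {
            captureSession.removeInput(deviceInput)
        }
    }
    if let newDevice = newDevice,
       let cameraInput = try? AVCaptureDeviceInput(device: newDevice),
       captureSession.canAddInput(cameraInput) {
        captureSession.addInput(cameraInput)
        currentDevice = newDevice
    }
    captureSession.commitConfiguration()
}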