Zoom the camera in and out on a pinch gesture (Swift)

I am using the front camera in my app. I want the user to be able to zoom the camera in and out while taking photos.
I tried this code:
let device = AVCaptureDevice.default(for: .video)
print(sender.scale)
let vZoomFactor = sender.scale * prevZoomFactor
if sender.state == .ended {
    prevZoomFactor = vZoomFactor >= 1 ? vZoomFactor : 1
}
if sender.state == .changed {
    do {
        try device!.lockForConfiguration()
        if vZoomFactor <= device!.activeFormat.videoMaxZoomFactor {
            device!.videoZoomFactor = max(1.0, min(vZoomFactor, device!.activeFormat.videoMaxZoomFactor))
            device?.unlockForConfiguration()
        } else {
            print("Unable to set videoZoom: (max \(device!.activeFormat.videoMaxZoomFactor), asked \(vZoomFactor))")
        }
    } catch {
        print("\(error.localizedDescription)")
    }
}
Everything works fine with the back camera, but the zoom is not applied to the front camera.

Well, after spending hours on this code I found where I was making the mistake.
let device = AVCaptureDevice.default(for: .video)
This returns the back camera by default and works perfectly, but when I switched to the front camera my code was still configuring the back camera, so I just added a condition:
if currentcam == frontcam {
    let device = frontcam
    // did other stuff for zooming
} else {
    let device = AVCaptureDevice.default(for: .video)
    // did other stuff for zooming
}
This worked fine for me.
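For reference, here is a minimal sketch of the same idea without the duplicated branches: it zooms whichever device currently feeds the session, so the handler works for both cameras (captureSession and prevZoomFactor are assumed to be properties defined elsewhere, not part of the original code):
@objc func handlePinch(_ sender: UIPinchGestureRecognizer) {
    // Use the device behind the session's current video input instead of
    // AVCaptureDevice.default(for: .video), which always returns the back camera.
    guard let input = captureSession.inputs
        .compactMap({ $0 as? AVCaptureDeviceInput })
        .first(where: { $0.device.hasMediaType(.video) }) else { return }
    let device = input.device
    let vZoomFactor = sender.scale * prevZoomFactor
    if sender.state == .ended {
        prevZoomFactor = max(1, vZoomFactor)
    }
    if sender.state == .changed {
        do {
            try device.lockForConfiguration()
            device.videoZoomFactor = max(1.0, min(vZoomFactor, device.activeFormat.videoMaxZoomFactor))
            device.unlockForConfiguration()
        } catch {
            print(error.localizedDescription)
        }
    }
}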


How to set back camera zoom level to 0.5x using Swift?

I have the zoom feature working (1x onwards) for a custom camera implemented using AVFoundation. This is fine up to the iPhone X models, but I want 0.5x zoom on the iPhone 11 and iPhone 11 Pro devices.
The code I wrote does not manage to set 0.5x zoom. I have tried all possible combinations of [.builtInTripleCamera, .builtInDualWideCamera, .builtInUltraWideCamera]; the capture device with device type .builtInUltraWideCamera does not report 0.5 for minAvailableVideoZoomFactor.
While testing on the iPhone 11, I also removed [.builtInDualCamera, .builtInTelephotoCamera, .builtInWideAngleCamera, .builtInTrueDepthCamera] from the deviceTypes.
I'd appreciate any help with this. Below is the code, which works for 1x zoom onwards.
/// Called from -handlePinchGesture
private func zoom(_ scale: CGFloat) {
    let captureDevice = cameraDevice(.back)
    do {
        try captureDevice?.lockForConfiguration()
        var minZoomFactor: CGFloat = captureDevice?.minAvailableVideoZoomFactor ?? 1.0
        let maxZoomFactor: CGFloat = captureDevice?.maxAvailableVideoZoomFactor ?? 1.0
        if #available(iOS 13.0, *) {
            if captureDevice?.deviceType == .builtInDualWideCamera || captureDevice?.deviceType == .builtInTripleCamera || captureDevice?.deviceType == .builtInUltraWideCamera {
                minZoomFactor = 0.5
            }
        }
        zoomScale = max(minZoomFactor, min(beginZoomScale * scale, maxZoomFactor))
        captureDevice?.videoZoomFactor = zoomScale
        captureDevice?.unlockForConfiguration()
    } catch {
        print("ERROR: locking configuration")
    }
}
@objc private func handlePinchGesture(_ recognizer: UIPinchGestureRecognizer) {
    var allTouchesOnPreviewLayer = true
    let numTouch = recognizer.numberOfTouches
    for i in 0 ..< numTouch {
        let location = recognizer.location(ofTouch: i, in: view)
        let convertedTouch = previewLayer.convert(location, from: previewLayer.superlayer)
        if !previewLayer.contains(convertedTouch) {
            allTouchesOnPreviewLayer = false
            break
        }
    }
    if allTouchesOnPreviewLayer {
        zoom(recognizer.scale)
    }
}
func cameraDevice(_ position: AVCaptureDevice.Position) -> AVCaptureDevice? {
    var deviceTypes = [AVCaptureDevice.DeviceType]()
    deviceTypes.append(contentsOf: [.builtInDualCamera, .builtInTelephotoCamera, .builtInWideAngleCamera, .builtInTrueDepthCamera])
    if #available(iOS 13.0, *) {
        deviceTypes.append(contentsOf: [.builtInTripleCamera, .builtInDualWideCamera, .builtInUltraWideCamera])
    }
    let availableCameraDevices = AVCaptureDevice.DiscoverySession(deviceTypes: deviceTypes, mediaType: .video, position: position).devices
    guard availableCameraDevices.isEmpty == false else {
        debugPrint("ERROR: No camera devices found!!!")
        return nil
    }
    for device in availableCameraDevices {
        if device.position == position {
            return device
        }
    }
    guard let defaultDevice = AVCaptureDevice.default(for: AVMediaType.video) else {
        debugPrint("ERROR: Can't initialize default back camera!!!")
        return nil
    }
    return defaultDevice
}
Update for people who are looking to set the optical zoom level to 0.5x.
courtesy: https://github.com/NextLevel/NextLevel/issues/187
public class func primaryVideoDevice(forPosition position: AVCaptureDevice.Position) -> AVCaptureDevice? {
    // -- Changes begin
    if #available(iOS 13.0, *) {
        let hasUltraWideCamera: Bool = true // Set this variable to true if your device is one of the following - iPhone 11, iPhone 11 Pro, & iPhone 11 Pro Max
        if hasUltraWideCamera {
            // Your iPhone has an ultra-wide camera.
            let deviceTypes: [AVCaptureDevice.DeviceType] = [AVCaptureDevice.DeviceType.builtInUltraWideCamera]
            let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: deviceTypes, mediaType: AVMediaType.video, position: position)
            return discoverySession.devices.first
        }
    }
    // -- Changes end
    var deviceTypes: [AVCaptureDevice.DeviceType] = [AVCaptureDevice.DeviceType.builtInWideAngleCamera] // builtInWideAngleCamera // builtInUltraWideCamera
    if #available(iOS 11.0, *) {
        deviceTypes.append(.builtInDualCamera)
    } else {
        deviceTypes.append(.builtInDuoCamera)
    }
    // prioritize duo camera systems before wide angle
    let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: deviceTypes, mediaType: AVMediaType.video, position: position)
    for device in discoverySession.devices {
        if #available(iOS 11.0, *) {
            if device.deviceType == AVCaptureDevice.DeviceType.builtInDualCamera {
                return device
            }
        } else {
            if device.deviceType == AVCaptureDevice.DeviceType.builtInDuoCamera {
                return device
            }
        }
    }
    return discoverySession.devices.first
}
The minimum "zoomFactor" property of an AVCaptureDevice can't be less than 1.0 according to the Apple Docs. It's a little confusing becuase depending on what camera you've selected, a zoom factor of 1 will be a different field of view or optical view angle. The default iPhone camera app shows a label reading "0.5" but that's just a label for the ultra wide lens in relation to the standard camera's zoom factor.
You're already getting the minZoomFactor from the device, (which will probably be 1), so you should use the device's min and max that you're reading to set the bounds of the factor you input into "captureDevice.videoZoomFactor". Then when you;ve selected the ultra wide lens, setting the zoomfactor to 1 will be as wide as you can go! (a factor of 0.5 in relation to the standard lens's field of view).
The problem is that when you try to get a device of a given type from discoverySession.devices, it can return a default device that does not support the ultra-wide lens you need.
That was the case for me on an iPhone 12 Pro Max: it returned only one device for the back position, reporting the type builtInWideAngleCamera, but that was misleading; it was the middle camera, not the ultra-wide and not the telephoto.
The solution was not obvious: use AVCaptureDevice.default(.builtInTripleCamera, for: .video, position: .back) to get the real device capable of zooming from 1 (your logical 0.5).
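For illustration, a minimal sketch of that device selection with a fallback chain (the helper name and the fallback order are my own assumptions, not part of the original answer):
func backCameraSupportingUltraWide() -> AVCaptureDevice? {
    if #available(iOS 13.0, *) {
        // The triple- and dual-wide virtual devices include the ultra-wide lens,
        // so videoZoomFactor 1.0 on them corresponds to the "0.5x" field of view.
        if let triple = AVCaptureDevice.default(.builtInTripleCamera, for: .video, position: .back) {
            return triple
        }
        if let dualWide = AVCaptureDevice.default(.builtInDualWideCamera, for: .video, position: .back) {
            return dualWide
        }
    }
    return AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
}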
We cannot set the zoom factor to less than 1.
I resolved this issue by using .builtInDualWideCamera.
In this case we use the ultra-wide camera, and a zoom factor of 2.0 (which becomes the default value) is equivalent to the normal 1.0 zoom factor on the wide-angle camera (the minimum value stays 1.0).
If your iPhone doesn't support .builtInDualWideCamera, we use .builtInWideAngleCamera as usual, and the zoom factor is 1.0 (the minimum value).
func getCameraDevices() -> [AVCaptureDevice] {
    var deviceTypes = [AVCaptureDevice.DeviceType]()
    if #available(iOS 13.0, *) {
        deviceTypes.append(contentsOf: [.builtInDualWideCamera])
        self.isUltraWideCamera = true
        self.defaultZoomFactor = 2.0
    }
    if deviceTypes.isEmpty {
        deviceTypes.append(contentsOf: [.builtInWideAngleCamera])
        self.isUltraWideCamera = false
        self.defaultZoomFactor = 1.0
    }
    return AVCaptureDevice.DiscoverySession(deviceTypes: deviceTypes, mediaType: .video, position: .unspecified).devices
}
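As a rough usage sketch (my own illustration, with minimal error handling), the stored defaultZoomFactor would then be applied once the chosen device has been added to the session:
if let device = getCameraDevices().first(where: { $0.position == .back }) {
    do {
        try device.lockForConfiguration()
        device.videoZoomFactor = defaultZoomFactor // 2.0 on the dual-wide device, 1.0 otherwise
        device.unlockForConfiguration()
    } catch {
        print("Could not set the default zoom factor: \(error)")
    }
}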

AVCaptureVideoPreviewLayer does not detect objects in two ranges of the screen

I downloaded Apple's sample project for recognizing objects in a live capture.
When I tried the app, I saw that if I place the object to be recognized at the top or at the bottom of the camera view, the app doesn't recognize it:
In the first image the banana is in the center of the camera view, and the app is able to recognize it.
[image: object in center]
In these two images the banana is near the border of the camera view, and the app is not able to recognize it.
[image: object at top]
[image: object at bottom]
This is how the session and previewLayer are set up:
func setupAVCapture() {
    var deviceInput: AVCaptureDeviceInput!
    // Select a video device, make an input
    let videoDevice = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: .video, position: .back).devices.first
    do {
        deviceInput = try AVCaptureDeviceInput(device: videoDevice!)
    } catch {
        print("Could not create video device input: \(error)")
        return
    }
    session.beginConfiguration()
    session.sessionPreset = .vga640x480 // Model image size is smaller.
    // Add a video input
    guard session.canAddInput(deviceInput) else {
        print("Could not add video device input to the session")
        session.commitConfiguration()
        return
    }
    session.addInput(deviceInput)
    if session.canAddOutput(videoDataOutput) {
        session.addOutput(videoDataOutput)
        // Add a video data output
        videoDataOutput.alwaysDiscardsLateVideoFrames = true
        videoDataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)]
        videoDataOutput.setSampleBufferDelegate(self, queue: videoDataOutputQueue)
    } else {
        print("Could not add video data output to the session")
        session.commitConfiguration()
        return
    }
    let captureConnection = videoDataOutput.connection(with: .video)
    // Always process the frames
    captureConnection?.isEnabled = true
    do {
        try videoDevice!.lockForConfiguration()
        let dimensions = CMVideoFormatDescriptionGetDimensions((videoDevice?.activeFormat.formatDescription)!)
        bufferSize.width = CGFloat(dimensions.width)
        bufferSize.height = CGFloat(dimensions.height)
        videoDevice!.unlockForConfiguration()
    } catch {
        print(error)
    }
    session.commitConfiguration()
    previewLayer = AVCaptureVideoPreviewLayer(session: session)
    previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
    rootLayer = previewView.layer
    previewLayer.frame = rootLayer.bounds
    rootLayer.addSublayer(previewLayer)
}
You can download the project here.
I am wondering whether this is normal or not. Is there any way to fix it? Does it take square frames to process with Core ML, so that the two edge ranges are not included?
Any hints? Thanks.
That's probably because imageCropAndScaleOption is set to centerCrop.
The Core ML model expects a square image, but the video frames are not square. This can be fixed by setting the imageCropAndScaleOption on the VNCoreMLRequest. However, the results may not be as good as with center crop (it depends on how the model was originally trained).
See also VNImageCropAndScaleOption in the Apple docs.
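As a minimal sketch of that change (assuming model is the VNCoreMLModel used by the sample; whether .scaleFill or .scaleFit gives better results depends on how the model was trained):
let request = VNCoreMLRequest(model: model) { request, error in
    // Handle the VNRecognizedObjectObservation results here
}
// .centerCrop only looks at the square center of the frame, which is why the top and
// bottom of the preview are ignored; .scaleFill feeds the whole (distorted) frame instead.
request.imageCropAndScaleOption = .scaleFill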

How do I get faster Data Matrix code reading?

So I've been playing around with this barcode app: https://apps.apple.com/us/app/qr-code-barcode-scanner/id1048473097. I've noticed that it focuses near while still maintaining autofocus. Furthermore, the object in front of the camera shows nearly no motion blur when moved around.
So I went ahead and made some changes to my barcode/QR/Data Matrix code to be similar to that app. The reason is that their app was able to capture a REALLY small Data Matrix code (can't show it here) while mine wasn't. After the tweaking, I am now able to read the small Data Matrix code, but not as fast as they can. Mine can do it as fast (if not faster) when the code is placed in the right area, but theirs is more consistent and doesn't require as much finagling to find the position from which the Data Matrix code can be extracted.
Below is my AVFoundation code.
view.backgroundColor = UIColor.black
captureSession = AVCaptureSession()
captureSession.sessionPreset = AVCaptureSession.Preset.hd1920x1080
guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }
do {
    try videoCaptureDevice.lockForConfiguration()
} catch {
    return
}
// videoCaptureDevice.focusMode = AVCaptureDevice.FocusMode.autoFocus
videoCaptureDevice.autoFocusRangeRestriction = .near
// videoCaptureDevice.activeVideoMaxFrameDuration = CMTimeMake(value: 1, timescale: 30)
// videoCaptureDevice.activeVideoMinFrameDuration = CMTimeMake(value: 1, timescale: 30)
// videoCaptureDevice.focusMode = AVCaptureDevice.FocusMode.continuousAutoFocus
// videoCaptureDevice.setFocusModeLocked(lensPosition: 0.45) { (hi) in
//     print(hi)
// }
videoCaptureDevice.focusMode = .continuousAutoFocus
videoCaptureDevice.unlockForConfiguration()
let videoInput: AVCaptureDeviceInput
do {
    videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
} catch {
    return
}
if captureSession.canAddInput(videoInput) {
    captureSession.addInput(videoInput)
} else {
    failed()
    return
}
let metadataOutput = AVCaptureMetadataOutput()
if captureSession.canAddOutput(metadataOutput) {
    captureSession.addOutput(metadataOutput)
    metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
    metadataOutput.metadataObjectTypes = types
} else {
    failed()
    return
}
types just holds the metadata object types (e.g. [.qr, .dataMatrix, ...]).
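For context, the declaration looks something like this (the exact list is an assumption; only .qr and .dataMatrix are mentioned above):
let types: [AVMetadataObject.ObjectType] = [.qr, .dataMatrix]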
If there is anything you see that would make this more consistent with small Data Matrix codes, that would be awesome!

Observer is causing lag in AVFoundation captureOutput method

I have quite a specific problem, but hopefully someone can help me. I'm using AVFoundation to create a video camera with a live preview. I use AVCaptureVideoDataOutput to get individual frames and AVCaptureMetadataOutput to detect a face. I'm also using Dlib's facial landmarks predictor to show the landmark points on the user's face and to measure the interocular distance between their eyes. Finally, I'm using AVAssetWriter so that a video can be recorded.
The view controller has an ellipse shape on it so the user knows where to put their face. When the interocular distance is within a certain range, I want the ellipse to turn green so the user knows their face is in the right place.
At the minute I've achieved this by sending a notification from my SessionHandler class to the view controller. This works, but it's causing the frames per second of the video to drop badly. I was getting 25 fps (manually set by me) and now it ranges between 8 and 16.
Is there another way to notify the view controller that the ellipse should be turned green?
Here's my code where the problem is occurring. I know there's a lot going on.
// MARK: AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    if !currentMetadata.isEmpty {
        let boundsArray = currentMetadata
            .compactMap { $0 as? AVMetadataFaceObject }
            .map { (faceObject) -> NSValue in
                let convertedObject = output.transformedMetadataObject(for: faceObject, connection: connection)
                return NSValue(cgRect: convertedObject!.bounds)
            }
        if user.hasDlib {
            wrapper?.doWork(on: sampleBuffer, inRects: boundsArray)
            // Get the interocular distance so the face is in the correct place in the oval
            let interocularDistance = wrapper?.getEyeDistance()
            //print("Interocular Distance: \(interocularDistance)")
            if user.hasInterocularDistance {
                if interocularDistance! < 240 || interocularDistance! > 315 {
                    let name = Notification.Name(rawValue: setRemoveGreenEllipse)
                    NotificationCenter.default.post(name: name, object: nil)
                    //print("face not correct distance")
                    if videoRecorder.isRecording {
                        eyeDistanceCounter += 1
                        //print(eyeDistanceCounter)
                        if eyeDistanceCounter == 30 {
                            cancelledByUser = false
                            cancelledByEyeDistance = true
                            videoRecorder.cancel()
                            eyeDistanceCounter = 0
                        }
                    }
                } else {
                    //print("face correct distance")
                    eyeDistanceCounter = 0
                    let name = Notification.Name(rawValue: setGreenEllipse)
                    NotificationCenter.default.post(name: name, object: nil)
                }
            }
        }
    } else {
        // Check if a face is detected during recording. If it isn't, then cancel recording
        if videoRecorder.isRecording {
            noFaceCount += 1
            if noFaceCount == 50 {
                cancelledByUser = false
                videoRecorder.cancel()
                noFaceCount = 0
            }
        }
    }
    if layer.status == .failed {
        layer.flush()
    }
    layer.enqueue(sampleBuffer)
    let writable = videoRecorder.canWrite()
    if writable {
        if videoRecorder.sessionAtSourceTime == nil {
            // Start writing
            videoRecorder.sessionAtSourceTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
            videoRecorder.videoWriter.startSession(atSourceTime: videoRecorder.sessionAtSourceTime!)
            print("video session started")
        }
        if videoRecorder.videoWriterInput.isReadyForMoreMediaData {
            // write video buffer
            videoRecorder.videoWriterInput.append(sampleBuffer)
            //print("video buffering")
        }
    }
}
You could probably post the notification once every 30 frames, for example, instead of on every frame.
You could also call the color-changing function directly if it's in the same view controller. If not, you could define a delegate method and call it directly, as opposed to sending notifications.
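A minimal sketch of the delegate approach, assuming the SessionHandler / view controller split described in the question (the protocol, property, and method names here are illustrative, not taken from the original project):
import UIKit

// Hypothetical delegate protocol for reporting whether the face is at a valid distance.
protocol FaceDistanceDelegate: AnyObject {
    func faceDistance(isInRange: Bool)
}

final class SessionHandler {
    weak var delegate: FaceDistanceDelegate?   // weak to avoid a retain cycle
    private var lastReportedState: Bool?       // report changes only, not every frame

    // Called from captureOutput with the measured interocular distance.
    func reportInterocularDistance(_ distance: Double) {
        let isInRange = (240.0...315.0).contains(distance)
        guard isInRange != lastReportedState else { return }   // skip redundant updates
        lastReportedState = isInRange
        DispatchQueue.main.async { [weak self] in
            self?.delegate?.faceDistance(isInRange: isInRange)
        }
    }
}

// In the view controller, conform to the protocol and update the ellipse directly:
// extension CameraViewController: FaceDistanceDelegate {
//     func faceDistance(isInRange: Bool) {
//         ellipseLayer.strokeColor = (isInRange ? UIColor.green : UIColor.white).cgColor
//     }
// }
Besides avoiding NotificationCenter, the main win here is that the hop to the main thread only happens when the state actually flips, so the capture queue isn't scheduling main-thread work on every frame.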

FPS issues with adding a blur using SKEffectNode

Is there a better way of creating a blur effect? The way I am currently doing it creates FPS issues, especially on older phones, and the higher the blurAmount, the lower the FPS. Could the blendMode be the reason?
if effectsNode.parent == nil {
    let filter = CIFilter(name: "CIGaussianBlur")
    let blurAmount = 15.0
    filter!.setValue(blurAmount, forKey: kCIInputRadiusKey)
    effectsNode.filter = filter
    effectsNode.blendMode = .add
    sceneContent.removeFromParent()
    effectsNode.addChild(sceneContent)
    addChild(effectsNode)
}
When I pause my game, I call blurScreen(), which runs the code above. However, my FPS drops over time the longer the game stays paused. When I take blurScreen() out, the FPS issues go away. How can the FPS drop over time when blurScreen() is only called once?
EDIT:
func pauseGame() {
    sceneContent.isPaused = true
    intermission = true
    physicsWorld.speed = 0
    blurScreen()
}
Here is the code in touchesEnded()
// Tapped pause or pause menu options
if name == "pause" && touch.tapCount == 1 && pauseSprite.alpha == 1.0 && ((!sceneContent.isPaused && !GameData.shared.midNewDay) || (!sceneContent.isPaused && sceneElements[0].editingMode)) {
    SKTAudio.sharedInstance.pauseBackgroundMusic()
    SKTAudio.sharedInstance.playSoundEffect("Sounds/pause.wav")
    pauseSprite.run(SKAction.sequence([SKAction.scale(to: 1.2, duration: 0.10), SKAction.scale(to: 1.0, duration: 0.10)])) { [unowned self] in
        self.createPauseMenu()
        self.pauseGame()
    }
    return
}
Update method
override func update(_ currentTime: TimeInterval) {
    if GameData.shared.firstTimePlaying && GameData.shared.distanceMoved > 600 && !step1Complete {
        tutorial2()
    }
    // Check for game over
    if GameData.shared.hearts == 0 && !gameEnded {
        gameOver()
    }
    // If we're in intermission, do nothing
    if intermission || sceneContent.isPaused {
        return
    }
    // some more stuff unrelated to pausing
}
You are running an effect node on the entire scene, so the scene has to render that effect every frame, which puts a lot of work on your system. If you do not have any animations going on behind it, I would recommend converting your effect node to a sprite node by doing this:
var spriteScene: SKSpriteNode!

func blurScreen() {
    DispatchQueue.global(qos: .background).async { [weak self] in
        guard let strongSelf = self else { return }
        let effectsNode = SKEffectNode()
        let filter = CIFilter(name: "CIGaussianBlur")
        let blurAmount = 10.0
        filter!.setValue(blurAmount, forKey: kCIInputRadiusKey)
        effectsNode.filter = filter
        effectsNode.blendMode = .add
        strongSelf.sceneContent.removeFromParent()
        effectsNode.addChild(strongSelf.sceneContent)
        // Render the blurred node once into a texture so the filter isn't re-applied every frame
        let texture = strongSelf.view!.texture(from: effectsNode)
        strongSelf.spriteScene = SKSpriteNode(texture: texture)
        strongSelf.spriteScene.anchorPoint = CGPoint(x: 0.5, y: 0.5)
        DispatchQueue.main.async {
            strongSelf.sceneContent.removeFromParent()
            strongSelf.addChild(strongSelf.spriteScene)
        }
    }
}
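One follow-up worth noting (my addition, not part of the original answer): because the blurred snapshot replaces the live sceneContent, unpausing should drop the sprite and restore the real node tree, roughly like this:
func unblurScreen() {
    spriteScene?.removeFromParent()
    spriteScene = nil
    if sceneContent.parent == nil {
        addChild(sceneContent)
    }
    sceneContent.isPaused = false
}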