Swift 3: How do I enable flash on custom AVFoundation camera?

I have a very basic AVFoundation camera that has a captureButton that will take a photo and send that photo to the SecondCameraController for it to be displayed. My problem is that a lot of this API is deprecated in iOS 10, and I'm not sure how to fire the flash when I press the captureButton. Any help will be highly appreciated. My code is below. Thank you guys.
class CameraController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
let captureSession = AVCaptureSession()
var previewLayer: CALayer!
var captureDevice: AVCaptureDevice!
var takePhoto: Bool = false
override func viewDidLoad() {
super.viewDidLoad()
view.backgroundColor = .white
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
prepareCamera()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
navigationController?.setNavigationBarHidden(true, animated: true)
}
let cameraView: UIView = {
let view = UIView()
view.backgroundColor = .red
return view
}()
func prepareCamera() {
captureSession.sessionPreset = AVCaptureSessionPresetPhoto
if let availableDevices = AVCaptureDeviceDiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: .back).devices {
captureDevice = availableDevices.first
beginSession()
}
}
func beginSession() {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
captureSession.addInput(captureDeviceInput)
} catch {
print(error.localizedDescription)
}
if let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
self.previewLayer = previewLayer
self.view.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = CGRect(x: 0, y: 0, width: view.frame.width, height: view.frame.height)
previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
self.view.addSubview(captureButton)
let width: CGFloat = 85
captureButton.frame = CGRect(x: (previewLayer.frame.width / 2) - width / 2, y: (previewLayer.frame.height) - width - 25, width: width, height: 85)
captureSession.startRunning()
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)]
dataOutput.alwaysDiscardsLateVideoFrames = true
if captureSession.canAddOutput(dataOutput) {
captureSession.addOutput(dataOutput)
}
captureSession.commitConfiguration()
let queue = DispatchQueue(label: "com.cheekylabsltd.camera")
dataOutput.setSampleBufferDelegate(self, queue: queue)
}
}
func handleCapture() {
takePhoto = true
}
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
if takePhoto {
takePhoto = false
if let image = self.getImageFromSampleBuffer(buffer: sampleBuffer) {
let secondController = SecondCameraController()
secondController.takenPhoto = image
DispatchQueue.main.async {
self.present(secondController, animated: true, completion: {
self.stopCaptureSession()
})
}
}
}
}
func getImageFromSampleBuffer(buffer: CMSampleBuffer) -> UIImage? {
if let pixelBuffer = CMSampleBufferGetImageBuffer(buffer) {
let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
let context = CIContext()
let imageRect = CGRect(x: 0, y: 0, width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer))
if let image = context.createCGImage(ciImage, from: imageRect) {
return UIImage(cgImage: image, scale: UIScreen.main.scale, orientation: .right)
}
}
return nil
}
func stopCaptureSession() {
self.captureSession.stopRunning()
if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
for input in inputs {
self.captureSession.removeInput(input)
}
}
}
lazy var captureButton: UIButton = {
let button = UIButton(type: .system)
button.backgroundColor = .white
button.layer.cornerRadius = 42.5
button.clipsToBounds = true
button.alpha = 0.40
button.layer.borderWidth = 4
button.layer.borderColor = greenColor.cgColor
button.addTarget(self, action: #selector(handleCapture), for: .touchUpInside)
return button
}()
}

Try this code:
Swift v3.0
private func flashOn(device:AVCaptureDevice)
{
do{
if (device.hasTorch)
{
try device.lockForConfiguration()
device.torchMode = .on
device.flashMode = .on
device.unlockForConfiguration()
}
}catch{
//DISABLE FLASH BUTTON HERE IF ERROR
print("Device torch flash error")
}
}
//FOR FLASH OFF CODE
private func flashOff(device:AVCaptureDevice)
{
do{
if (device.hasTorch){
try device.lockForConfiguration()
device.torchMode = .off
device.flashMode = .off
device.unlockForConfiguration()
}
}catch{
//DISABLE FLASH BUTTON HERE IF ERROR
print("Device torch flash error")
}
}
// METHOD
//private let session = AVCaptureSession()
//MARK: FLASH UTILITY METHODS
func toggleFlash() {
var device : AVCaptureDevice!
if #available(iOS 10.0, *) {
let videoDeviceDiscoverySession = AVCaptureDeviceDiscoverySession(deviceTypes: [.builtInWideAngleCamera, .builtInDuoCamera], mediaType: AVMediaTypeVideo, position: .unspecified)
let devices = videoDeviceDiscoverySession.devices!
device = devices.first!
} else {
// Fallback on earlier versions
device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
}
if (device.hasMediaType(AVMediaTypeVideo))
{
if (device.hasTorch)
{
self.session.beginConfiguration()
//self.objOverlayView.disableCenterCameraBtn();
if device.isTorchActive == false {
self.flashOn(device: device)
} else {
self.flashOff(device: device);
}
//self.objOverlayView.enableCenterCameraBtn();
self.session.commitConfiguration()
}
}
}
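To wire this into the question's code, one option (a sketch, assuming captureDevice has already been set by prepareCamera()) is to switch the torch on before raising the takePhoto flag, so the scene is lit when the next frame is grabbed:
func handleCapture() {
    // Assumption: captureDevice was assigned in prepareCamera() above.
    if let device = captureDevice, device.hasTorch {
        flashOn(device: device) // lights the scene for the video-frame capture
    }
    takePhoto = true
}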

Swift 4
So there are two different behaviors to choose from in AVFoundation. One would be a capture device torch switch. Connect the torchSwitch action to some view and be sure to change CameraManager.shared.backDevice to your instance of the front or back device that provides the current input.
@IBAction func torchSwitch(_ sender: Any) {
guard let device = CameraManager.shared.backDevice else { return }
guard device.isTorchAvailable else { return }
do {
try device.lockForConfiguration()
defer { device.unlockForConfiguration() }
device.torchMode = device.torchMode == .on ? .off : .on
if device.torchMode == .on {
try device.setTorchModeOn(level: 0.7)
}
} catch {
debugPrint(error)
}
}
AVFoundation has deprecated device.flashMode.
Now, to set the flash, declare a variable on the camera manager or view controller. The value here will be the default.
var flash: AVCaptureDevice.FlashMode = .off
Connect this action to some view
@IBAction func torchSwitch(_ sender: Any) { flash = (flash == .on) ? .off : .on }
Then when you want to capture an image, use AVCapturePhotoOutput and prepare the photo settings. stillCameraOutput is an instance of AVCapturePhotoOutput.
let settings = AVCapturePhotoSettings()
settings.flashMode = flash
stillCameraOutput.capturePhoto(with: settings, delegate: self)
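capturePhoto(with:delegate:) reports back through AVCapturePhotoCaptureDelegate. A minimal receiving side might look like the sketch below; the class name and image handling are illustrative, and the iOS 11+ AVCapturePhoto variant of the callback is used:
extension CameraViewController: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        if let error = error { debugPrint(error); return }
        // The flash fired (or not) according to settings.flashMode above.
        guard let data = photo.fileDataRepresentation(), let image = UIImage(data: data) else { return }
        // ... hand `image` to your UI here
    }
}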

Swift 4:
The following code works fine for me.
private enum FlashPhotoMode {
case on
case off
}
@IBOutlet weak var flashPhotoModeButton: UIButton!
@IBAction func toggleFlashPhotoMode(_ flashPhotoModeButton: UIButton) {
sessionQueue.async {
self.flashPhotoMode = (self.flashPhotoMode == .on) ? .off : .on
let flashPhotoMode = self.flashPhotoMode
DispatchQueue.main.async {
if flashPhotoMode == .on {
self.flashPhotoModeButton.setBackgroundImage(UIImage(named: "flashON"), for: .normal)
print("flashON")
} else {
self.flashPhotoModeButton.setBackgroundImage(UIImage(named: "flashOFF"), for: .normal)
print("flashOFF")
}
}
}
}
@IBAction private func capturePhoto(_ photoButton: UIButton) {
................
.......................
if self.videoDeviceInput.device.isFlashAvailable {
if self.flashPhotoMode == .on {
photoSettings.flashMode = .on
print("FLASH ON ")
} else {
print("FLASH OFF ")
photoSettings.flashMode = .off
}
}
}
Thanks!

Related

AVCaptureDevice builtInWideAngleCamera image does not match preview

I have a Swift project where I am using a UIImageView to show a live preview capture and 'freeze' this image whenever the user taps a 'Take photo' button, showing it in another UIImageView with identical dimensions and position.
This works great on devices that are able to use builtInDualCamera (such as an iPhone X), but on devices that rely on the fallback builtInWideAngleCamera (such as a 6th gen iPad Mini), the image appears cropped/zoomed in.
Can someone explain whether it is possible, and how, to get an image identical to the one shown in the preview using the builtInDualCamera?
A minimal reproducible example can be found below (simply create a storyboard with 2 buttons and 2 UIImageViews and hook them up).
import UIKit
import AVFoundation
class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate, AVCapturePhotoCaptureDelegate {
@IBOutlet weak var cameraImageView: UIImageView!
@IBOutlet weak var userImageView: UIImageView!
var captureSession: AVCaptureSession? = AVCaptureSession()
var currentDevice: AVCaptureDevice?
var videoFileOutput: AVCaptureMovieFileOutput?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
var cameraOutput : AVCapturePhotoOutput?
func setupCamSession(){
if #available(iOS 10.0, *) {
if let device = AVCaptureDevice.default(.builtInDualCamera, for: AVMediaType.video, position:.front) {
currentDevice = device
} else if let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: AVMediaType.video, position: .front) {
currentDevice = device
}
} else {
// Fallback on earlier versions
let devices = AVCaptureDevice.devices().filter{ ($0 as AnyObject).hasMediaType(AVMediaType.video) && ($0 as AnyObject).position == AVCaptureDevice.Position.front }
if let captureDevice = devices.first {
currentDevice = captureDevice
}
}
if(currentDevice==nil)
{
print("failed")
return
}
else
{
captureSession?.sessionPreset = AVCaptureSession.Preset.medium
}
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: currentDevice!) else {
return
}
if (captureSession?.canAddInput(captureDeviceInput))! {
captureSession?.addInput(captureDeviceInput)
cameraOutput = AVCapturePhotoOutput()
if (captureSession?.canAddOutput(cameraOutput!))! {
captureSession?.addOutput(cameraOutput!)
}
}
else
{
print("failed")
return
}
}
func startCamSession()
{
if (captureSession==nil)
{
print("Warning: no captureSession detected")
return
}
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspect
cameraPreviewLayer?.frame = cameraImageView.bounds
cameraImageView.layer.addSublayer(cameraPreviewLayer!)
if let connection = cameraPreviewLayer?.connection {
let previewLayerConnection : AVCaptureConnection = connection
if previewLayerConnection.isVideoOrientationSupported {
previewLayerConnection.videoOrientation = .portrait
cameraPreviewLayer?.frame = cameraImageView.bounds
}
captureSession?.startRunning()
}
}
func stopCamSession()
{
captureSession?.stopRunning()
}
func photoOutput(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
if let error = error {
print(error.localizedDescription)
}
if let sampleBuffer = photoSampleBuffer, let previewBuffer = previewPhotoSampleBuffer {
let imageData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: sampleBuffer, previewPhotoSampleBuffer: previewBuffer)
let dataProvider = CGDataProvider(data: imageData! as CFData)
let cgImageRef = CGImage(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: CGColorRenderingIntent.absoluteColorimetric)
let image = UIImage(cgImage: cgImageRef!, scale: 1.0, orientation: UIImage.Orientation.leftMirrored)
self.userImageView.contentMode = .scaleAspectFit
self.userImageView.image = image
cameraPreviewLayer?.removeFromSuperlayer()
self.stopCamSession()
} else {
}
}
override func viewDidLoad() {
super.viewDidLoad()
setupCamSession()
}
@IBAction func startPressed(_ sender: Any) {
startCamSession()
}
@IBAction func takePhotoPressed(_ sender: Any) {
let settings = AVCapturePhotoSettings()
let previewPixelType = settings.availablePreviewPhotoPixelFormatTypes.first!
let previewFormat = [
kCVPixelBufferPixelFormatTypeKey as String : previewPixelType,
kCVPixelBufferWidthKey as String : 640,
kCVPixelBufferHeightKey as String : 480
]
settings.previewPhotoFormat = previewFormat
cameraOutput?.capturePhoto(with: settings, delegate: self)
}
}
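One common way to make the frozen image match what the user saw (not from the original post, and sketched here assuming the preview uses .resizeAspectFill rather than the .resizeAspect above) is to crop the captured photo to the region the preview layer actually displays, via metadataOutputRectConverted(fromLayerRect:):
// Sketch: crop a captured image to what cameraPreviewLayer shows.
// Assumes the CGImage is in sensor orientation and the UIImage's
// imageOrientation metadata handles rotation for display.
func cropToPreview(_ image: UIImage, previewLayer: AVCaptureVideoPreviewLayer) -> UIImage? {
    let outputRect = previewLayer.metadataOutputRectConverted(fromLayerRect: previewLayer.bounds)
    guard let cgImage = image.cgImage else { return nil }
    let width = CGFloat(cgImage.width)
    let height = CGFloat(cgImage.height)
    let cropRect = CGRect(x: outputRect.origin.x * width,
                          y: outputRect.origin.y * height,
                          width: outputRect.size.width * width,
                          height: outputRect.size.height * height)
    guard let cropped = cgImage.cropping(to: cropRect) else { return nil }
    return UIImage(cgImage: cropped, scale: image.scale, orientation: image.imageOrientation)
}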

Swift capture photos with portrait effect matte

I would like to implement a camera that captures portrait photos like Apple's default camera does. The portrait effects matte and depth data are enabled in the photo settings.
AVCapturePhotoSettings
self.output.isPortraitEffectsMatteDeliveryEnabled = true
self.output.isDepthDataDeliveryEnabled = true
photoSettings.isPortraitEffectsMatteDeliveryEnabled = true
photoSettings.isDepthDataDeliveryEnabled = true
photoSettings.embedsDepthDataInPhoto = true
photoSettings.embedsPortraitEffectsMatteInPhoto = true
Printing the AVCapturePhoto.portraitEffectsMatte returns Optional(L008 2080x1170 v.1.1), but neither in my preview layer nor in the saved image is the portrait effect visible.
Additional context
AVCaptureDevice uses the builtInDualWideCamera
Printing output.isPortraitEffectsMatteDeliveryEnabled also logs true
Full code:
class ViewController: UIViewController {
var session: AVCaptureSession?
var output = AVCapturePhotoOutput()
var previewLayer = AVCaptureVideoPreviewLayer()
private func setUpCamera(){
let session = AVCaptureSession()
if let device = AVCaptureDevice.default(.builtInDualWideCamera, for: .video, position: (useFrontCamera ? AVCaptureDevice.Position.front : AVCaptureDevice.Position.back)){
do {
let input = try AVCaptureDeviceInput(device: device)
if session.canAddInput(input){
session.addInput(input)
}
if session.canAddOutput(output){
session.addOutput(output)
}
previewLayer.videoGravity = .resizeAspectFill
previewLayer.session = session
session.startRunning()
self.session = session
}
catch {
print(error)
}
}
}
private func getSettings() -> AVCapturePhotoSettings{
var photoSettings = AVCapturePhotoSettings()
if(self.output.isPortraitEffectsMatteDeliverySupported && self.output.isDepthDataDeliverySupported){
self.output.isPortraitEffectsMatteDeliveryEnabled = true
self.output.isDepthDataDeliveryEnabled = true
photoSettings.isPortraitEffectsMatteDeliveryEnabled = true
photoSettings.isDepthDataDeliveryEnabled = true
photoSettings.embedsDepthDataInPhoto = true
photoSettings.embedsPortraitEffectsMatteInPhoto = true
}
return photoSettings
}
private func takePhoto(){
self.output.capturePhoto(with: self.getSettings(), delegate: self)
}
}
extension ViewController: AVCapturePhotoCaptureDelegate {
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
guard let data = photo.fileDataRepresentation() else { return}
let image = UIImage(data: data)
let imageView = UIImageView(image: image)
session?.stopRunning()
imageView.contentMode = .scaleAspectFill
imageView.frame = CGRect(x: 0, y: 0, width: view.frame.width/4, height: view.frame.height/4)
imageView.layer.name = "photoPreview"
view.addSubview(imageView)
UIImageWriteToSavedPhotosAlbum(image!, self, nil, nil)
}
}
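One thing worth noting here: enabling matte delivery only embeds the matte in the capture; AVFoundation does not render the background blur for you, which would explain a normal-looking preview and saved image. A rough sketch of applying the matte yourself with Core Image (the filter choice, radius, and mask polarity are assumptions and may need adjusting):
// Sketch: blur the background of a captured photo using its portrait matte.
func applyPortraitEffect(to photo: AVCapturePhoto) -> CIImage? {
    guard let matte = photo.portraitEffectsMatte,
        let data = photo.fileDataRepresentation(),
        let photoImage = CIImage(data: data) else { return nil }
    // The matte is lower resolution than the photo; scale it to match.
    var matteImage = CIImage(cvPixelBuffer: matte.mattingImage)
    matteImage = matteImage.transformed(by: CGAffineTransform(
        scaleX: photoImage.extent.width / matteImage.extent.width,
        y: photoImage.extent.height / matteImage.extent.height))
    // The matte is white on the subject; invert it so the blur hits the background.
    let mask = matteImage.applyingFilter("CIColorInvert")
    return photoImage.applyingFilter("CIMaskedVariableBlur", parameters: [
        "inputMask": mask,
        "inputRadius": 15.0
    ])
}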

Swift CameraView Zoom In and Out Not working

In my scenario, I am trying to create a custom CameraView. Here, pinch zoom in and zoom out are not working. How do I fix this?
Below is my code:
@IBAction func pinchToZoom(_ sender: UIPinchGestureRecognizer) {
guard let device = captureDevice else { return }
func minMaxZoom(_ factor: CGFloat) -> CGFloat { return min(max(factor, 1.0), device.activeFormat.videoMaxZoomFactor) }
func update(scale factor: CGFloat) {
do {
try device.lockForConfiguration()
defer { device.unlockForConfiguration() }
device.videoZoomFactor = factor
} catch {
debugPrint(error)
}
}
let newScaleFactor = minMaxZoom(sender.scale * zoomFactor)
switch sender.state {
case .began: fallthrough
case .changed: update(scale: newScaleFactor)
case .ended:
zoomFactor = minMaxZoom(newScaleFactor)
update(scale: zoomFactor)
default: break
}
}
The answer below works fine for camera view zoom in and zoom out.
@IBAction func pinchToZoom(_ sender: UIPinchGestureRecognizer) {
let captureSession = AVCaptureSession()
captureSession.sessionPreset = AVCaptureSession.Preset.photo
let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
guard let device = captureDevice else { return }
if sender.state == .changed {
let maxZoomFactor = device.activeFormat.videoMaxZoomFactor
let pinchVelocityDividerFactor: CGFloat = 5.0
do {
try device.lockForConfiguration()
defer { device.unlockForConfiguration() }
let desiredZoomFactor = device.videoZoomFactor + atan2(sender.velocity, pinchVelocityDividerFactor)
device.videoZoomFactor = max(1.0, min(desiredZoomFactor, maxZoomFactor))
} catch {
print(error)
}
}
}
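If the recognizer is not hooked up in a storyboard, it can also be attached in code. A sketch, assuming a cameraView that hosts the preview layer:
let pinch = UIPinchGestureRecognizer(target: self, action: #selector(pinchToZoom(_:)))
cameraView.addGestureRecognizer(pinch)
Note that, in practice, the device configured inside the handler should be the same device that feeds the running session, rather than a fresh AVCaptureDevice.default(for:), so the zoom applies to the preview the user is pinching.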

cell frame size changing on rotate

I am resizing a cell frame on the iPhone X to embed an instance of AVPlayerViewController. When I change orientation from portrait to landscape, the frame size seems to change.
I end up with the controls (full screen + volume) overlapping the header and title.
Would you recommend a solution other than:
self.frame.insetBy
Here is a demo of how it looks:
iphone x demo
import UIKit
import AVKit
class VGMediaPlayerCell: VGBaseCell {
let statusBarHeight: CGFloat = 20
let contentOffset: CGFloat = 50
static let vgReuseIdentifier = "VGMediaPlayerCell"
static var playerIsPlaying: Bool = false
var toggleHeaderVisibility: Bool = false
public weak var delegate: VGMediaPlayerCellDelegate?
var moviePlayerController = AVPlayerViewController()
var waitingIndicator = UIActivityIndicatorView(style: UIActivityIndicatorView.Style.whiteLarge)
var containerView = UIView()
var messageLabel = UILabel()
var needAutoPlay: Bool = false
var isLoaded: Bool = false
var asset: AVAsset?
var isReadyForDisplayObserver: NSKeyValueObservation?
var content: VGContent?
let deviceOrientation = UIDevice.current.orientation
//player settings
@objc var player: AVPlayer?
var PlayerViewConroller: AVPlayerViewController?
override init(frame: CGRect) {
super.init(frame: frame)
setupWaitingIndicator()
setupMessageLabel()
isReadyForDisplayObserver = moviePlayerController.observe(\.isReadyForDisplay) { [weak self] (_, _) in
guard let `self` = self else {
return
}
// When the first frame of the video is loaded, we dismiss the waiting indicator.
DispatchQueue.main.async {
if self.moviePlayerController.isReadyForDisplay {
self.waitingStateActive(isActive: false)
}
}
}
}
override func prepareForReuse() {
super.prepareForReuse()
self.isLoaded = false
needAutoPlay = false
moviePlayerController.player = nil
content = nil
asset = nil
player = nil
contextualLabel.font = nil
messageLabel.text = nil
}
required init?(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
// MARK: - View creation
func setupContainerView() {
addSubview(containerView)
containerView.translatesAutoresizingMaskIntoConstraints = false
NSLayoutConstraint.activate([
containerView.leftAnchor.constraint(equalTo: leftAnchor),
containerView.rightAnchor.constraint(equalTo: rightAnchor),
containerView.topAnchor.constraint(equalTo: topAnchor),
containerView.bottomAnchor.constraint(equalTo: bottomAnchor)
])
}
func setupMessageLabel() {
addSubview(messageLabel)
messageLabel.textAlignment = .center
messageLabel.textColor = .white
messageLabel.numberOfLines = 2
messageLabel.isHidden = true
messageLabel.translatesAutoresizingMaskIntoConstraints = false
NSLayoutConstraint.activate([
messageLabel.leftAnchor.constraint(equalTo: leftAnchor, constant: 10),
messageLabel.rightAnchor.constraint(equalTo: rightAnchor, constant: -10),
messageLabel.heightAnchor.constraint(equalToConstant: 50),
messageLabel.centerYAnchor.constraint(equalTo: centerYAnchor)
])
}
func setupWaitingIndicator() {
addSubview(waitingIndicator)
waitingIndicator.translatesAutoresizingMaskIntoConstraints = false
NSLayoutConstraint.activate([
waitingIndicator.centerXAnchor.constraint(equalTo: centerXAnchor),
waitingIndicator.centerYAnchor.constraint(equalTo: centerYAnchor),
waitingIndicator.widthAnchor.constraint(equalToConstant: 100),
waitingIndicator.heightAnchor.constraint(equalToConstant: 100)
])
}
// MARK: - Utils
func configurePlayer(with viewModel: VGMediaPlayerViewModel) {
//to update message label + loader
updateUI(with: viewModel)
if viewModel.error == ErrorMessage.noNetwork.rawValue {
self.stop()
}
// Create a new AVPlayer and AVPlayerLayer
guard let url = URL(string: viewModel.content?.contentURL ?? "") else { return }
self.player = AVPlayer(url: url)
// We want video controls so we need an AVPlayerViewController
PlayerViewConroller = AVPlayerViewController()
PlayerViewConroller?.player = player
PlayerViewConroller?.videoGravity = AVLayerVideoGravity.resizeAspect
insertSubview(PlayerViewConroller!.view, at: 0)
PlayerViewConroller!.view.topAnchor.constraint(equalTo: topAnchor).isActive = true
PlayerViewConroller!.view.leftAnchor.constraint(equalTo: leftAnchor).isActive = true
PlayerViewConroller!.view.bottomAnchor.constraint(equalTo: bottomAnchor).isActive = true
PlayerViewConroller!.view.rightAnchor.constraint(equalTo: rightAnchor).isActive = true
self.bringSubviewToFront(PlayerViewConroller!.view)
if #available(iOS 10.0, *) {
self.player?.automaticallyWaitsToMinimizeStalling = false
}
guard let asset = viewModel.avAsset else { return }
if !asset.isPlayable {
DispatchQueue.main.async {
self.waitingStateActive(isActive: false)
self.displayError(message: ErrorMessage.noPreview.rawValue)
}
}
DispatchQueue.main.async {
// Create a new AVAsset from the URL
let videoAsset = AVAsset(url: url)
// Now we need an AVPlayerItem to pass to the AVPlayer
let videoPlayerItem = AVPlayerItem(asset: videoAsset)
// Finally, we set this as the current AVPlayer item
self.player?.replaceCurrentItem(with: videoPlayerItem)
if self.needAutoPlay {
self.player?.play()
}
self.isLoaded = true
}
//custom insets per device orientation
// regular from for iphone 8 and downwards
// custom frame for iphone X and upwards
if UIDevice().userInterfaceIdiom == .phone {
switch UIScreen.main.nativeBounds.height {
//iPhone 5 or 5S or 5C, iPhone 6/6S/7/8, iPhone 6+/6S+/7+/8+
case 1136, 1334, 1920, 2208:
PlayerViewConroller?.view.frame = self.frame
//iPhone X, Xs, iPhone Xs Max, iPhone Xr
case 2436, 2688, 1792:
if UIApplication.shared.statusBarOrientation.isPortrait {
PlayerViewConroller?.view.frame = self.frame.insetBy(dx: 0.0, dy: 50.0)
} else if deviceOrientation == .landscapeLeft || deviceOrientation == .landscapeRight {
PlayerViewConroller?.view.frame = self.frame.insetBy(dx: 30.0, dy: 30.0)
}
default: break
}
} else {
//for the iPad
PlayerViewConroller?.view.frame = self.frame
}
//Add observer on keypath rate to monitor player's playing status
if self.toggleHeaderVisibility == true {
if UIDevice().userInterfaceIdiom == .phone {
switch UIScreen.main.nativeBounds.height {
case 2436, 2688, 1792:
player?.addObserver(self, forKeyPath: "rate", options: [.old, .new], context: nil)
default : break
}
}
}
player?.addObserver(self, forKeyPath: "rate", options: [.old, .new], context: nil)
}
override func observeValue(forKeyPath keyPath: String?, of object: Any?, change: [NSKeyValueChangeKey : Any]?, context: UnsafeMutableRawPointer?) {
if object as AnyObject? === player {
if keyPath == "rate" {
guard let rate = player?.rate else { return }
if rate > Float(0.0) {
VGMediaPlayerCell.playerIsPlaying = true
NotificationCenter.default.post(name: .playerDidStartPlay, object: nil)
} else {
VGMediaPlayerCell.playerIsPlaying = false
NotificationCenter.default.post(name: .playerDidStop, object: nil)
}
}
}
}
func updateUI(with viewModel: VGMediaPlayerViewModel) {
messageLabel.isHidden = true
//indicating waiting state with spinner
waitingStateActive(isActive: viewModel.isLoading)
}
/**
Cancel asset loading
*/
func cancelLoading() {
asset?.cancelLoading()
}
/**
Show an error with a specific message
- parameter message: A message
*/
func displayError(message: String) {
messageLabel.text = message
messageLabel.isHidden = false
containerView.isHidden = true
}
/**
Update the waiting indicator state
- parameter active: A boolean value that indicate if the waiting indicator need to be active or not.
*/
func waitingStateActive(isActive: Bool) {
isActive ? waitingIndicator.startAnimating() : waitingIndicator.stopAnimating()
containerView.isHidden = isActive
}
}
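An alternative to hard-coding insetBy values per screen height (not from the original post) is to pin the player view to the cell's safeAreaLayoutGuide (iOS 11+), which tracks the notch and home indicator across rotations automatically. A sketch, using the PlayerViewConroller property from the cell above:
// Constrain the player view to the safe area instead of switching
// on UIScreen.main.nativeBounds.height per device.
if let playerView = PlayerViewConroller?.view {
    playerView.translatesAutoresizingMaskIntoConstraints = false
    NSLayoutConstraint.activate([
        playerView.topAnchor.constraint(equalTo: safeAreaLayoutGuide.topAnchor),
        playerView.leftAnchor.constraint(equalTo: safeAreaLayoutGuide.leftAnchor),
        playerView.bottomAnchor.constraint(equalTo: safeAreaLayoutGuide.bottomAnchor),
        playerView.rightAnchor.constraint(equalTo: safeAreaLayoutGuide.rightAnchor)
    ])
}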

Taking a square photo with Camera App

I am currently building a camera app and want to make the camera take a square 375x375 image, like Instagram does, and save it like that.
I am able to square off the viewfinder of the camera, but it is not taking the picture the right way; also, when I save it, it saves the full view. I looked around the other Q&As on here, but none of them seem to work with my code.
Can someone please help me figure this out?
import Foundation
import UIKit
import AVFoundation
class CameraViewController: UIViewController{
var captureSession = AVCaptureSession()
var frontCameraDeviceInput: AVCaptureDeviceInput?
var backCameraDeviceInput: AVCaptureDeviceInput?
var currentCamera: AVCaptureDevice?
var photoOutput: AVCapturePhotoOutput?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
var image: UIImage?
override func viewDidLoad() {
super.viewDidLoad()
setupCaptureSession()
setupDevice()
setupInputOutput()
setupPreviewLayer()
startRunningCaptureSession()
}
func setupCaptureSession() {
captureSession.sessionPreset = AVCaptureSession.Preset.photo
}
func setupDevice() {
let frontCamera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front)
let backCamera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
frontCameraDeviceInput = try? AVCaptureDeviceInput(device: frontCamera!)
backCameraDeviceInput = try? AVCaptureDeviceInput(device: backCamera!)
}
func setupInputOutput() {
captureSession.addInput(backCameraDeviceInput!)
photoOutput = AVCapturePhotoOutput()
photoOutput?.isHighResolutionCaptureEnabled = true
photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format:[AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
captureSession.addOutput(photoOutput!)
}
func setupPreviewLayer() {
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
cameraPreviewLayer?.frame = self.view.frame
self.view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
}
func startRunningCaptureSession() {
captureSession.startRunning()
}
@IBAction func camerButton(_ sender: Any) {
let settings = AVCapturePhotoSettings()
photoOutput?.capturePhoto(with: settings, delegate: self)
}
@IBAction func switchCamera(_ sender: Any) {
captureSession.beginConfiguration()
//Change camera device inputs from back to front or opposite
if captureSession.inputs.contains(frontCameraDeviceInput!) == true {
captureSession.removeInput(frontCameraDeviceInput!)
captureSession.addInput(backCameraDeviceInput!)
} else if captureSession.inputs.contains(backCameraDeviceInput!) == true {
captureSession.removeInput(backCameraDeviceInput!)
captureSession.addInput(frontCameraDeviceInput!)
}
//Commit all the configuration changes at once
captureSession.commitConfiguration();
}
override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
if segue.identifier == "previewCameraPhoto" {
let previewVC = segue.destination as! PreviewViewController
previewVC.image = self.image
}
}
}
extension CameraViewController: AVCapturePhotoCaptureDelegate {
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
if let imageData = photo.fileDataRepresentation() {
image = UIImage(data: imageData)
performSegue(withIdentifier: "previewCameraPhoto", sender: nil)
}
}
override var prefersStatusBarHidden: Bool
{
return true
}
}
The lines of code below are used to capture the image. I execute them when the capture button is tapped; in your case that is
func camerButton(_ sender: Any)
The definitions of the methods used are also below.
DispatchQueue.global(qos: .default).async {
let videoConnection = self.imageOutput.connection(with: AVMediaType.video)
let orientation: UIDeviceOrientation = UIDevice.current.orientation
switch orientation {
case .portrait:
videoConnection?.videoOrientation = .portrait
case .portraitUpsideDown:
videoConnection?.videoOrientation = .portraitUpsideDown
case .landscapeRight:
videoConnection?.videoOrientation = .landscapeLeft
case .landscapeLeft:
videoConnection?.videoOrientation = .landscapeRight
default:
videoConnection?.videoOrientation = .portrait
}
self.imageOutput.captureStillImageAsynchronously(from: videoConnection!) { buffer, _ in
self.session.stopRunning()
guard let b = buffer
else { return }
let data = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(b)
if var image = UIImage(data: data!) {
// Crop the image if the output needs to be square.
if self.configuration.onlySquareImagesFromCamera {
image = self.cropImageToSquare(image)
}
// Flip image if taken from the front camera.
if let device = self.device, device.position == .front {
image = self.flipImage(image: image)
}
DispatchQueue.main.async {
self.didCapturePhoto?(image)
}
}
}
}
The two methods used in this function:
func cropImageToSquare(_ image: UIImage) -> UIImage {
let orientation: UIDeviceOrientation = UIDevice.current.orientation
var imageWidth = image.size.width
var imageHeight = image.size.height
switch orientation {
case .landscapeLeft, .landscapeRight:
// Swap width and height if orientation is landscape
imageWidth = image.size.height
imageHeight = image.size.width
default:
break
}
// The center coordinate along Y axis
let rcy = imageHeight * 0.5
let rect = CGRect(x: rcy - imageWidth * 0.5, y: 0, width: imageWidth, height: imageWidth)
let imageRef = image.cgImage?.cropping(to: rect)
return UIImage(cgImage: imageRef!, scale: 1.0, orientation: image.imageOrientation)
}
// Used when image is taken from the front camera.
func flipImage(image: UIImage!) -> UIImage! {
let imageSize: CGSize = image.size
UIGraphicsBeginImageContextWithOptions(imageSize, true, 1.0)
let ctx = UIGraphicsGetCurrentContext()!
ctx.rotate(by: CGFloat(Double.pi/2.0))
ctx.translateBy(x: 0, y: -imageSize.width)
ctx.scaleBy(x: imageSize.height/imageSize.width, y: imageSize.width/imageSize.height)
ctx.draw(image.cgImage!, in: CGRect(x: 0.0,
y: 0.0,
width: imageSize.width,
height: imageSize.height))
let newImage: UIImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return newImage
}
I should not forget to give credit to the developers of this library - https://github.com/Yummypets/YPImagePicker/blob/2.5.1/Source/Camera/YPCameraVC.swift
Just add this to the image picker and the user will get the option of choosing their preferred crop ratio; the default will be as you wanted, a square-shaped photo:
self.ImagePicker.allowsEditing = true
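For reference, the same square crop can also be done without the deprecated AVCaptureStillImageOutput path, by calling the cropImageToSquare helper from the question's own AVCapturePhotoCaptureDelegate callback. A sketch:
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
    if let imageData = photo.fileDataRepresentation(), let fullImage = UIImage(data: imageData) {
        // cropImageToSquare(_:) is the helper defined in the answer above.
        image = cropImageToSquare(fullImage)
        performSegue(withIdentifier: "previewCameraPhoto", sender: nil)
    }
}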