Swift - Overlay is not showing over AVCaptureVideoPreviewLayer

I'm trying to put an overlay over the preview layer, but for some reason the UIImageView that I add above the preview layer is not shown.
Code:
let session = AVCaptureSession()
let sessionQueue = DispatchQueue(label: AVCaptureSession.self.description(), attributes: [], target: nil)
let imgView = UIImageView(frame: CGRect(x: 0, y: 0, width: 240, height: 80))

override func viewDidLoad() {
    super.viewDidLoad()
    imgView.image = UIImage(contentsOfFile: "angleArm.png")
    session.beginConfiguration()
    let videoDevice = AVCaptureDevice.default(.builtInDualCamera, for: AVMediaType.video, position: .back)
    if (videoDevice != nil) {
        let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice!)
        if (videoDeviceInput != nil) {
            if (session.canAddInput(videoDeviceInput!)) {
                session.addInput(videoDeviceInput!)
            }
        }
    }
    session.commitConfiguration()
    let previewLayer = AVCaptureVideoPreviewLayer(session: session)
    previewLayer.frame = view.frame
    previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
    self.view.layer.addSublayer(previewLayer)
    let preView = UIView()
    preView.frame = self.view.frame
    preView.addSubview(imgView)
    self.view.addSubview(preView)
    self.view.bringSubviewToFront(preView)
}

override func viewWillAppear(_ animated: Bool) {
    super.viewWillAppear(animated)
    sessionQueue.async {
        self.session.startRunning()
    }
}

override func viewWillDisappear(_ animated: Bool) {
    super.viewWillDisappear(animated)
    sessionQueue.async {
        self.session.stopRunning()
    }
}
I've read some posts and tried using a CALayer with the image as its contents; most recently I tried creating another UIView, putting the image view in it, and adding that to the main view, but still no luck.
Does anyone have an idea what I'm doing wrong? Thanks in advance.

You should check and change the zPosition of the overlay view; overlayView.layer.zPosition should be greater than the preview's zPosition. For example:
overlayView.layer.zPosition = preview.layer.zPosition + 1

Alternatively, try setting previewLayer.zPosition = -1.
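For completeness, a minimal sketch of the overlay setup under that approach (an illustration, not the asker's exact code; note also that UIImage(named:) resolves bundle resources, whereas UIImage(contentsOfFile:) needs a full file path and returns nil for a bare name like "angleArm.png"):

// Add the preview layer first, then place the overlay explicitly above it.
let previewLayer = AVCaptureVideoPreviewLayer(session: session)
previewLayer.frame = view.bounds
previewLayer.videoGravity = .resizeAspectFill
view.layer.addSublayer(previewLayer)

let overlayView = UIImageView(frame: CGRect(x: 0, y: 0, width: 240, height: 80))
overlayView.image = UIImage(named: "angleArm") // bundle lookup, unlike contentsOfFile
view.addSubview(overlayView)
overlayView.layer.zPosition = previewLayer.zPosition + 1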


Capture only the camera preview in AVCapture Swift

Here is my code:
import UIKit
import AVFoundation

class ViewController: UIViewController {

    @IBOutlet weak var cameraView: UIView!

    var image: UIImage!
    var captureSession = AVCaptureSession()
    var backCamera: AVCaptureDevice?
    var frontCamera: AVCaptureDevice?
    var currentCamera: AVCaptureDevice?
    var photoOutput: AVCapturePhotoOutput?
    var cameraPreviewLayer: AVCaptureVideoPreviewLayer?

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.
    }

    override func viewDidAppear(_ animated: Bool) {
        setupCaptureSession()
        setupDevice()
        setupInputOutput()
        setupPreviewLayer()
        startRunningCaptureSession()
    }

    @IBAction func cameraButton_Tab(_ sender: Any) {
        let settings = AVCapturePhotoSettings()
        // performSegue(withIdentifier: "showPhoto_Segue", sender: nil)
        photoOutput?.capturePhoto(with: settings, delegate: self)
    }

    func setupCaptureSession() {
        captureSession.sessionPreset = AVCaptureSession.Preset.photo
    }

    func setupDevice() {
        let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.unspecified)
        let devices = deviceDiscoverySession.devices
        for device in devices {
            if device.position == AVCaptureDevice.Position.back {
                backCamera = device
            } else if device.position == AVCaptureDevice.Position.front {
                frontCamera = device
            }
        }
        currentCamera = backCamera
    }

    func setupInputOutput() {
        do {
            let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
            captureSession.addInput(captureDeviceInput)
            photoOutput = AVCapturePhotoOutput()
            photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
            captureSession.addOutput(photoOutput!)
        } catch {
            print(error)
        }
    }

    func setupPreviewLayer() {
        cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
        cameraPreviewLayer!.frame = self.cameraView.bounds
        self.cameraView.layer.insertSublayer(cameraPreviewLayer!, at: 0)
    }

    func startRunningCaptureSession() {
        captureSession.startRunning()
    }
}

extension ViewController: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        if let imageData = photo.fileDataRepresentation() {
            image = UIImage(data: imageData)
        }
    }
}
See the image: I want to save the area where the background color is yellow. I can see the camera through that part, but when I save the image, it saves the whole camera view, not the square.
If I make the UIImageView the same size as the yellow UIView and save the output, it captures the whole view and resizes it, squeezing the rectangle into a square.
How can I capture just the yellow area and save that?
didFinishProcessingPhoto will return the complete image, exactly what the camera sees; you won't directly get the image that is shown in your preview layer. So, to get a UIImage matching what the preview layer shows, you can resize the captured image.
Resizing can be done in two ways: one keeps the aspect ratio, the other takes an exact size. I would recommend going with the aspect ratio, because it ensures the image won't be squeezed or stretched in any dimension, while passing a wrong exact size won't fulfil your requirement.
Resize UIImage passing a new CGSize:
extension UIImage {
    func scaleImage(toSize newSize: CGSize) -> UIImage? {
        var newImage: UIImage?
        let newRect = CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height).integral
        UIGraphicsBeginImageContextWithOptions(newSize, false, 0)
        if let context = UIGraphicsGetCurrentContext(), let cgImage = self.cgImage {
            context.interpolationQuality = .high
            // Core Graphics draws with a flipped y-axis, so flip vertically before drawing.
            let flipVertical = CGAffineTransform(a: 1, b: 0, c: 0, d: -1, tx: 0, ty: newSize.height)
            context.concatenate(flipVertical)
            context.draw(cgImage, in: newRect)
            if let img = context.makeImage() {
                newImage = UIImage(cgImage: img)
            }
            UIGraphicsEndImageContext()
        }
        return newImage
    }
}
Usage: capturedImage.scaleImage(toSize: CGSize(width: 300, height: 300))
Resize UIImage keeping the aspect ratio:
extension UIImage {
    func scaleImage(toWidth newWidth: CGFloat) -> UIImage {
        let scale = newWidth / self.size.width
        let newHeight = self.size.height * scale
        let newSize = CGSize(width: newWidth, height: newHeight)
        let renderer = UIGraphicsImageRenderer(size: newSize)
        let image = renderer.image { (context) in
            self.draw(in: CGRect(origin: CGPoint(x: 0, y: 0), size: newSize))
        }
        return image
    }
}
Usage: capturedImage.scaleImage(toWidth: 300)
Reference: Resize UIImage to 200x200pt/px
Update:
Keep the below method as it is in your code:
@IBAction func cameraButton_Tab(_ sender: Any) {
    let settings = AVCapturePhotoSettings()
    photoOutput?.capturePhoto(with: settings, delegate: self)
}

extension ViewController: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        if let imageData = photo.fileDataRepresentation(),
           let capturedImage = UIImage(data: imageData) {
            // Scale the capture to the camera preview layer's width; the result
            // matches the image size shown in the preview.
            let cropImage = capturedImage.scaleImage(toWidth: cameraPreviewLayer!.frame.size.width)
        }
    }
}
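If you then want to persist the scaled image, here is a minimal sketch (an assumption, not part of the original answer: it saves to the photo library, which requires the NSPhotoLibraryAddUsageDescription key in Info.plist):

// Hypothetical follow-up inside photoOutput(_:didFinishProcessingPhoto:error:):
let scaled = capturedImage.scaleImage(toWidth: cameraPreviewLayer!.frame.size.width)
UIImageWriteToSavedPhotosAlbum(scaled, nil, nil, nil) // write to the user's photo library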

Several errors in my Swift code for a camera app

I am trying to create a camera app in Xcode 10.1 using Swift for a school project. I have been working on this for a while and still have several errors.
import UIKit
import AVFoundation

class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    let captureSession = AVCaptureSession()
    var previewLayer: CALayer!
    var captureDevice: AVCaptureDevice?
    var takePhoto = false

    override func viewDidLoad() {
        super.viewDidLoad()
        prepareCamera()
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        prepareCamera()
    }

    func prepareCamera() {
        captureSession.sessionPreset = AVCaptureSession.Preset.photo
        if let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.back) {
            //if availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back).devices {
            //let captureDevice = availableDevices
            beginSession()
        }
    }

    func beginSession() {
        do {
            let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
            captureSession.addInput(captureDeviceInput)
        } catch {
            print(error.localizedDescription)
        }
        if let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
            self.previewLayer = previewLayer
            self.view.layer.addSublayer(self.previewLayer)
            self.previewLayer.frame = self.view.layer.frame
            captureSession.startRunning()
            let dataOutput = AVCaptureVideoDataOutput()
            dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)] as [String: Any]
            dataOutput.alwaysDiscardsLateVideoFrames = true
            if captureSession.canAddOutput(dataOutput) {
                captureSession.addOutput(dataOutput)
            }
            captureSession.commitConfiguration()
            let queue = DispatchQueue(label: "com.sophiaradis.captureQueue")
            dataOutput.setSampleBufferDelegate(self, queue: queue)
        }
    }

    @IBAction func takePhoto(_ sender: Any) {
        takePhoto = true
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        if takePhoto {
            takePhoto = false
            if let image = self.getImageFromSamplyBuffer(buffer: sampleBuffer) {
                let photoVC = UIStoryboard(name: "Main", bundle: nil).instantiateViewController(withIdentifier: "PhotoVC") as! PhotoViewController
                photoVC.takenPhoto = image
                DispatchQueue.main.async {
                    self.present(photoVC, animated: true, completion: {
                        self.stopCaptureSession()
                    })
                }
            }
        }
    }

    func getImageFromSamplyBuffer(buffer: CMSampleBuffer) -> UIImage? {
        if let pixelBuffer = CMSampleBufferGetImageBuffer(buffer) {
            let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
            let context = CIContext()
            let imageRect = CGRect(x: 0, y: 0, width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer))
            if let image = context.createCGImage(ciImage, from: imageRect) {
                return UIImage(cgImage: image, scale: UIScreen.main.scale, orientation: .right)
            }
        }
        return nil
    }

    func stopCaptureSession() {
        self.captureSession.stopRunning()
        if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
            for input in inputs {
                self.captureSession.removeInput(input)
            }
        }
    }
}
There is an error in these lines of code:
if let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes:[AVCaptureDevice.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.back)
This error says that type 'AVCaptureDevice' has no member 'DiscoverySession', but when I looked online, it did.
The second error, in the lines that follow, is that I cannot convert a value of type 'AVCaptureDevice?' to the expected argument type 'AVCaptureDevice'.
func beginSession() {
    do {
        let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
I have no idea how to fix this one at all. My next error occurs directly below that one, in the following lines:
if let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
    self.previewLayer = previewLayer
    self.view.layer.addSublayer(self.previewLayer)
    self.previewLayer.frame = self.view.layer.frame
    captureSession.startRunning()
This is flagged as: Initializer for conditional binding must have Optional type, not 'AVCaptureVideoPreviewLayer'.
If you can fix these errors, or even offer advice on how to fix any of them, it will mean a lot to me and really make my year.
1- DiscoverySession and its parameter types are all nested inside AVCaptureDevice:
let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.back)
2- Unwrap captureDevice with if let, or force-unwrap it:
let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice!)
3- AVCaptureVideoPreviewLayer's initializer doesn't return an optional, so replace
if let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
with
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
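Putting the three fixes together, a minimal sketch of the corrected setup (it keeps the question's structure and property names, and simply picks the first discovered device, which is an assumption on my part):

func prepareCamera() {
    captureSession.sessionPreset = AVCaptureSession.Preset.photo
    let availableDevices = AVCaptureDevice.DiscoverySession(
        deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera],
        mediaType: AVMediaType.video,
        position: AVCaptureDevice.Position.back).devices
    if let device = availableDevices.first {
        captureDevice = device
        beginSession()
    }
}

func beginSession() {
    guard let captureDevice = captureDevice else { return }
    do {
        let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
        captureSession.addInput(captureDeviceInput)
    } catch {
        print(error.localizedDescription)
    }
    // Not optional, so no conditional binding needed.
    let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    self.previewLayer = previewLayer
    self.view.layer.addSublayer(previewLayer)
    previewLayer.frame = self.view.layer.frame
    captureSession.startRunning()
}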

How to custom size AVCapture

I need to capture barcodes, but my code captures the full screen.
How can I give the preview a custom, smaller size?
Please share an idea or some code for a custom size, thank you.
This is my code; the capture preview is full screen.
import UIKit
import AVFoundation

protocol BarcodeDelegate {
    func barcodeReaded(barcode: String)
}

class barcodeCapViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {

    var delegate: BarcodeDelegate?
    var captureSession: AVCaptureSession!
    var code: String?

    override func viewDidLoad() {
        super.viewDidLoad()
        self.captureSession = AVCaptureSession()
        let videoCaptureDevice: AVCaptureDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
        do {
            let videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
            if self.captureSession.canAddInput(videoInput) {
                self.captureSession.addInput(videoInput)
            } else {
                print("Could not add video input")
            }
            let metadataOutput = AVCaptureMetadataOutput()
            if self.captureSession.canAddOutput(metadataOutput) {
                self.captureSession.addOutput(metadataOutput)
                metadataOutput.setMetadataObjectsDelegate(self, queue: dispatch_get_main_queue())
                metadataOutput.metadataObjectTypes = [AVMetadataObjectTypeQRCode, AVMetadataObjectTypeEAN13Code]
            } else {
                print("Could not add metadata output")
            }
            let previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
            previewLayer.frame = self.view.layer.bounds
            self.view.layer.addSublayer(previewLayer)
            self.captureSession.startRunning()
        } catch let error as NSError {
            print("Error while creating video input device: \(error.localizedDescription)")
        }
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    func captureOutput(captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [AnyObject]!, fromConnection connection: AVCaptureConnection!) {
        for metadata in metadataObjects {
            let readableObject = metadata as! AVMetadataMachineReadableCodeObject
            let code = readableObject.stringValue
            if !code.isEmpty {
                self.captureSession.stopRunning()
                self.dismissViewControllerAnimated(true, completion: nil)
                self.delegate?.barcodeReaded(code)
            }
        }
    }
}
When I set the frame to CGRectMake(20, 40, 200, 50) or to CGRectMake(20, 40, 500, 100), the preview still doesn't come out at that size (screenshots omitted).
I don't know why the width and height don't follow the code.
Change the frame size of your AVCaptureVideoPreviewLayer:
let previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
previewLayer.frame = CGRectMake(10, 20, 100, 50) // something else!
If you're using autolayout, you probably don't want to deal with CALayer frames so you should create a UIView subclass, add your AVCaptureVideoPreviewLayer to that and set the layer's frame in layoutSubviews:
override func layoutSubviews() {
    super.layoutSubviews()
    // Use bounds (the view's own coordinate space), not frame (superview coordinates).
    self.previewLayer.frame = self.bounds
}
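To make that concrete, here is a minimal sketch of such a subclass (the PreviewView name and injected-session design are assumptions, not from the original answer):

import UIKit
import AVFoundation

final class PreviewView: UIView {

    private let previewLayer = AVCaptureVideoPreviewLayer()

    init(session: AVCaptureSession) {
        super.init(frame: .zero)
        previewLayer.session = session
        previewLayer.videoGravity = .resizeAspectFill
        layer.addSublayer(previewLayer)
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    override func layoutSubviews() {
        super.layoutSubviews()
        // Keep the preview layer pinned to whatever size Auto Layout gives this view.
        previewLayer.frame = bounds
    }
}

Constrain a PreviewView to any size (say 200x50) and the preview follows it automatically.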

Swift: Core Data loads my image (portrait) at 90 degrees

When I save my image to Core Data and then re-open it, every image that was taken in portrait comes back in landscape orientation.
I found lots of previous questions about this, but all in Objective-C, not Swift.
How can I fix the problem?
This is my code (it is a test application; once it works I will add it to my project).
This test app has two image views: one for loading from the library and one for loading from Core Data.
class ViewController: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {

    var monimage: String!
    let imagePicker = UIImagePickerController()

    @IBOutlet weak var MaPhoto: UIImageView? = UIImageView()
    @IBOutlet weak var maPhoto2: UIImageView! = UIImageView()

    var cameraUI: UIImagePickerController = UIImagePickerController()
    var yourContacts: NSMutableArray = NSMutableArray()

    override func viewDidLoad() {
        imagePicker.delegate = self
        super.viewDidLoad()
    }

    @IBAction func LabraryImage(sender: AnyObject) {
        imagePicker.delegate = self
        imagePicker.sourceType = .PhotoLibrary
        imagePicker.allowsEditing = true
        presentViewController(imagePicker, animated: true, completion: nil)
    }

    func imagePickerControllerDidCancel(picker: UIImagePickerController) {
        dismissViewControllerAnimated(true, completion: nil)
    }

    @IBAction func takePhoto(sender: UIButton) {
        if (UIImagePickerController.isSourceTypeAvailable(.Camera)) {
            cameraUI = UIImagePickerController()
            cameraUI.delegate = self
            cameraUI.sourceType = UIImagePickerControllerSourceType.Camera
            cameraUI.allowsEditing = true
            self.presentViewController(cameraUI, animated: true, completion: nil)
        } else {
            // no camera available
            let alert = UIAlertController(title: "Error", message: "There is no camera available", preferredStyle: .Alert)
            alert.addAction(UIAlertAction(title: "Okay", style: .Default, handler: { (alertAction) in
                alert.dismissViewControllerAnimated(true, completion: nil)
            }))
        }
    }

    func imagePickerController(picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [NSObject: AnyObject]) {
        if let pickedImage = info[UIImagePickerControllerOriginalImage] as? UIImage {
            MaPhoto!.contentMode = .ScaleAspectFit
            MaPhoto!.image = pickedImage
        }
        dismissViewControllerAnimated(true, completion: nil)
    }

    @IBAction func btnSavePressed(sender: AnyObject) {
        let appDel: AppDelegate = UIApplication.sharedApplication().delegate as! AppDelegate
        let context: NSManagedObjectContext = appDel.managedObjectContext!
        let ent = NSEntityDescription.entityForName("ImageData", inManagedObjectContext: context)
        var newUser = ImageData(entity: ent!, insertIntoManagedObjectContext: context)
        let contactImageData: NSData = UIImagePNGRepresentation(MaPhoto!.image)
        newUser.monimage = contactImageData
        context.save(nil)
        self.navigationController?.popViewControllerAnimated(true)
    }

    @IBAction func loadImage(sender: AnyObject) {
        let appDel: AppDelegate = UIApplication.sharedApplication().delegate as! AppDelegate
        let context: NSManagedObjectContext = appDel.managedObjectContext!
        let request2 = NSFetchRequest(entityName: "ImageData")
        request2.returnsObjectsAsFaults = false
        var results2: NSArray = context.executeFetchRequest(request2, error: nil)!
        if results2.count > 0 {
            for user in results2 {
                var thisUser2 = user as! ImageData
                let profileImage: UIImage = UIImage(data: thisUser2.monimage)!
                maPhoto2.image = profileImage
            }
        }
    }
}
I'm also trying to get the image square; that's why allowsEditing is set to true.
Thanks for your help!
This is the answer to my question:
@IBAction func btnSavePressed(sender: AnyObject) {
    let appDelegate = UIApplication.sharedApplication().delegate as! AppDelegate
    let managedContext = appDelegate.managedObjectContext!
    let entity = NSEntityDescription.entityForName("ImageData", inManagedObjectContext: managedContext)
    let options = NSManagedObject(entity: entity!, insertIntoManagedObjectContext: managedContext)
    // JPEG data keeps the EXIF orientation, unlike UIImagePNGRepresentation.
    var newImageData = UIImageJPEGRepresentation(MaPhoto!.image, 1)
    options.setValue(newImageData, forKey: "monimage")
    var error: NSError?
    managedContext.save(&error)
}
JPEGs are great for photos. However, saving as JPEG may lose some quality, whereas PNG excels there since it is a lossless compression format.
It's just a matter of preference and what you need at the moment. If you don't want to convert to JPEG, you can call the method below to bake in the orientation, then convert to PNG data to preserve it before saving to Core Data. :)
extension UIImage {
    func rotatedCopy() -> UIImage {
        if self.imageOrientation == UIImage.Orientation.up {
            return self
        }
        UIGraphicsBeginImageContext(size)
        // Draws the image into the current context respecting its orientation.
        draw(in: CGRect(origin: CGPoint.zero, size: size))
        let copy = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return copy!
    }
}
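A possible usage at save time, as a sketch (assuming modern Swift, where pngData() is the instance-method replacement for UIImagePNGRepresentation):

// Normalize orientation first, then encode losslessly as PNG.
if let image = MaPhoto?.image,
   let data = image.rotatedCopy().pngData() {
    newUser.monimage = data // "newUser"/"monimage" as in the question's save code
}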

Swift images get stretched

So I am totally new to programming and Swift; this is my second week of trying to code. A lot of fun, but a lot of errors as well. I want to make an app where the user can choose a photo from their gallery or take a photo with their camera, and after a press of a button the image gets pixelated (using Core Image's CIPixellate filter).
The problem is that whenever I press the button, the image seems to get stretched, and I can't figure out why. After browsing a picture: (screenshot omitted)
After pressing the button: (screenshot omitted)
Thanks for any answers!
My code is as follows:
import UIKit

class ViewController: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {

    @IBOutlet weak var myImageView: UIImageView!
    let picker = UIImagePickerController()

    func noCamera() {
        let alertVC = UIAlertController(title: "No Camera", message: "Don't try it on a computer Dumbass!", preferredStyle: .Alert)
        let okAction = UIAlertAction(title: "Sorry about that :(", style: .Default, handler: nil)
        alertVC.addAction(okAction)
        presentViewController(alertVC, animated: true, completion: nil)
    }

    @IBAction func photofromLibrary(sender: UIBarButtonItem) {
        picker.allowsEditing = false //2
        picker.sourceType = .PhotoLibrary //3
        picker.modalPresentationStyle = .Popover
        presentViewController(picker, animated: true, completion: nil) //4
        picker.popoverPresentationController?.barButtonItem = sender
    }

    @IBAction func shootPhoto(sender: UIButton) {
        if UIImagePickerController.availableCaptureModesForCameraDevice(.Rear) != nil {
            picker.allowsEditing = false
            picker.sourceType = UIImagePickerControllerSourceType.Camera
            picker.cameraCaptureMode = .Photo
            presentViewController(picker, animated: true, completion: nil)
        } else {
            noCamera()
        }
    }

    @IBAction func pixelise(sender: UIButton) {
        // 1
        let ciImage = CIImage(image: myImageView.image)
        // 2
        var filter = CIFilter(name: "CIPixellate")
        filter.setDefaults()
        filter.setValue(ciImage, forKey: kCIInputImageKey)
        myImageView.contentMode = .ScaleAspectFit
        // 3
        var outputImage = filter.outputImage
        var newImage = UIImage(CIImage: outputImage)
        myImageView.image = newImage
    }

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.
        picker.delegate = self
    }

    //MARK: Delegates
    func imagePickerController(picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [NSObject: AnyObject]) {
        var chosenImage = info[UIImagePickerControllerOriginalImage] as! UIImage //2
        myImageView.contentMode = .ScaleAspectFit //3
        myImageView.image = chosenImage //4
        dismissViewControllerAnimated(true, completion: nil) //5
    }

    func imagePickerControllerDidCancel(picker: UIImagePickerController) {
        dismissViewControllerAnimated(true, completion: nil)
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }
}
A UIImage created directly from a CIImage is not backed by a bitmap (CGImage), so a UIImageView may render it at the wrong size, which is why the result appears stretched. The proper process of converting a CIImage to a UIImage consists of creating a CIContext, then creating a CGImage using that context, and then creating a UIImage from that:
// 1
let ciImage = CIImage(image: image)
// 2
let filter = CIFilter(name: "CIPixellate")
filter.setDefaults()
filter.setValue(ciImage, forKey: kCIInputImageKey)
// 3
let context = CIContext(options: nil)
let cgImage = context.createCGImage(filter.outputImage, fromRect: CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height))
let outputImage = UIImage(CGImage: cgImage)
That yields the pixelated image at its original size, with no stretching (screenshot omitted).
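Adapted to the question's pixelise action, a sketch in the same era of Swift as the answer (the guard and rect-handling details are my assumptions, not part of the original answer):

@IBAction func pixelise(sender: UIButton) {
    guard let image = myImageView.image else { return }
    let ciImage = CIImage(image: image)
    let filter = CIFilter(name: "CIPixellate")
    filter.setDefaults()
    filter.setValue(ciImage, forKey: kCIInputImageKey)
    // Render through a CIContext so the resulting UIImage has a real CGImage backing it.
    let context = CIContext(options: nil)
    let rect = CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height)
    let cgImage = context.createCGImage(filter.outputImage, fromRect: rect)
    myImageView.contentMode = .ScaleAspectFit
    myImageView.image = UIImage(CGImage: cgImage)
}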