App Only Crashes on iPhone 6 and iPad in iOS 11 Using Vision & Machine Learning API - Swift

I made a live translation app that identifies an object and translates it using the user's camera. It works just fine on my iPhone 6s and doesn't crash in any of the simulators, but when I run it on an iPhone 6, it crashes as soon as I try to segue to the camera feed. Apple says it crashes on the iPad as well.
Do certain devices just not support the Vision API, or is something wrong with my code?
import UIKit
import AVKit
import Vision
var lang = ""
var lang2 = ""
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate, AVCapturePhotoCaptureDelegate {
@IBAction func screenshotB(_ sender: Any) {
// screenshot camera screen view
}
@IBOutlet weak var screenshotBOutlet: UIButton!
@IBOutlet weak var swirlyGuy: UIActivityIndicatorView!
@IBOutlet weak var title1: UILabel!
@IBOutlet weak var settingsButtonOutlet: UIButton!
@IBOutlet weak var launchScreen: UIViewX!
@IBOutlet weak var launchScreenLogo: UIImageView!
func stopSwirlyGuy(){
swirlyGuy.stopAnimating()
}
let identifierLabel: UILabel = {
let label = UILabel()
label.backgroundColor = UIColor(red: 0, green: 0, blue:0, alpha: 0.4)
label.textColor = .white
label.textAlignment = .center
label.translatesAutoresizingMaskIntoConstraints = false
return label
}()
@IBAction func prepareForUnwind(segue: UIStoryboardSegue) {
}
override func viewDidLoad() {
super.viewDidLoad()
launchScreen.alpha = 1
launchScreenLogo.alpha = 1
swirlyGuy.startAnimating()
// start up the camera
let captureSession = AVCaptureSession()
captureSession.sessionPreset = .hd4K3840x2160
guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
captureSession.addInput(input)
captureSession.startRunning()
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(previewLayer)
previewLayer.frame = view.frame
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
captureSession.addOutput(dataOutput)
setupIdentifierConfidenceLabel()
setupSettingsButton()
setupTitle()
setupSwirlyGuy()
setupScreenshot()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
UIView.animate(withDuration: 1.5) {
self.launchScreen.alpha = 0
self.launchScreenLogo.alpha = 0
}
}
fileprivate func setupSettingsButton() {
view.addSubview(settingsButtonOutlet)
}
fileprivate func setupScreenshot() {
view.addSubview(screenshotBOutlet)
}
fileprivate func setupSwirlyGuy() {
view.addSubview(swirlyGuy)
}
fileprivate func setupTitle() {
view.addSubview(title1)
}
fileprivate func setupIdentifierConfidenceLabel() {
view.addSubview(identifierLabel)
identifierLabel.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
identifierLabel.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
identifierLabel.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
identifierLabel.heightAnchor.constraint(equalToConstant: 100).isActive = true
identifierLabel.numberOfLines = 0
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
// print("Camera was able to capture a frame:", Date())
guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
// model
guard let model = try? VNCoreMLModel(for: Resnet50().model) else { return }
let request = VNCoreMLRequest(model: model) { (finishedReq, err) in
//perhaps check the err
// print(finishedReq.results)
guard let results = finishedReq.results as? [VNClassificationObservation] else { return }
guard let firstObservation = results.first else { return }
print(firstObservation.identifier, firstObservation.confidence)
let x = (firstObservation.confidence)
let y = (x * 10000).rounded() / 10000
let z = (firstObservation.identifier)
let s = (self.translateSpanish(object1: firstObservation.identifier))
let f = (self.translateFrench(object1: firstObservation.identifier))
// var lang = ""
// var lang2 = ""
if language == "English" {
lang = z
}
else if language == "Spanish" {
lang = s
}
else {
lang = f
}
if language2 == "Spanish" {
lang2 = s
}
else if language2 == "English" {
lang2 = z
}
else {
lang2 = f
}
DispatchQueue.main.async {
self.identifierLabel.text = "\(lang)" + " = " + "\(lang2) \n \(y * 100)% accuracy"
self.stopSwirlyGuy()
}
}
try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}
// Translation functions omitted for brevity
}
This is the code for the view controller that segues into the main screen where the camera feed and Vision processing take place.
import UIKit
class FirstLaunchViewController: UIViewController {
@IBOutlet weak var title1: UILabelX!
@IBOutlet weak var logo1: UIImageView!
@IBOutlet weak var description1: UILabel!
@IBOutlet weak var buttonOutlet: UIButtonX!
@IBOutlet weak var initialBackground: UIViewX!
@IBOutlet weak var initialLogo: UIImageView!
@IBAction func toVC(_ sender: Any) {
UserDefaults.standard.set(false, forKey: "name")
performSegue(withIdentifier: "toMain", sender: self)
}
override func viewDidLoad() {
super.viewDidLoad()
initialLogo.alpha = 1
initialBackground.alpha = 1
title1.alpha = 0
logo1.alpha = 0
description1.alpha = 0
buttonOutlet.alpha = 0
// Do any additional setup after loading the view.
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
UIView.animate(withDuration: 1.5, animations: {
self.initialLogo.alpha = 0
self.initialBackground.alpha = 0
}) { (true) in
self.initialBackgroundGone()
}
}
func initialBackgroundGone() {
UIView.animate(withDuration: 1.5, animations: {
self.title1.alpha = 1
}) { (true) in
self.showBackgroundAgain()
}
}
func showBackgroundAgain() {
UIView.animate(withDuration: 1.3, animations: {
self.logo1.alpha = 1
}) { (true) in
self.showTitle()
}
}
func showTitle() {
UIView.animate(withDuration: 1.5, animations: {
self.description1.alpha = 1
}) { (true) in
self.showEverythingElse()
}
}
func showEverythingElse() {
UIView.animate(withDuration: 3.5) {
self.buttonOutlet.alpha = 1
}
}
}

This is a lot of code, but I think your issue comes from the video preset you are using: the iPhone 6 doesn't support 4K video recording. When setting the session preset, you should check that it is supported by all of the devices you are targeting:
if captureSession.canSetSessionPreset(.hd4K3840x2160) {
    captureSession.sessionPreset = .hd4K3840x2160
} else {
    captureSession.sessionPreset = .high // or any other preset that suits your needs
}
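Another thing worth checking (an observation about the posted code, not something I can confirm is the crash): captureOutput builds a brand-new VNCoreMLModel from Resnet50 for every single camera frame, which creates a lot of memory pressure on 1 GB devices like the iPhone 6 and older iPads. A minimal sketch of loading the model once and reusing it:

// Sketch: create the Vision model once, as a property, instead of per frame.
let visionModel: VNCoreMLModel? = try? VNCoreMLModel(for: Resnet50().model)

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer),
          let model = visionModel else { return }
    let request = VNCoreMLRequest(model: model) { (finishedReq, err) in
        // ... same classification handling as in the question ...
    }
    try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}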
}

Related

AVCaptureDevice builtInWideAngleCamera image does not match preview

I have a Swift project where I am using a UIImageView to show a live capture preview and 'freeze' this image whenever the user clicks a 'Take photo' button, showing it in another UIImageView with identical dimensions and position.
This works great on devices that can use builtInDualCamera (such as an iPhone X), but on devices that rely on the fallback builtInWideAngleCamera (such as a 6th gen iPad mini), the image appears cropped/zoomed in.
Can someone explain whether it is possible, and how, to get an image identical to the one shown in the preview using the builtInDualCamera?
A minimal reproducible example can be found below (simply create a storyboard with 2 buttons and 2 UIImageViews and hook them up).
import UIKit
import AVFoundation
class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate, AVCapturePhotoCaptureDelegate {
@IBOutlet weak var cameraImageView: UIImageView!
@IBOutlet weak var userImageView: UIImageView!
var captureSession: AVCaptureSession? = AVCaptureSession()
var currentDevice: AVCaptureDevice?
var videoFileOutput: AVCaptureMovieFileOutput?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
var cameraOutput : AVCapturePhotoOutput?
func setupCamSession(){
if #available(iOS 10.0, *) {
if let device = AVCaptureDevice.default(.builtInDualCamera, for: AVMediaType.video, position:.front) {
currentDevice = device
} else if let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: AVMediaType.video, position: .front) {
currentDevice = device
}
} else {
// Fallback on earlier versions
let devices = AVCaptureDevice.devices().filter{ ($0 as AnyObject).hasMediaType(AVMediaType.video) && ($0 as AnyObject).position == AVCaptureDevice.Position.front }
if let captureDevice = devices.first {
currentDevice = captureDevice
}
}
if(currentDevice==nil)
{
print("failed")
return
}
else
{
captureSession?.sessionPreset = AVCaptureSession.Preset.medium
}
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: currentDevice!) else {
return
}
if (captureSession?.canAddInput(captureDeviceInput))! {
captureSession?.addInput(captureDeviceInput)
cameraOutput = AVCapturePhotoOutput()
if (captureSession?.canAddOutput(cameraOutput!))! {
captureSession?.addOutput(cameraOutput!)
}
}
else
{
print("failed")
return
}
}
func startCamSession()
{
if (captureSession==nil)
{
print("Warning: no captureSession detected")
return
}
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspect
cameraPreviewLayer?.frame = cameraImageView.bounds
cameraImageView.layer.addSublayer(cameraPreviewLayer!)
if let connection = cameraPreviewLayer?.connection {
let previewLayerConnection : AVCaptureConnection = connection
if previewLayerConnection.isVideoOrientationSupported {
previewLayerConnection.videoOrientation = .portrait
cameraPreviewLayer?.frame = cameraImageView.bounds
}
captureSession?.startRunning()
}
}
func stopCamSession()
{
captureSession?.stopRunning()
}
func photoOutput(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
if let error = error {
print(error.localizedDescription)
}
if let sampleBuffer = photoSampleBuffer, let previewBuffer = previewPhotoSampleBuffer {
let imageData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: sampleBuffer, previewPhotoSampleBuffer: previewBuffer)
let dataProvider = CGDataProvider(data: imageData! as CFData)
let cgImageRef = CGImage(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: CGColorRenderingIntent.absoluteColorimetric)
let image = UIImage(cgImage: cgImageRef!, scale: 1.0, orientation: UIImage.Orientation.leftMirrored)
self.userImageView.contentMode = .scaleAspectFit
self.userImageView.image = image
cameraPreviewLayer?.removeFromSuperlayer()
self.stopCamSession()
} else {
}
}
override func viewDidLoad() {
super.viewDidLoad()
setupCamSession()
}
@IBAction func startPressed(_ sender: Any) {
startCamSession()
}
@IBAction func takePhotoPressed(_ sender: Any) {
let settings = AVCapturePhotoSettings()
let previewPixelType = settings.availablePreviewPhotoPixelFormatTypes.first!
let previewFormat = [
kCVPixelBufferPixelFormatTypeKey as String : previewPixelType,
kCVPixelBufferWidthKey as String : 640,
kCVPixelBufferHeightKey as String : 480
]
settings.previewPhotoFormat = previewFormat
cameraOutput?.capturePhoto(with: settings, delegate: self)
}
}
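There is no accepted answer here, but one approach worth trying (a sketch under stated assumptions, not verified against the poster's setup) is to crop the captured photo to the region the preview layer actually displayed, using AVCaptureVideoPreviewLayer's metadataOutputRectConverted(fromLayerRect:). This matters most with .resizeAspectFill, where the layer shows only part of the sensor image; the hypothetical helper below assumes the captured image has already been rotated upright:

// Sketch (helper name is hypothetical): crop a captured image to the portion
// that was visible in the preview layer.
func cropToPreview(_ image: UIImage, previewLayer: AVCaptureVideoPreviewLayer) -> UIImage {
    // Normalized (0...1) rect, in capture-output coordinates, of what the layer displays.
    let outputRect = previewLayer.metadataOutputRectConverted(fromLayerRect: previewLayer.bounds)
    guard let cgImage = image.cgImage else { return image }
    let width = CGFloat(cgImage.width)
    let height = CGFloat(cgImage.height)
    let cropRect = CGRect(x: outputRect.origin.x * width,
                          y: outputRect.origin.y * height,
                          width: outputRect.width * width,
                          height: outputRect.height * height)
    guard let cropped = cgImage.cropping(to: cropRect) else { return image }
    return UIImage(cgImage: cropped, scale: image.scale, orientation: image.imageOrientation)
}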

Swift - VisionKit: How to edit the color of the "Keep Scan" and "Retake" buttons

I have a problem. I've implemented the VisionKit framework in order to detect text when the camera is used.
The problem is that the "Keep Scan" and "Retake" buttons are blue, and I want them black. How can I fix this? I've pasted my code below.
Thank you in advance!
import UIKit
import Vision
import VisionKit
class ScanText: UIViewController, VNDocumentCameraViewControllerDelegate {
@IBOutlet weak var imageView: UIImageView!
@IBOutlet weak var TextScan: UITextView!
@IBOutlet weak var buttonStartShape: UIButton!
@IBOutlet weak var infoScattaUnaFoto: UILabel!
@IBOutlet weak var copyButtonShape: UIButton!
var textRecognitionRequest = VNRecognizeTextRequest(completionHandler: nil)
private let textRecognitionWorkQueue = DispatchQueue(label: "MyVisionScannerQueue", qos: .userInitiated, attributes: [], autoreleaseFrequency: .workItem)
var classView = ViewController()
var arrayText = String()
override func viewDidLoad() {
super.viewDidLoad()
imageView.layer.cornerRadius = 15
imageView.clipsToBounds = true
TextScan.layer.cornerRadius = 15
TextScan.clipsToBounds = true
copyButtonShape.layer.cornerRadius = 25
copyButtonShape.clipsToBounds = true
buttonStartShape.layer.cornerRadius = 25
buttonStartShape.clipsToBounds = true
TextScan.isEditable = true
setupVision()
}
@IBAction func TakePicture(_ sender: Any) {
let scannerViewController = VNDocumentCameraViewController()
scannerViewController.delegate = self
present(scannerViewController, animated: true)
}
private func setupVision() {
textRecognitionRequest = VNRecognizeTextRequest { (request, error) in
guard let observations = request.results as? [VNRecognizedTextObservation] else { return }
var detectedText = ""
for observation in observations {
guard let topCandidate = observation.topCandidates(2).first else { return }
print("text \(topCandidate.string) has confidence \(topCandidate.confidence)")
detectedText += topCandidate.string
detectedText += "\n"
}
DispatchQueue.main.async {
self.TextScan.isHidden = false
self.infoScattaUnaFoto.isHidden = true
self.copyButtonShape.isHidden = false
self.buttonStartShape.setTitle("Ripeti", for: .normal)
self.TextScan.text += detectedText
self.TextScan.flashScrollIndicators()
}
}
textRecognitionRequest.recognitionLevel = .accurate
}
private func processImage(_ image: UIImage) {
imageView.image = image
recognizeTextInImage(image)
}
private func recognizeTextInImage(_ image: UIImage) {
guard let cgImage = image.cgImage else { return }
TextScan.text = ""
textRecognitionWorkQueue.async {
let requestHandler = VNImageRequestHandler(cgImage: cgImage, options: [:])
do {
try requestHandler.perform([self.textRecognitionRequest])
} catch {
print(error)
}
}
}
func documentCameraViewController(_ controller: VNDocumentCameraViewController, didFinishWith scan: VNDocumentCameraScan) {
/* guard scan.pageCount >= 1 else {
controller.dismiss(animated: true)
return
}*/
for i in 0 ..< scan.pageCount {
let img = scan.imageOfPage(at: i)
processImage(img)
}
let originalImage = scan.imageOfPage(at: 0)
print(originalImage)
//let newImage = compressedImage(originalImage)
controller.dismiss(animated: true)
}
func documentCameraViewController(_ controller: VNDocumentCameraViewController, didFailWithError error: Error) {
print(error)
controller.dismiss(animated: true)
}
func documentCameraViewControllerDidCancel(_ controller: VNDocumentCameraViewController) {
controller.dismiss(animated: true)
}
func compressedImage(_ originalImage: UIImage) -> UIImage {
guard let imageData = originalImage.jpegData(compressionQuality: 1),
let reloadedImage = UIImage(data: imageData) else {
return originalImage
}
return reloadedImage
}
// MARK: By tapping on this button, I pass all the data.
@IBAction func CopyText(_ sender: Any) {
let vc = (storyboard?.instantiateViewController(identifier: "SpeakDetail") as? ViewController)!
vc.textscannerized = TextScan.text
self.navigationController?.pushViewController(vc, animated: true)
}
}
You can try something like this:
scannerViewController.view.tintColor = .red
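Since the question asks for black specifically, the same idea can be applied where the controller is presented, for example in the question's own TakePicture action (a sketch; note that VNDocumentCameraViewController is a system controller, so there is no guarantee every element follows the tint):

@IBAction func TakePicture(_ sender: Any) {
    let scannerViewController = VNDocumentCameraViewController()
    scannerViewController.delegate = self
    // Tints the document camera's controls, including "Keep Scan" and "Retake".
    scannerViewController.view.tintColor = .black
    present(scannerViewController, animated: true)
}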

I'm getting the error "unrecognized selector sent to instance"

When I press any button on my calculator, it gives me the error "unrecognized selector sent to instance 0x7f7f85e04e40". I updated this code from Swift 2 to Swift 4; back then it used to work. What's wrong with the code?
import UIKit
class Valuta: UIViewController {
@IBOutlet weak var euro: UILabel!
@IBOutlet weak var zloty: UILabel!
@IBOutlet weak var topButton: UIButton!
@IBOutlet weak var bottomButton: UIButton!
var valutas : [ValutaWaarde] = []
var number = 0;
var isTypingNumber = false
var currentNumber = 0;
var currency = "PLN";
@IBAction func numberTapped(sender: AnyObject) {
let number = sender.currentTitle!
var disable = false
if number == "."{
if euro.text!.range(of: ".") != nil{
disable = true
}
}
if(disable == false){
if isTypingNumber {
euro.text = euro.text!+number!
} else {
euro.text = number
isTypingNumber = true
}
var output : Float;
if(currency == "EUR"){
output = (euro.text! as NSString).floatValue*Float((valutas[0].rates?.pLN!)!)
}
else{
output = (euro.text! as NSString).floatValue/Float((valutas[0].rates?.pLN!)!)
}
zloty.text = String(format: "%.2f", output)
}
}
@IBAction func resetCalculator(sender: AnyObject) {
isTypingNumber = false;
zloty.text = "0";
euro.text = "0";
}
override func viewDidLoad() {
super.viewDidLoad()
UINavigationBar.appearance().isTranslucent = false;
UINavigationBar.appearance().barTintColor = UIColor(red: 51/255.0, green: 51/255.0, blue: 51/255.0, alpha: 1)
UINavigationBar.appearance().titleTextAttributes = [NSAttributedStringKey.foregroundColor:UIColor.white]
//Get list
let url = URL(string: "https://api.fixer.io/latest?symbols=PLN&base=EUR")
var request = NSMutableURLRequest(url: url! as URL, cachePolicy: NSURLRequest.CachePolicy.returnCacheDataElseLoad, timeoutInterval: Double.infinity)
if Reachability.isConnectedToNetwork(){
request = NSMutableURLRequest(url: url! as URL, cachePolicy: NSURLRequest.CachePolicy.useProtocolCachePolicy, timeoutInterval: Double.infinity);
}
let session = URLSession.shared
let task = session.dataTask(with: request as URLRequest,
completionHandler: { data, response, error -> Void in
do {
if let json = try JSONSerialization.jsonObject(with: data!) as? [[String: String]] {
for valutaData in json {
let valuta = ValutaWaarde(dictionary: valutaData as NSDictionary)
self.valutas.append(valuta!)
}
}
} catch { print(error) }
})
task.resume()
}
...
}
Try to re-connect the @IBOutlets and @IBActions in the storyboard.
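A likely reason that helps (an observation about the Swift 2 → 4 migration, not part of the original answer): Swift 2's numberTapped(sender:) was exposed to Objective-C as the selector numberTapped:, which is what the old storyboard connection still references. In Swift 4, the same declaration without an external-name underscore is exposed as numberTappedWithSender:, so the old connection sends a selector that no longer exists. Re-connecting, or adding the underscore, restores the old selector name:

// Exposed to Objective-C as "numberTapped:", matching the old storyboard connection.
@IBAction func numberTapped(_ sender: AnyObject) {
    // ... existing body unchanged ...
}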

How to clear the information from the Label.text?

I am developing a QR code reader application. Here is my code:
var captureSession: AVCaptureSession?
var videoPreviewLayer: AVCaptureVideoPreviewLayer?
var qrCodeframeView: UIView?
@IBOutlet weak var CancelButton: UIButton!
@IBOutlet weak var Label: UILabel!
override func viewDidLoad() {
CancelButton.hidden = true
Label.hidden = true
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
@IBAction func ScanMe(sender: AnyObject) {
CancelButton.hidden = false
Label.hidden = false
let captureDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
var error: NSError?
let input: AnyObject!
do {
input = try AVCaptureDeviceInput (device: captureDevice)
} catch let error1 as NSError{
error = error1
input = nil
}
if (error != nil){
print ("\(error?.localizedDescription)")
return
}
captureSession = AVCaptureSession()
captureSession?.addInput(input as! AVCaptureInput)
let captureMetadatOutput = AVCaptureMetadataOutput()
captureSession?.addOutput(captureMetadatOutput)
captureMetadatOutput.setMetadataObjectsDelegate(self, queue: dispatch_get_main_queue())
captureMetadatOutput.metadataObjectTypes = [AVMetadataObjectTypeQRCode]
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
videoPreviewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
videoPreviewLayer?.frame = view.layer.bounds
view.layer.addSublayer(videoPreviewLayer!)
captureSession?.startRunning()
view.bringSubviewToFront(Label)
view.bringSubviewToFront(CancelButton)
qrCodeframeView = UIView()
qrCodeframeView?.layer.borderColor = UIColor.greenColor().CGColor
qrCodeframeView?.layer.borderWidth = 2
view.addSubview(qrCodeframeView!)
view.bringSubviewToFront(qrCodeframeView!)
}
func captureOutput(captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [AnyObject]!, fromConnection connection: AVCaptureConnection!) {
if metadataObjects == nil || metadataObjects.count == 0 {
qrCodeframeView?.frame = CGRectZero
Label.text = "No QR code detected"
return
}
let metadateObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
if metadateObj.type == AVMetadataObjectTypeQRCode {
let BarcodeObject = videoPreviewLayer?.transformedMetadataObjectForMetadataObject(metadateObj as AVMetadataMachineReadableCodeObject) as! AVMetadataMachineReadableCodeObject
qrCodeframeView?.frame = BarcodeObject.bounds
if metadateObj.stringValue != nil {
Label.text = metadateObj.stringValue
captureSession?.stopRunning()
}
}
}
@IBAction func Cancel(sender: AnyObject) {
CancelButton.hidden = true
Label.hidden = true
captureSession?.stopRunning()
qrCodeframeView?.removeFromSuperview()
videoPreviewLayer?.removeFromSuperlayer()
}
@IBAction func Open(sender: AnyObject) {
}
override func prepareForSegue(segue: UIStoryboardSegue, sender: AnyObject?) {
var PC : SecondSecondViewController = segue.destinationViewController as! SecondSecondViewController
PC.label1 = Label.text!
}
}
The problem: when I click the cancel button and go back to the previous view controller, then reopen the QR code scanner, the last scanned code is still displayed in Label.text. Could you please help me clear the label so the old code is not shown? These codes must be used only once, and if users can still see a code they would be able to use it again.
If you want to do it without adding other callbacks:
override func prepareForSegue(segue: UIStoryboardSegue, sender: AnyObject?) {
    let PC: SecondSecondViewController = segue.destinationViewController as! SecondSecondViewController
    PC.label1 = Label.text!
    Label.text = ""
}
Otherwise, you have to catch the unwind segue when you come back from SecondSecondViewController and set Label.text = "" there.
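A sketch of that unwind approach, in the question's Swift 2 style (the action name is hypothetical; it just needs to be the action the unwind segue is wired to):

// Wire the "back" transition from SecondSecondViewController to this unwind action.
@IBAction func unwindToScanner(segue: UIStoryboardSegue) {
    Label.text = ""
}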
Just add this where you exit from the QR scanner (when cancel is pressed):
Label.text = ""

Accessing Front Camera in Swift 2.1/iOS 9.1

I am trying to make a selfie app, but I am unable to access the front camera. When I run the code below, the app still picks only the back camera. I am working on Xcode 7.1.1, and I have also tried Xcode 7.2 Beta. Can someone help? I tried the solutions given in the other question here, but they did not solve the problem.
import UIKit
import AVFoundation
class ViewController: UIViewController {
@IBOutlet weak var previewView: UIView!
@IBOutlet weak var capturedImage: UIImageView!
var captureSession: AVCaptureSession?
var stillImageOutput: AVCaptureStillImageOutput?
var previewLayer: AVCaptureVideoPreviewLayer?
enum AVCaptureDevicePosition : Int {
case Unspecified
case Front
case Back
}
func viewDidAppear(animated: Bool){
super.viewDidAppear(animated)
previewLayer!.frame = previewView.bounds
reloadCamera()
}
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
}
func reloadCamera() {
captureSession = AVCaptureSession()
captureSession!.sessionPreset = AVCaptureSessionPresetPhoto
//let backCamera = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
// access the front camera
let videoDevices = AVCaptureDevice.devicesWithMediaType(AVMediaTypeVideo)
var captureDevice:AVCaptureDevice
for device in videoDevices{
let device = device as AVCaptureDevice
if device.position == AVCaptureDevicePosition.Front {
captureDevice = device
}
}
var error: NSError?
var input: AVCaptureDeviceInput!
do {
input = try AVCaptureDeviceInput(device : captureDevice)
} catch let error1 as NSError {
error = error1
input = nil
}
if error == nil && captureSession!.canAddInput(input) {
captureSession!.addInput(input)
stillImageOutput = AVCaptureStillImageOutput()
stillImageOutput!.outputSettings = [AVVideoCodecKey :AVVideoCodecJPEG]
if captureSession!.canAddOutput(stillImageOutput){
captureSession!.addOutput(stillImageOutput)
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer!.videoGravity = AVLayerVideoGravityResizeAspect
previewLayer!.connection?.videoOrientation = AVCaptureVideoOrientation.Portrait
previewView.layer.addSublayer(previewLayer!)
captureSession!.startRunning()
}
}
}
@IBAction func didPressTakePhoto(sender: UIButton) {
if let videoConnection = stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo){
videoConnection.videoOrientation = AVCaptureVideoOrientation.Portrait
stillImageOutput?.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: {(sampleBuffer, error) in
if (sampleBuffer != nil){
let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
let dataProvider = CGDataProviderCreateWithCFData(imageData)
let cgImageRef = CGImageCreateWithJPEGDataProvider(dataProvider,nil,true,CGColorRenderingIntent.RenderingIntentDefault)
let image = UIImage(CGImage: cgImageRef!, scale: 1.0, orientation: UIImageOrientation.Right)
self.capturedImage.image = image
UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
}
})
}
}
@IBAction func didPressTakeAnother(sender: AnyObject) {
captureSession!.startRunning()
}
}
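There is no accepted answer here, but one thing stands out in the posted code (an observation, not a confirmed fix): the class declares its own enum AVCaptureDevicePosition, which shadows AVFoundation's type of the same name inside the class, so the device.position comparison no longer refers to the framework's Front case. Separately, captureDevice is declared non-optional and can be read before anything is assigned to it, and viewDidAppear is missing the override keyword. Deleting the local enum and making the device optional would look roughly like this, in the question's Swift 2 style:

// Remove the local `enum AVCaptureDevicePosition` declaration entirely, then:
let videoDevices = AVCaptureDevice.devicesWithMediaType(AVMediaTypeVideo)
var frontCamera: AVCaptureDevice?
for device in videoDevices {
    let device = device as! AVCaptureDevice
    if device.position == .Front { // AVFoundation's enum, not a local copy
        frontCamera = device
    }
}
guard let captureDevice = frontCamera else {
    print("No front camera found")
    return
}
// ... use captureDevice to build the AVCaptureDeviceInput as in the question ...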