macOS: take a screenshot with AVFoundation - Swift

I have Swift code:
func makeScreenshot() {
    // Set up the inputs
    var displayCount: UInt32 = 0
    var result = CGGetActiveDisplayList(0, nil, &displayCount)
    if result != CGError.success {
        print("error when trying to get displays: \(result)")
        return
    }
    let allocated = Int(displayCount)
    let activeDisplays = UnsafeMutablePointer<CGDirectDisplayID>.allocate(capacity: allocated)
    result = CGGetActiveDisplayList(displayCount, activeDisplays, &displayCount)
    if result != CGError.success {
        print("error when trying to get the display list: \(result)")
        return
    }
    for i in 1...displayCount {
        // Set up the input
        let display: CGDirectDisplayID = activeDisplays[Int(i - 1)]
        guard let input = AVCaptureScreenInput(displayID: display) else {
            print("error AVCaptureScreenInput")
            return
        }
        input.minFrameDuration = CMTimeMake(value: 1, timescale: Int32(FPS))
        let session = AVCaptureSession()
        if session.canAddInput(input) {
            print("Input added")
            session.addInput(input)
        } else {
            print("Input error")
        }
        // Set up the output
        let output = AVCapturePhotoOutput()
        let screenshotSettings = AVCapturePhotoSettings()
        if session.canAddOutput(output) {
            print("Output added")
            session.addOutput(output)
        } else {
            print("Output error")
        }
        session.startRunning()
        output.capturePhoto(with: screenshotSettings, delegate: self)
        session.stopRunning()
    }
}
And delegate:
extension ViewController: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        if error != nil {
            print("\(String(describing: error?.localizedDescription)) debug: \(error.debugDescription)")
            return
        }
        guard let imageData = photo.cgImageRepresentation() else {
            print("error")
            return
        }
        let unixTimestamp = CreateTimeStamp()
        let path = folderName + "\(unixTimestamp)" + ".jpg"
        let fileUrl = URL(fileURLWithPath: path, isDirectory: true)
        let bitmapRep = NSBitmapImageRep(cgImage: imageData.takeUnretainedValue())
        let jpegData = bitmapRep.representation(using: NSBitmapImageRep.FileType.jpeg, properties: [:])!
        do {
            try jpegData.write(to: fileUrl, options: .atomic)
        } catch {
            print("error: \(error)")
        }
    }

    func CreateTimeStamp() -> Int32 {
        return Int32(Date().timeIntervalSince1970)
    }
}
And I have a problem: when I click the button, I need to take screenshots of all of the Mac's displays. But when I click the button, I get this message in the logs:
Optional("The operation could not be completed") debug: Optional(Error Domain=AVFoundationErrorDomain Code=-11800 "The operation could not be completed" UserInfo={NSLocalizedDescription=The operation could not be completed, NSLocalizedFailureReason=An unknown error occurred (-11800)})
But this function
makeScreenshot()
works if I call it from viewDidLoad(); it just doesn't work from the button action.
Any ideas?

You can use:
let id = CGMainDisplayID()
let image = CGDisplayCreateImage(id)
Then an alert will pop up asking the user to grant your app Screen Recording permission. Once the user adds your app to the list in the Security & Privacy settings, CGDisplayCreateImage will capture all windows and the whole screen.
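For completeness, a minimal sketch of that approach extended to every display (my own illustration, not from the original answer; the Desktop output path and file naming are assumptions):

import AppKit
import CoreGraphics

func captureAllDisplays() {
    var displayCount: UInt32 = 0
    guard CGGetActiveDisplayList(0, nil, &displayCount) == .success else { return }
    var displays = [CGDirectDisplayID](repeating: 0, count: Int(displayCount))
    guard CGGetActiveDisplayList(displayCount, &displays, &displayCount) == .success else { return }
    for (index, display) in displays.enumerated() {
        // CGDisplayCreateImage captures the whole display once the app has
        // been granted Screen Recording permission
        guard let image = CGDisplayCreateImage(display) else { continue }
        let rep = NSBitmapImageRep(cgImage: image)
        guard let data = rep.representation(using: .png, properties: [:]) else { continue }
        let url = FileManager.default.homeDirectoryForCurrentUser
            .appendingPathComponent("Desktop/screenshot-\(index).png")
        try? data.write(to: url, options: .atomic)
    }
}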

Related

Images not saving in Custom Folder in Gallery in iOS swift

I am trying to save my images to a custom folder in the iPhone gallery, but instead of saving the original image it saves only a blank white image on a real device. When I run the same code on the simulator it works fine.
The code I am trying is:
import Photos
class CustomPhotoAlbum: NSObject {
    static let albumName = "App Name" // put your album name here
    static let sharedInstance = CustomPhotoAlbum()
    var assetCollection: PHAssetCollection!
    private var collection: PHFetchResult<PHAssetCollection>!

    override init() {
        super.init()
        if let assetCollection = fetchAssetCollectionForAlbum() {
            self.assetCollection = assetCollection
            return
        }
        if PHPhotoLibrary.authorizationStatus() != PHAuthorizationStatus.authorized {
            PHPhotoLibrary.requestAuthorization({ (status: PHAuthorizationStatus) -> Void in
                ()
            })
        }
        if PHPhotoLibrary.authorizationStatus() == PHAuthorizationStatus.authorized {
            self.createAlbum()
        } else {
            PHPhotoLibrary.requestAuthorization(requestAuthorizationHandler)
        }
    }

    func requestAuthorizationHandler(status: PHAuthorizationStatus) {
        if PHPhotoLibrary.authorizationStatus() == PHAuthorizationStatus.authorized {
            // ideally this ensures the creation of the photo album even if authorization wasn't prompted till after init was done
            print("trying again to create the album")
            self.createAlbum()
        } else {
            print("should really prompt the user to let them know it's failed")
        }
    }

    func createAlbum() {
        PHPhotoLibrary.shared().performChanges({
            PHAssetCollectionChangeRequest.creationRequestForAssetCollection(withTitle: CustomPhotoAlbum.albumName) // create an asset collection with the album name
        }) { success, error in
            if success {
                self.assetCollection = self.fetchAssetCollectionForAlbum()
            } else {
                print("error \(error)")
            }
        }
    }

    func fetchAssetCollectionForAlbum() -> PHAssetCollection? {
        var firstObject: PHAssetCollection?
        let fetchOptions = PHFetchOptions()
        fetchOptions.predicate = NSPredicate(format: "title = %@", CustomPhotoAlbum.albumName)
        DispatchQueue.main.async {
            self.collection = PHAssetCollection.fetchAssetCollections(with: .album, subtype: .any, options: fetchOptions)
            if let _: AnyObject = self.collection.firstObject {
                firstObject = self.collection.firstObject
            }
        }
        if firstObject != nil {
            return firstObject
        } else {
            return nil
        }
    }

    func save(image: UIImage) {
        if assetCollection == nil {
            return // if there was an error upstream, skip the save
        }
        PHPhotoLibrary.shared().performChanges({
            let assetChangeRequest = PHAssetChangeRequest.creationRequestForAsset(from: image)
            let assetPlaceHolder = assetChangeRequest.placeholderForCreatedAsset
            let albumChangeRequest = PHAssetCollectionChangeRequest(for: self.assetCollection)
            let enumeration: NSArray = [assetPlaceHolder!]
            albumChangeRequest!.addAssets(enumeration)
        }, completionHandler: nil)
    }
}
And the code I use to save the image is:
func saveImage() {
    let snapShot: UIView = backgroundImage.snapshotView(afterScreenUpdates: true)!
    UIGraphicsBeginImageContext(backgroundImage.bounds.size)
    snapShot.drawHierarchy(in: backgroundImage.bounds, afterScreenUpdates: true)
    let image: UIImage = UIGraphicsGetImageFromCurrentImageContext()!
    UIGraphicsEndImageContext() // balance the begin-context call
    CustomPhotoAlbum.sharedInstance.save(image: image)
    self.view.snapshotView(afterScreenUpdates: true)
}
I have also added the permissions to my Info.plist.
The code runs fine on the simulator but not on a real device. On the real device it also logs an error in the console:
"Error returned from daemon: Error Domain=com.apple.accounts Code=7
"(null)"" 2022-08-30 17:25:21.043516+0500 App Name[19316:1400881]
[PAAccessLogger] Failed to log access with error: access=<PATCCAccess
0x28247e5b0> accessor:<<PAApplication 0x2809d98b0
identifierType:auditToken identifier:{pid:19316, version:56046}>>
identifier:08A3A297-406E-45A2-8D0D-4C443A3F2835 kind:intervalEnd
timestampAdjustment:0 tccService:kTCCServicePhotos, error=Error
Domain=PAErrorDomain Code=10 "Possibly incomplete access interval
automatically ended by daemon"
I am using Xcode 13.3. Almost the same question was asked here; I tried every answer but nothing helped.
Can someone please help me fix this issue? Thanks.
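One detail worth flagging in the code above: drawHierarchy(in:afterScreenUpdates:) is called on a snapshot view that was never added to a window, which commonly produces a blank image on a real device. A minimal sketch of an alternative (an assumption, not a verified fix), rendering the view directly with UIGraphicsImageRenderer:

func saveImage() {
    // Render the on-screen view itself instead of drawing a detached snapshot view
    let renderer = UIGraphicsImageRenderer(bounds: backgroundImage.bounds)
    let image = renderer.image { _ in
        backgroundImage.drawHierarchy(in: backgroundImage.bounds, afterScreenUpdates: true)
    }
    CustomPhotoAlbum.sharedInstance.save(image: image)
}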

How do I detect loss of connection from a web socket in my Swift App

I am trying to detect loss of connection to my web socket in Swift. I have very little understanding of web sockets, and googling for information has brought little success. Here is my custom class providing a web socket service:
//
// Services.swift
// BetVictorTask
//
// Created by Stephen Learmonth on 28/02/2022.
//
import UIKit
protocol NetworkServicesDelegateProtocol: AnyObject {
    func sendStockInfo(stocksInfo: [String: StockInfo])
}
class NetworkServices: NSObject, URLSessionWebSocketDelegate {
    static let sharedInstance = NetworkServices()
    var webSocketTask: URLSessionWebSocketTask?
    var stocksInfo: [String: StockInfo] = [:]
    var socketResults: [String: [StockInfo]] = [:]
    weak var delegate: NetworkServicesDelegateProtocol?

    func fetchStockInfo(symbols: [String], delegate: CompanyPriceListVC) {
        self.delegate = delegate
        let urlSession = URLSession(configuration: .default)
        webSocketTask = urlSession.webSocketTask(with: FINNHUB_SOCKET_STOCK_INFO_URL!)
        webSocketTask!.resume()
        for symbol in symbols {
            let string = FINNHUB_SOCKET_MESSAGE_STRING + symbol + "\"}"
            let message = URLSessionWebSocketTask.Message.string(string)
            webSocketTask!.send(message) { error in
                if let error = error {
                    print("Error sending message: \(error)")
                }
                self.receiveMessage()
            }
        }
    }

    private func receiveMessage() {
        self.webSocketTask!.receive { result in
            switch result {
            case .failure(let error):
                print("Error receiving message: \(error)")
            case .success(.string(let jsonData)):
                guard let stockData = jsonData.data(using: .utf8) else { return }
                self.socketResults = [:]
                self.stocksInfo = [:]
                let decoder = JSONDecoder()
                do {
                    let socketData = try decoder.decode(SocketData.self, from: stockData)
                    guard let stockInfoData = socketData.data else { return }
                    for stockInfo in stockInfoData {
                        let symbol = stockInfo.symbol
                        if self.socketResults[symbol] == nil {
                            self.socketResults[symbol] = [StockInfo]()
                        }
                        self.socketResults[symbol]?.append(stockInfo)
                    }
                    for (symbol, stocks) in self.socketResults {
                        for item in stocks {
                            if self.stocksInfo[symbol] == nil {
                                self.stocksInfo[symbol] = item
                            } else if item.timestamp > self.stocksInfo[symbol]!.timestamp {
                                self.stocksInfo[symbol] = item
                            }
                        }
                    }
                    self.delegate?.sendStockInfo(stocksInfo: self.stocksInfo)
                    self.receiveMessage()
                } catch {
                    print("Error converting JSON: \(error)")
                }
            default:
                print("default")
            }
        }
    }

    func closeWebSocketConnection() {
        webSocketTask?.cancel(with: .goingAway, reason: nil)
    }

    func urlSession(_ session: URLSession, webSocketTask: URLSessionWebSocketTask, didCloseWith closeCode: URLSessionWebSocketTask.CloseCode, reason: Data?) {
        print("Got here")
    }

    func urlSession(_ session: URLSession, task: URLSessionTask, didCompleteWithError error: Error?) {
        guard let error = error as? URLError else { return }
        print("got here")
    }

    func fetchCompanyInfo(symbol: String, completion: @escaping (CompanyInfo?, UIImage?) -> ()) {
        let urlString = FINNHUB_HTTP_COMPANY_INFO_URL_STRING + symbol + "&token=" + FINNHUB_API_TOKEN
        guard let url = URL(string: urlString) else { return }
        let task = URLSession.shared.dataTask(with: url) { data, response, error in
            if let error = error {
                print("Error fetching company info: \(error)")
            }
            guard let data = data else { return }
            let decoder = JSONDecoder()
            do {
                let companyInfo = try decoder.decode(CompanyInfo.self, from: data)
                guard let logoURL = URL(string: companyInfo.logo) else { return }
                let task = URLSession.shared.dataTask(with: logoURL) { data, response, error in
                    if let error = error {
                        print("Error fetching logo image: \(error)")
                    }
                    guard let data = data else { return }
                    guard let logoImage = UIImage(data: data) else { return }
                    completion(companyInfo, logoImage)
                }
                task.resume()
            } catch {
                print("Error decoding JSON: \(error)")
                completion(nil, nil)
            }
        }
        task.resume()
    }
}
I have tried implementing:
func urlSession(_ session: URLSession, webSocketTask: URLSessionWebSocketTask, didCloseWith closeCode: URLSessionWebSocketTask.CloseCode, reason: Data?) {
    print("Got here")
}
and then running the app and turning off Wi-Fi to simulate loss of connection, but the urlSession method never gets called.
How can I simulate loss of the web socket connection while running my app, and how can I detect the disconnection? I also need to think of a way to reconnect the web socket.
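Two things stand out. First, the session in fetchStockInfo is created with URLSession(configuration: .default) and no delegate, so the URLSessionWebSocketDelegate methods can never be called; the session needs to be created with delegate: self. Second, a common way to detect a dead connection is to send periodic pings and treat a failed pong as a disconnect. A minimal sketch of both ideas (the 10-second interval and the class name are my own assumptions):

import Foundation

final class SocketMonitor: NSObject, URLSessionWebSocketDelegate {
    private var task: URLSessionWebSocketTask?

    func connect(to url: URL) {
        // Pass self as delegate; otherwise didClose/didComplete never fire
        let session = URLSession(configuration: .default, delegate: self, delegateQueue: .main)
        task = session.webSocketTask(with: url)
        task?.resume()
        schedulePing()
    }

    private func schedulePing() {
        DispatchQueue.main.asyncAfter(deadline: .now() + 10) { [weak self] in
            self?.task?.sendPing { error in
                if let error = error {
                    print("Connection lost: \(error)")   // trigger a reconnect here
                } else {
                    self?.schedulePing()                 // still alive, keep probing
                }
            }
        }
    }

    func urlSession(_ session: URLSession, webSocketTask: URLSessionWebSocketTask,
                    didCloseWith closeCode: URLSessionWebSocketTask.CloseCode, reason: Data?) {
        print("Socket closed: \(closeCode)")
    }

    func urlSession(_ session: URLSession, task: URLSessionTask, didCompleteWithError error: Error?) {
        if let error = error {
            print("Socket completed with error: \(error)")   // e.g. Wi-Fi turned off
        }
    }
}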

For loop not accounting for all items?

I have a multi-selection image picker with the intention of allowing the user to select multiple assets, then upload each asset to the database. In the completion handler, I take all the selected assets and pass them to a custom function, uploadImageAssets(assets: [PHAsset], projectRef: DocumentReference), where the upload begins.
In the function, I'm using a for loop to upload each asset individually. Assets do get uploaded, but not all of them. Let's say I've selected 5 assets... only 4 will show up in the database, and they'll all be the same image, repeated. Any idea why this is happening? Here is my code below:
Image Picker Selection:
@IBAction func uploadProjectTapped(_ sender: Any) {
    let imagePicker = ImagePickerController()
    imagePicker.settings.selection.max = 10
    imagePicker.settings.theme.selectionStyle = .numbered
    imagePicker.settings.fetch.assets.supportedMediaTypes = [.image, .video]
    imagePicker.settings.selection.unselectOnReachingMax = false
    let start = Date()
    self.presentImagePicker(imagePicker, select: { (asset) in
        print("Selected: \(asset)")
    }, deselect: { (asset) in
        print("Deselected: \(asset)")
    }, cancel: { (assets) in
        print("Canceled with selections: \(assets)")
    }, finish: { (assets) in
        print("Finished with selections: \(assets)")
        self.getAssetThumbnail(assets: assets)
    }, completion: {
        let finish = Date()
        print(finish.timeIntervalSince(start))
    })
}
And, the function to add them to Firestore:
func uploadImageAsset(assets: [PHAsset], projectRef: DocumentReference) {
    let userID = Auth.auth().currentUser?.uid
    let db = Firestore.firestore()
    let manager = PHImageManager.default()
    let option = PHImageRequestOptions()
    option.isSynchronous = false
    option.isNetworkAccessAllowed = true
    option.resizeMode = .exact
    option.version = .original
    option.deliveryMode = .highQualityFormat
    let uniqueImageID = NSUUID().uuidString
    let storageRef = Storage.storage().reference().child("project-images").child("\(uniqueImageID).jpeg")
    for asset in assets {
        let imageSize = CGSize(width: asset.pixelWidth, height: asset.pixelHeight)
        manager.requestImage(for: asset, targetSize: imageSize, contentMode: .aspectFill, options: option) { (image, info) in
            let uploadData = image?.jpegData(compressionQuality: 0.6)
            storageRef.putData(uploadData!, metadata: nil, completion: { (metadata, error) in
                if error != nil {
                    return
                } else {
                    storageRef.getMetadata(completion: { (metadata, err) in
                        if let error = err {
                            print(error)
                        } else {
                            storageRef.downloadURL(completion: { (url, err) in
                                if let error = err {
                                    print(error)
                                } else {
                                    self.imageAssetURLs.append((url?.absoluteString)!)
                                    guard let url = url?.absoluteString else { return }
                                    projectRef.updateData(["images": FieldValue.arrayUnion([url])], completion: { (err) in
                                        if err != nil {
                                            print(err)
                                        } else {
                                            self.dismiss(animated: true, completion: nil)
                                        }
                                    })
                                }
                            })
                        }
                    })
                }
            })
        }
    }
}
I have a strong feeling that the error lies within these lines:
self.imageAssetURLs.append((url?.absoluteString)!)
guard let url = url?.absoluteString else { return }
In func uploadImageAsset(...), the
let uniqueImageID = NSUUID().uuidString
and
let storageRef = Storage.storage().reference().child("project-images").child("\(uniqueImageID).jpeg")
should be inside the loop, just before
storageRef.putData(..)
Otherwise every iteration writes to the same storage path, so each upload overwrites the previous one; that is exactly why you see one image repeated and fewer files than you selected.
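Applied to the code above, the corrected loop would look roughly like this (a sketch; the download-URL and Firestore update chain stays as it was):

for asset in assets {
    // A fresh ID and reference per asset, so uploads no longer overwrite each other
    let uniqueImageID = UUID().uuidString
    let storageRef = Storage.storage().reference()
        .child("project-images")
        .child("\(uniqueImageID).jpeg")
    let imageSize = CGSize(width: asset.pixelWidth, height: asset.pixelHeight)
    manager.requestImage(for: asset, targetSize: imageSize, contentMode: .aspectFill, options: option) { (image, info) in
        guard let uploadData = image?.jpegData(compressionQuality: 0.6) else { return }
        storageRef.putData(uploadData, metadata: nil) { metadata, error in
            // ... same getMetadata/downloadURL/updateData chain as before
        }
    }
}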

After compressing my audio file, why can I not play the file?

The audio file will not play after reducing it using AVAssetReader/AVAssetWriter.
At the moment, the whole function executes fine, with no errors thrown.
For some reason, when I go into the simulator's document directory via Terminal, the audio file will not play through iTunes, and QuickTime shows an error when trying to open it: "QuickTime Player can't open "test1.m4a"".
Does anyone specialise in this area and understand why this isn't working?
protocol FileConverterDelegate {
    func fileConversionCompleted()
}

class WKAudioTools: NSObject {
    var delegate: FileConverterDelegate?
    var url: URL?
    var assetReader: AVAssetReader?
    var assetWriter: AVAssetWriter?

    func convertAudio() {
        let documentDirectory = try! FileManager.default.url(for: .documentDirectory, in: .userDomainMask, appropriateFor: nil, create: true)
        let exportURL = documentDirectory.appendingPathComponent(Assets.soundName1).appendingPathExtension("m4a")
        url = Bundle.main.url(forResource: Assets.soundName1, withExtension: Assets.mp3)
        guard let assetURL = url else { return }
        let asset = AVAsset(url: assetURL)

        // reader
        do {
            assetReader = try AVAssetReader(asset: asset)
        } catch let error {
            print("Error with reading >> \(error.localizedDescription)")
        }
        let assetReaderOutput = AVAssetReaderAudioMixOutput(audioTracks: asset.tracks, audioSettings: nil)
        //let assetReaderOutput = AVAssetReaderTrackOutput(track: track!, outputSettings: nil)
        guard let assetReader = assetReader else {
            print("reader is nil")
            return
        }
        if assetReader.canAdd(assetReaderOutput) == false {
            print("Can't add output to the reader ☹️")
            return
        }
        assetReader.add(assetReaderOutput)

        // writer
        do {
            assetWriter = try AVAssetWriter(outputURL: exportURL, fileType: .m4a)
        } catch let error {
            print("Error with writing >> \(error.localizedDescription)")
        }
        var channelLayout = AudioChannelLayout()
        memset(&channelLayout, 0, MemoryLayout.size(ofValue: channelLayout))
        channelLayout.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo
        // use different values to affect the downsampling/compression
        let outputSettings: [String: Any] = [
            AVFormatIDKey: kAudioFormatMPEG4AAC,
            AVSampleRateKey: 44100.0,
            AVNumberOfChannelsKey: 2,
            AVEncoderBitRateKey: 128000,
            AVChannelLayoutKey: NSData(bytes: &channelLayout, length: MemoryLayout.size(ofValue: channelLayout))
        ]
        let assetWriterInput = AVAssetWriterInput(mediaType: .audio, outputSettings: outputSettings)
        guard let assetWriter = assetWriter else { return }
        if assetWriter.canAdd(assetWriterInput) == false {
            print("Can't add asset writer input ☹️")
            return
        }
        assetWriter.add(assetWriterInput)
        assetWriterInput.expectsMediaDataInRealTime = false

        // MARK: - File conversion
        assetWriter.startWriting()
        assetReader.startReading()
        let audioTrack = asset.tracks[0]
        let startTime = CMTime(seconds: 0, preferredTimescale: audioTrack.naturalTimeScale)
        assetWriter.startSession(atSourceTime: startTime)

        // We need to do this on another thread, so let's set up a dispatch group...
        var convertedByteCount = 0
        let dispatchGroup = DispatchGroup()
        let mediaInputQueue = DispatchQueue(label: "mediaInputQueue")
        // ... and go
        dispatchGroup.enter()
        assetWriterInput.requestMediaDataWhenReady(on: mediaInputQueue) {
            while assetWriterInput.isReadyForMoreMediaData {
                let nextBuffer = assetReaderOutput.copyNextSampleBuffer()
                if nextBuffer != nil {
                    assetWriterInput.append(nextBuffer!) // FIXME: Handle this safely
                    convertedByteCount += CMSampleBufferGetTotalSampleSize(nextBuffer!)
                } else {
                    // done!
                    assetWriterInput.markAsFinished()
                    assetReader.cancelReading()
                    dispatchGroup.leave()
                    DispatchQueue.main.async {
                        // Notify delegate that conversion is complete
                        self.delegate?.fileConversionCompleted()
                        print("Process complete 🎧")
                        if assetWriter.status == .failed {
                            print("Writing asset failed ☹️ Error: ", assetWriter.error)
                        }
                    }
                    break
                }
            }
        }
    }
}
You need to call finishWriting on your AVAssetWriter to get the output completely written:
assetWriter.finishWriting {
    DispatchQueue.main.async {
        // Notify delegate that conversion is complete
        self.delegate?.fileConversionCompleted()
        print("Process complete 🎧")
        if assetWriter.status == .failed {
            print("Writing asset failed ☹️ Error: ", assetWriter.error)
        }
    }
}
If exportURL exists before you start the conversion, you should remove it, otherwise the conversion will fail:
try! FileManager.default.removeItem(at: exportURL)
As @matt points out: why do the buffer work at all when you could do the conversion more simply with an AVAssetExportSession? And why convert one of your own assets at run time when you could ship it already in the desired format?
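For reference, the AVAssetExportSession version is only a few lines; a minimal sketch (the function name and error handling are my own):

func exportToM4A(asset: AVAsset, to exportURL: URL) {
    // The export fails if a file already exists at the destination
    try? FileManager.default.removeItem(at: exportURL)
    guard let exporter = AVAssetExportSession(asset: asset, presetName: AVAssetExportPresetAppleM4A) else { return }
    exporter.outputFileType = .m4a
    exporter.outputURL = exportURL
    exporter.exportAsynchronously {
        if exporter.status == .failed {
            print("Export failed: \(String(describing: exporter.error))")
        }
    }
}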

Reading a barcode image without using cocoapods or other external API's

I'm trying to use the new Apple Vision API to detect a barcode from an image and return its details. I've successfully detected a QR code and returned a message using CIDetector. However, I can't make this work for one-dimensional barcodes. Here's my implementation:
import UIKit
import Vision

class BarcodeDetector {
    func recognizeBarcode(for source: UIImage,
                          complete: @escaping (UIImage) -> Void) {
        var resultImage = source
        let detectBarcodeRequest = VNDetectBarcodesRequest { (request, error) in
            if error == nil {
                if let results = request.results as? [VNBarcodeObservation] {
                    print("Number of Barcodes found: \(results.count)")
                    if results.count == 0 { print("\r") }
                    var barcodeBoundingRects = [CGRect]()
                    for barcode in results {
                        barcodeBoundingRects.append(barcode.boundingBox)
                        let barcodeType = String(barcode.symbology.rawValue)?.replacingOccurrences(of: "VNBarcodeSymbology", with: "")
                        print("-Barcode Type: \(barcodeType!)")
                        if barcodeType == "QR" {
                            let image = CIImage(image: source)
                            image?.cropping(to: barcode.boundingBox)
                            self.qrCodeDescriptor(qrCode: barcode, qrCodeImage: image!)
                        }
                    }
                    resultImage = self.drawOnImage(source: resultImage, barcodeBoundingRects: barcodeBoundingRects)
                }
            } else {
                print(error!.localizedDescription)
            }
            complete(resultImage)
        }
        let vnImage = VNImageRequestHandler(cgImage: source.cgImage!, options: [:])
        try? vnImage.perform([detectBarcodeRequest])
    }

    private func qrCodeDescriptor(qrCode: VNBarcodeObservation, qrCodeImage: CIImage) {
        if let description = qrCode.barcodeDescriptor as? CIQRCodeDescriptor {
            readQRCode(qrCodeImage: qrCodeImage)
            print(" -Payload: \(description.errorCorrectedPayload)")
            print(" -Mask Pattern: \(description.maskPattern)")
            print(" -Symbol Version: \(description.symbolVersion)\n")
        }
    }

    private func readQRCode(qrCodeImage: CIImage) {
        let detector: CIDetector = CIDetector(ofType: CIDetectorTypeQRCode, context: nil, options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])!
        var qrCodeLink = ""
        let features = detector.features(in: qrCodeImage)
        for feature in features as! [CIQRCodeFeature] {
            if let messageString = feature.messageString {
                qrCodeLink += messageString
            }
        }
        if qrCodeLink == "" {
            print(" -No Code Message")
        } else {
            print(" -Code Message: \(qrCodeLink)")
        }
    }
}
How can I convert the image into an AVMetadataObject and then read it from there? Or is there a better approach?
Swift 4.1, using the Vision Framework (No 3rd party stuff or Pods)
Try this. It works for QR and for other types (Code39 in this example):
func startDetection() {
    let request = VNDetectBarcodesRequest(completionHandler: self.detectHandler)
    request.symbologies = [VNBarcodeSymbology.code39] // or use .QR, etc.
    self.requests = [request]
}

func detectHandler(request: VNRequest, error: Error?) {
    guard let observations = request.results else {
        //print("no result")
        return
    }
    let results = observations.map({ $0 as? VNBarcodeObservation })
    for result in results {
        print(result!.payloadStringValue!)
    }
}
And then in:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        return
    }
    var requestOptions: [VNImageOption: Any] = [:]
    if let camData = CMGetAttachment(sampleBuffer, kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix, nil) {
        requestOptions = [.cameraIntrinsics: camData]
    }
    let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: CGImagePropertyOrientation(rawValue: 6)!, options: requestOptions)
    do {
        try imageRequestHandler.perform(self.requests)
    } catch {
        print(error)
    }
}
The rest of the implementation is the regular AVCaptureDevice and AVCaptureSession stuff. You will also need to conform to AVCaptureVideoDataOutputSampleBufferDelegate
import AVFoundation
import Vision

var captureDevice: AVCaptureDevice!
var session = AVCaptureSession()
var requests = [VNRequest]()

override func viewDidLoad() {
    super.viewDidLoad()
    self.setupVideo()
    self.startDetection()
}

func setupVideo() {
    session.sessionPreset = AVCaptureSession.Preset.photo
    captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
    let deviceInput = try! AVCaptureDeviceInput(device: captureDevice!)
    let deviceOutput = AVCaptureVideoDataOutput()
    deviceOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
    deviceOutput.setSampleBufferDelegate(self, queue: DispatchQueue.global(qos: DispatchQoS.QoSClass.default))
    session.addInput(deviceInput)
    session.addOutput(deviceOutput)
    let imageLayer = AVCaptureVideoPreviewLayer(session: session)
    imageLayer.frame = imageView.bounds
    imageView.layer.addSublayer(imageLayer)
    session.startRunning()
}
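For completeness, these pieces would live together in a view controller that declares the delegate conformance; a minimal skeleton (the class and outlet names are assumptions):

class ScannerViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    @IBOutlet weak var imageView: UIImageView!   // hosts the preview layer
    var captureDevice: AVCaptureDevice!
    var session = AVCaptureSession()
    var requests = [VNRequest]()
    // viewDidLoad, setupVideo, startDetection, detectHandler and
    // captureOutput from the snippets above go here
}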