I need help. I only want to print the individual key values; I don't want to read the whole object.
I created a QR code using this code:
@IBAction func btnCreate(_ sender: Any) {
    if sectionNameTxt.text == "" || sectionExtTxt.text == "" || sectionLocationTxt.text == "" {
    } else {
        let dic = ["sectionName": sectionNameTxt.text!, "sectionExt": sectionExtTxt.text!, "sectionLocation": sectionLocationTxt.text!]
        print("dic:\(dic)")
        imgView.image = QRGenerator.generate(from: dic)
        do {
            let jsonData = try JSONEncoder().encode(dic)
            if let filter = CIFilter(name: "CIQRCodeGenerator") {
                filter.setValue(jsonData, forKey: "inputMessage")
                let transform = CGAffineTransform(scaleX: 10, y: 10)
                if let output = filter.outputImage?.transformed(by: transform) {
                    imgView.image = UIImage(ciImage: output)
                }
            }
        } catch {
            print(error.localizedDescription)
        }
    }
}
This is the code that reads the QR code:
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    if let metadataObject = metadataObjects.first {
        guard let readleObject = metadataObject as? AVMetadataMachineReadableCodeObject else { return }
        AudioServicesPlaySystemSound(SystemSoundID(kSystemSoundID_Vibrate))
        print(readleObject.stringValue!)
        session.stopRunning()
        self.dismiss(animated: true, completion: nil)
    }
}
Output:
{"sectionName":"pharmacy","sectionExt":"1010","sectionLocation":"Main Building - Ground Floor"}
How can I read the values of key 1 (sectionName), key 2 (sectionExt), and key 3 (sectionLocation)?
You can use JSONSerialization.
if let data = readleObject.stringValue?.data(using: .utf8) {
    if let json = try? JSONSerialization.jsonObject(with: data, options: []),
       let dataDict = json as? [String: Any] {
        if let sectionName = dataDict["sectionName"] as? String {
            print(sectionName)
        }
    }
}
You can do the rest.
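Alternatively, since the payload was produced with JSONEncoder, you could decode it back into a small struct with JSONDecoder. A minimal sketch, assuming the scanned string is exactly the JSON shown above (the Section type name is my own):
struct Section: Codable {
    let sectionName: String
    let sectionExt: String
    let sectionLocation: String
}

// Hypothetical decoding step inside metadataOutput, after reading stringValue
if let data = readleObject.stringValue?.data(using: .utf8),
   let section = try? JSONDecoder().decode(Section.self, from: data) {
    print(section.sectionName)     // "pharmacy"
    print(section.sectionExt)      // "1010"
    print(section.sectionLocation) // "Main Building - Ground Floor"
}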
I'm trying to add the device's microphone audio to a video recording from the device's camera. The video is filtered with a CIFilter and works as expected. My problem is that the mic audio is not attached to the video once it is saved.
I have tried setting the audio settings manually like this:
let audioSettings: [String: Any] = [
    AVFormatIDKey: kAudioFormatMPEG4AAC,
    AVNumberOfChannelsKey: 1,
    AVSampleRateKey: 44100,
    AVEncoderBitRateKey: 64000
]
but using the recommendedAudioSettingsForAssetWriter method seems like the correct approach, since the video side of the recording already works with the writer's recommended settings.
Can anyone tell me how to achieve this or point me in the right direction?
My code so far:
import UIKit
import AVFoundation
class VideoViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {
@IBOutlet weak var imageView: UIImageView!
lazy var cameraDevice: AVCaptureDevice? = {
return AVCaptureDevice.default(for: .video)
}()
lazy var micDevice: AVCaptureDevice? = {
return AVCaptureDevice.default(for: .audio)
}()
var captureSession = AVCaptureSession()
var outputURL: URL!
var orientation: AVCaptureVideoOrientation = .landscapeRight
var filterObject = FilterObject()
var assetWriter: AVAssetWriter?
var assetWriterInput: AVAssetWriterInput?
var pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor?
var fileName = ""
var recordingState = RecordingState.idle
var time: Double = 0
let videoOutput = AVCaptureVideoDataOutput()
let audioOutput = AVCaptureAudioDataOutput()
let context = CIContext()
override func viewDidLoad() {
super.viewDidLoad()
setupCameraDevice()
setupAudioDevice()
setupInputOutput()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
setUpAuthStatus()
}
@IBAction func recordPressed(_ sender: UIButton) {
switch recordingState {
case .idle:
recordingState = .start
case .capturing:
recordingState = .end
default:
break
}
}
func setUpAuthStatus() {
if AVCaptureDevice.authorizationStatus(for: AVMediaType.video) != .authorized {
AVCaptureDevice.requestAccess(for: AVMediaType.video, completionHandler: { (authorized) in
DispatchQueue.main.async {
if authorized {
self.setupInputOutput()
}
}
})
}
if AVCaptureDevice.authorizationStatus(for: AVMediaType.audio) != .authorized {
AVCaptureDevice.requestAccess(for: AVMediaType.audio, completionHandler: { (authorized) in
DispatchQueue.main.async {
if authorized {
self.setupInputOutput()
}
}
})
}
}
func setupCameraDevice() {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .unspecified)
let devices = deviceDiscoverySession.devices
for device in devices {
if device.position == .back {
cameraDevice = device
}
}
}
func setupAudioDevice() {
let audioDeviceDisoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInMicrophone], mediaType: .audio, position: .unspecified)
let devices = audioDeviceDisoverySession.devices
micDevice = devices[0]
}
func setupInputOutput() {
do {
guard let cameraDevice = cameraDevice else { return }
let captureDeviceInput = try AVCaptureDeviceInput(device: cameraDevice)
guard let micDevice = micDevice else { return }
let micDeviceInput = try AVCaptureDeviceInput(device: micDevice)
captureSession.sessionPreset = AVCaptureSession.Preset.hd1920x1080
if captureSession.canAddInput(captureDeviceInput) {
captureSession.addInput(captureDeviceInput)
}
if captureSession.canAddInput(micDeviceInput) {
captureSession.addInput(micDeviceInput)
}
let queue = DispatchQueue(label: "com.apple.sample.capturepipeline.video", attributes: [])
if captureSession.canAddOutput(videoOutput) {
videoOutput.setSampleBufferDelegate(self, queue: queue)
captureSession.addOutput(videoOutput)
}
if captureSession.canAddOutput(audioOutput) {
audioOutput.setSampleBufferDelegate(self, queue: queue)
captureSession.addOutput(audioOutput)
}
captureSession.commitConfiguration()
captureSession.startRunning()
} catch {
print(error)
}
}
func captureOutput(_ output: AVCaptureOutput,
didOutput sampleBuffer: CMSampleBuffer,
from connection: AVCaptureConnection) {
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
audioOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {return}
let cameraImage = CIImage(cvImageBuffer: imageBuffer)
guard let name = filterObject.name else {return}
let effect = FilterType.genericFilter(name: name, cameraImage: cameraImage)
effect.setValue(cameraImage, forKey: kCIInputImageKey)
TableData.setFilterValues(withFilterName: name, effect: effect, values: [value1, value2])
guard let outputImage = effect.outputImage else { return }
context.render(outputImage, to: imageBuffer)
guard let cgImage = self.context.createCGImage(outputImage, from: cameraImage.extent) else { return }
DispatchQueue.main.async {
let filteredImage = UIImage(cgImage: cgImage)
self.imageView.image = filteredImage
}
let timestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer).seconds
switch recordingState {
case .start:
fileName = UUID().uuidString
let videoPath = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!.appendingPathComponent("\(fileName).mov")
let writer = try! AVAssetWriter(outputURL: videoPath, fileType: .mov)
let videoSettings = videoOutput.recommendedVideoSettingsForAssetWriter(writingTo: .mov)
let videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
videoInput.mediaTimeScale = CMTimeScale(bitPattern: 600)
videoInput.expectsMediaDataInRealTime = true
let audioSettings = audioOutput.recommendedAudioSettingsForAssetWriter(writingTo: .m4a)
let audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings as? [String : Any])
audioInput.expectsMediaDataInRealTime = true
//videoInput.transform = CGAffineTransform(rotationAngle: .pi/2)
let pixelAdapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoInput, sourcePixelBufferAttributes: nil)
if writer.canAdd(videoInput) {
writer.add(videoInput)
}
if writer.canAdd(audioInput) {
writer.add(audioInput)
}
writer.startWriting()
writer.startSession(atSourceTime: .zero)
assetWriter = writer
assetWriterInput = videoInput
pixelBufferAdaptor = pixelAdapter
recordingState = .capturing
time = timestamp
case .capturing:
if assetWriterInput?.isReadyForMoreMediaData == true {
let newTime = CMTime(seconds: timestamp - time, preferredTimescale: CMTimeScale(600))
pixelBufferAdaptor?.append(imageBuffer, withPresentationTime: newTime)
}
break
case .end:
guard assetWriterInput?.isReadyForMoreMediaData == true, assetWriter!.status != .failed else { break }
let url = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!.appendingPathComponent("\(fileName).mov")
assetWriterInput?.markAsFinished()
assetWriter?.finishWriting { [weak self] in
self?.recordingState = .idle
self?.assetWriter = nil
self?.assetWriterInput = nil
DispatchQueue.main.async {
let activity = UIActivityViewController(activityItems: [url], applicationActivities: nil)
self?.present(activity, animated: true, completion: nil)
}
}
default:
break
}
}
}
Your audio settings do not look correct. The AVSampleRateKey value should come from the format description of the first audio sample buffer that comes in, rather than being hard-coded. Your hard-coded 44100 would belong with AVEncoderBitRateKey instead, and that could perhaps be set to something like AVEncoderBitRateKey: Int(48_000).
To get the sample rate, first call:
let fmt = CMSampleBufferGetFormatDescription(sampleBuffer)
let asbd = CMAudioFormatDescriptionGetStreamBasicDescription(fmt!)
The sample rate will then be asbd?.pointee.mSampleRate, and that is the value to set for AVSampleRateKey in the audio settings (I think).
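Putting that together, a minimal sketch of deriving the writer's audio settings from the first audio sample buffer might look like this (the helper name and the encoder bit rate are my own assumptions):
// Sketch: build AVAssetWriterInput audio settings from the incoming buffer's format.
func makeAudioSettings(from sampleBuffer: CMSampleBuffer) -> [String: Any]? {
    guard let fmt = CMSampleBufferGetFormatDescription(sampleBuffer),
          let asbd = CMAudioFormatDescriptionGetStreamBasicDescription(fmt)?.pointee else {
        return nil
    }
    return [
        AVFormatIDKey: kAudioFormatMPEG4AAC,
        AVNumberOfChannelsKey: Int(asbd.mChannelsPerFrame), // from the stream description
        AVSampleRateKey: asbd.mSampleRate,                  // from the stream description
        AVEncoderBitRateKey: 64_000                         // assumed encoder bit rate
    ]
}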
I want it to draw the route between the two coordinates and then follow that route, but it does not draw the route. The request prints success, yet no route appears. What is the problem? How can I draw the route between the two coordinates on the map and show the details of that route?
func drawPath()
{
let kordinatgetir = keychain.get("uyeKordinat")
let doubleKordinat :Double = Double(kordinatgetir!)!
let kordinatgetir1 = keychain.get("uyeKordinat1")
let doubleKordinat1 :Double = Double(kordinatgetir1!)!
let origin = "\(doubleKordinat),\(doubleKordinat1)"
let destination = "\(doubleKordinat1),\(doubleKordinat)"
let url = "https://maps.googleapis.com/maps/api/directions/json?origin=\(origin)&destination=\(destination)&mode=driving&key=..."
Alamofire.request(url).responseJSON { response in
print(response.request) // original URL request
print(response.response) // HTTP URL response
print(response.data) // server data
print(response.result) // result of response serialization
do {
let json = try JSON(data: response.data!)
let routes = json["routes"].arrayValue
for route in routes
{
let routeOverviewPolyline = route["overview_polyline"].dictionary
let points = routeOverviewPolyline?["points"]?.stringValue
let path = GMSPath.init(fromEncodedPath: points!)
let polyline = GMSPolyline.init(path: path)
polyline.map = self.mapView
}
} catch {
print(error)
}
}
}
func drowRoute(from source: CLLocationCoordinate2D, to destination: CLLocationCoordinate2D) {
self.mapView.clear()
let origin = "\(source.latitude),\(source.longitude)"
let destinationn = "\(destination.latitude),\(destination.longitude)"
guard let url = URL(string: "https://maps.googleapis.com/maps/api/directions/json?origin=\(origin)&destination=\(destinationn)&mode=driving&key=Your Key") else {
let error = NSError(domain: "LocalDomain", code: 0, userInfo: [NSLocalizedDescriptionKey: "Failed to create object URL"])
print("Error: \(error)")
//completionHandler(nil, error)
return
}
let config = URLSessionConfiguration.default
let session = URLSession(configuration: config)
SVProgressHUD.show()
let task = session.dataTask(with: url, completionHandler: {
(data, response, error) in
if error != nil {
print(error!.localizedDescription)
SVProgressHUD.dismiss()
}
else {
do {
if let json : [String:Any] = try JSONSerialization.jsonObject(with: data!, options: .allowFragments) as? [String: Any]{
guard let routes = json["routes"] as? NSArray else {
DispatchQueue.main.async {
SVProgressHUD.dismiss()
}
return
}
if (routes.count > 0) {
let overview_polyline = routes[0] as? NSDictionary
let dictPolyline = overview_polyline?["overview_polyline"] as? NSDictionary
let points = dictPolyline?.object(forKey: "points") as? String
DispatchQueue.main.async {
//
let legs = overview_polyline?["legs"] as! Array<Dictionary<String, AnyObject>>
let distance = legs[0]["distance"] as? NSDictionary
let distanceValue = distance?["value"] as? Int ?? 0
let duration = legs[0]["duration"] as? NSDictionary
let totalDurationInSeconds = duration?["value"] as? Int ?? 0
let miles = Double(distanceValue) / 1609.344
print("\(miles)")
if distanceValue > Int(32186.9){
SVProgressHUD.dismiss()
self.showAlert(title: Appname, message: "Your dropping point is more than 20 miles")
self.txtToLocation.text = ""
self.txtToLocation.becomeFirstResponder()
}else{
self.showPath(polyStr: points!)
let startLocationDictionary = legs[0]["start_location"] as! Dictionary<String, AnyObject>
let originCoordinate = CLLocationCoordinate2DMake(startLocationDictionary["lat"] as! Double, startLocationDictionary["lng"] as! Double)
let endLocationDictionary = legs[legs.count - 1]["end_location"] as! Dictionary<String, AnyObject>
let destinationCoordinate = CLLocationCoordinate2DMake(endLocationDictionary["lat"] as! Double, endLocationDictionary["lng"] as! Double)
let marker1 = GMSMarker()
marker1.position = CLLocationCoordinate2D(latitude:destinationCoordinate.latitude, longitude: destinationCoordinate.longitude)
marker1.icon = UIImage(named: "icn_pin-1")
marker1.map = self.mapView
let marker2 = GMSMarker()
marker2.position = CLLocationCoordinate2D(latitude:originCoordinate.latitude, longitude: originCoordinate.longitude)
marker2.icon = UIImage(named: "icn_pin2")
marker2.map = self.mapView
}
}
}
else {
print(json)
DispatchQueue.main.async {
SVProgressHUD.dismiss()
}
}
}
}
catch {
print("error in JSONSerialization")
DispatchQueue.main.async {
SVProgressHUD.dismiss()
}
}
}
})
task.resume()
}
func showPath(polyStr :String){
SVProgressHUD.dismiss()
let path = GMSPath(fromEncodedPath: polyStr)
let polyline = GMSPolyline(path: path)
polyline.strokeWidth = 5.0
polyline.strokeColor = UIColor.UIColorFromHex(hex: "#F6881F")
polyline.map = mapView
DispatchQueue.main.async {
let bounds = GMSCoordinateBounds(path: path!)
let update = GMSCameraUpdate.fit(bounds, with: UIEdgeInsets(top: 170, left: 30, bottom: 30, right: 30))
self.mapView.moveCamera(update)
}
}
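For reference, a minimal call site for drowRoute(from:to:) could look like this (the coordinates below are placeholders, not values from the question):
// Hypothetical usage with placeholder coordinates
let source = CLLocationCoordinate2D(latitude: 37.3318, longitude: -122.0312)
let destination = CLLocationCoordinate2D(latitude: 37.4220, longitude: -122.0841)
drowRoute(from: source, to: destination) // draws the polyline and drops both markers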
When I try to run my camera I get an error message that says the following:
terminating with uncaught exception of type NSException and "Multiple audio/video AVCaptureInputs are not currently supported"
I have been looking for solutions on Stack Overflow but haven't been successful.
I tried reworking my outlet connections, but I can't understand where the problem occurs, so I set some breakpoints to find it but didn't manage to.
let captureSession = AVCaptureSession()
var previewLayer:CALayer!
var captureDevice:AVCaptureDevice!
var takePhoto = false
override func viewDidLoad() {
super.viewDidLoad()
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
prepareCamera()
}
func prepareCamera() {
captureSession.sessionPreset = AVCaptureSession.Preset.photo
let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back).devices
captureDevice = availableDevices.first
beginSession()
}
func beginSession () {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
captureSession.addInput(captureDeviceInput)
}catch {
print(error.localizedDescription)
}
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.previewLayer = previewLayer
self.view.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.view.layer.frame
captureSession.startRunning()
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString):NSNumber(value:kCVPixelFormatType_32BGRA)] as [String : Any]
dataOutput.alwaysDiscardsLateVideoFrames = true
if captureSession.canAddOutput(dataOutput) {
captureSession.addOutput(dataOutput)
}
captureSession.commitConfiguration()
let queue = DispatchQueue(label: "com.brianadvent.captureQueue")
dataOutput.setSampleBufferDelegate(self, queue: queue)
}
@IBAction func takePhoto(_ sender: Any) {
takePhoto = true
}
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
if takePhoto {
takePhoto = false
if let image = self.getImageFromSampleBuffer(buffer: sampleBuffer) {
let photoVC = UIStoryboard(name: "Main", bundle: nil).instantiateViewController(withIdentifier: "PhotoVC") as! Viewcontroller2
photoVC.takenPhoto = image
DispatchQueue.main.async {
self.present(photoVC, animated: true, completion: {
self.stopCaptureSession()
})
}
}
}
}
func getImageFromSampleBuffer (buffer:CMSampleBuffer) -> UIImage? {
if let pixelBuffer = CMSampleBufferGetImageBuffer(buffer) {
let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
let context = CIContext()
let imageRect = CGRect(x: 0, y: 0, width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer))
if let image = context.createCGImage(ciImage, from: imageRect) {
return UIImage(cgImage: image, scale: UIScreen.main.scale, orientation: .right)
}
}
return nil
}
func stopCaptureSession () {
self.captureSession.stopRunning()
if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
for input in inputs {
self.captureSession.removeInput(input)
}
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}
struct Constants {
static let apiKey = "AIzaSyDtaJ5eU24rbnHsG9pb1STOizDJvqcaj5E"
static let bundleId = "com.felibundle"
static let searchEngineId = "016628067786358079133:2gm9usqzouc"
}
@IBAction func pish(_ sender: Any) {
googleSearch(term: "George Bush") { results in
print(results)}
}
func googleSearch(term: String, callback: @escaping ([(title: String, url: String)]?) -> Void) {
let urlString = String(format: "https://cse.google.com/cse?cx=016628067786358079133:2gm9usqzouc", term, Constants.searchEngineId, Constants.apiKey)
let encodedUrl = urlString.addingPercentEncoding(withAllowedCharacters: .urlQueryAllowed)
guard let url = URL(string: encodedUrl ?? urlString) else {
print("invalid url \(urlString)")
return
}
let request = NSMutableURLRequest(url: url, cachePolicy: .useProtocolCachePolicy, timeoutInterval: 10)
request.httpMethod = "GET"
request.setValue(Constants.bundleId, forHTTPHeaderField: "X-Ios-Bundle-Identifier")
let session = URLSession.shared
let datatask = session.dataTask(with: request as URLRequest) { (data, response, error) in
guard
error == nil,
let data = data,
let json = try? JSONSerialization.jsonObject(with: data, options: .allowFragments) as? [String : Any]
else {
callback(nil)
return
}
guard let items = json?["items"] as? [[String : Any]], items.count > 0 else {
print("no results")
return
}
callback(items.map { ($0["title"] as! String, $0["formattedUrl"] as! String) })
}
datatask.resume()
}
}
Your code runs fine on my end.
However, this kind of error occurs when we try to add multiple input devices to the same session. Make sure you are not adding another AVCaptureInput object elsewhere in your project.
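As a defensive sketch, you could guard every addInput call the same way the output is already guarded, so a second video input can never be added to the session (this assumes the rest of beginSession stays as it is):
// Sketch: only add the camera input if the session can accept it and
// does not already contain a video input.
do {
    let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
    let alreadyHasVideoInput = captureSession.inputs
        .compactMap { $0 as? AVCaptureDeviceInput }
        .contains { $0.device.hasMediaType(.video) }
    if !alreadyHasVideoInput, captureSession.canAddInput(captureDeviceInput) {
        captureSession.addInput(captureDeviceInput)
    }
} catch {
    print(error.localizedDescription)
}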
I got some source code from a GitHub page, written in Swift and using Google Maps. I now want to refactor it to use Alamofire and SwiftyJSON so that I can improve the code, but I'm confused because throughout my learning of Swift I have used Alamofire and SwiftyJSON for every networking task. The code is below:
typealias PlacesCompletion = ([GooglePlace]) -> Void
typealias PhotoCompletion = (UIImage?) -> Void
class GoogleDataProvider {
private var photoCache: [String: UIImage] = [:]
private var placesTask: URLSessionDataTask?
private var session: URLSession {
return URLSession.shared
}
let appDelegate = UIApplication.shared.delegate as! AppDelegate
func fetchPlacesNearCoordinate(_ coordinate: CLLocationCoordinate2D, radius: Double, types: [String], completion: @escaping PlacesCompletion) -> Void {
var urlString = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=\(coordinate.latitude),\(coordinate.longitude)&radius=\(radius)&rankby=prominence&sensor=true&key=\(appDelegate.APP_ID)"
let typesString = types.count > 0 ? types.joined(separator: "|") : "food"
urlString += "&types=\(typesString)"
urlString = urlString.addingPercentEncoding(withAllowedCharacters: CharacterSet.urlQueryAllowed) ?? urlString
guard let url = URL(string: urlString) else {
completion([])
return
}
if let task = placesTask, task.taskIdentifier > 0 && task.state == .running {
task.cancel()
}
DispatchQueue.main.async {
UIApplication.shared.isNetworkActivityIndicatorVisible = true
}
placesTask = session.dataTask(with: url) { data, response, error in
var placesArray: [GooglePlace] = []
defer {
DispatchQueue.main.async {
UIApplication.shared.isNetworkActivityIndicatorVisible = false
completion(placesArray)
}
}
guard let data = data,
let json = try? JSON(data: data, options: .mutableContainers),
let results = json["results"].arrayObject as? [[String: Any]] else {
return
}
results.forEach {
let place = GooglePlace(dictionary: $0, acceptedTypes: types)
placesArray.append(place)
if let reference = place.photoReference {
self.fetchPhotoFromReference(reference) { image in
place.photo = image
}
}
}
}
placesTask?.resume()
}
func fetchPhotoFromReference(_ reference: String, completion: @escaping PhotoCompletion) -> Void {
if let photo = photoCache[reference] {
completion(photo)
} else {
let urlString = "https://maps.googleapis.com/maps/api/place/photo?maxwidth=200&photoreference=\(reference)&key=\(appDelegate.APP_ID)"
guard let url = URL(string: urlString) else {
completion(nil)
return
}
DispatchQueue.main.async {
UIApplication.shared.isNetworkActivityIndicatorVisible = true
}
session.downloadTask(with: url) { url, response, error in
var downloadedPhoto: UIImage? = nil
defer {
DispatchQueue.main.async {
UIApplication.shared.isNetworkActivityIndicatorVisible = false
completion(downloadedPhoto)
}
}
guard let url = url else {
return
}
guard let imageData = try? Data(contentsOf: url) else {
return
}
downloadedPhoto = UIImage(data: imageData)
self.photoCache[reference] = downloadedPhoto
}
.resume()
}
}
}
Any help refactoring this code to use Alamofire and SwiftyJSON would be appreciated.
Both Alamofire and SwiftyJSON have pretty decent documentation, and there are plenty of examples online. However, this would be a decent starting point: you need to replace your session.dataTask and session.downloadTask calls with Alamofire methods. For example, instead of:
session.downloadTask(with: url) { url, response, error in
var downloadedPhoto: UIImage? = nil
defer {
DispatchQueue.main.async {
UIApplication.shared.isNetworkActivityIndicatorVisible = false
completion(downloadedPhoto)
}
}
guard let url = url else {
return
}
guard let imageData = try? Data(contentsOf: url) else {
return
}
downloadedPhoto = UIImage(data: imageData)
self.photoCache[reference] = downloadedPhoto
}
.resume()
use this skeleton and implement your models and logic:
Alamofire
    .request(url)
    .responseJSON { dataResponse in
        switch dataResponse.result {
        case .success:
            guard let data = dataResponse.data, let json = try? JSON(data: data) else {
                return
            }
            // Continue parsing json here
        case .failure(let error):
            // Handle error
            print("\(error)")
        }
    }
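Applied to fetchPhotoFromReference, a sketch of the Alamofire version could look like the following (this assumes Alamofire 4's responseData API and keeps the existing photoCache, appDelegate, and PhotoCompletion from your class):
// Sketch: photo download via Alamofire instead of URLSession.downloadTask
func fetchPhotoFromReference(_ reference: String, completion: @escaping PhotoCompletion) {
    if let photo = photoCache[reference] {
        completion(photo)
        return
    }
    let urlString = "https://maps.googleapis.com/maps/api/place/photo?maxwidth=200&photoreference=\(reference)&key=\(appDelegate.APP_ID)"
    Alamofire.request(urlString).responseData { response in
        var downloadedPhoto: UIImage? = nil
        if case .success(let data) = response.result {
            downloadedPhoto = UIImage(data: data)
            self.photoCache[reference] = downloadedPhoto
        }
        DispatchQueue.main.async {
            completion(downloadedPhoto)
        }
    }
}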
I'm trying to use the new Apple Vision API to detect a barcode from an image and return its details. I've successfully detected a QR code and returned a message using CIDetector. However, I can't make this work for 1-dimensional barcodes. Here's an example of my current approach:
import UIKit
import Vision
class BarcodeDetector {
func recognizeBarcode(for source: UIImage,
complete: @escaping (UIImage) -> Void) {
var resultImage = source
let detectBarcodeRequest = VNDetectBarcodesRequest { (request, error) in
if error == nil {
if let results = request.results as? [VNBarcodeObservation] {
print("Number of Barcodes found: \(results.count)")
if results.count == 0 { print("\r") }
var barcodeBoundingRects = [CGRect]()
for barcode in results {
barcodeBoundingRects.append(barcode.boundingBox)
let barcodeType = String(barcode.symbology.rawValue)?.replacingOccurrences(of: "VNBarcodeSymbology", with: "")
print("-Barcode Type: \(barcodeType!)")
if barcodeType == "QR" {
let image = CIImage(image: source)
image?.cropping(to: barcode.boundingBox)
self.qrCodeDescriptor(qrCode: barcode, qrCodeImage: image!)
}
}
resultImage = self.drawOnImage(source: resultImage, barcodeBoundingRects: barcodeBoundingRects)
}
} else {
print(error!.localizedDescription)
}
complete(resultImage)
}
let vnImage = VNImageRequestHandler(cgImage: source.cgImage!, options: [:])
try? vnImage.perform([detectBarcodeRequest])
}
private func qrCodeDescriptor(qrCode: VNBarcodeObservation, qrCodeImage: CIImage) {
if let description = qrCode.barcodeDescriptor as? CIQRCodeDescriptor {
readQRCode(qrCodeImage: qrCodeImage)
print(" -Payload: \(description.errorCorrectedPayload)")
print(" -Mask Pattern: \(description.maskPattern)")
print(" -Symbol Version: \(description.symbolVersion)\n")
}
}
private func readQRCode(qrCodeImage: CIImage) {
let detector: CIDetector = CIDetector(ofType: CIDetectorTypeQRCode, context: nil, options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])!
var qrCodeLink = ""
let features = detector.features(in: qrCodeImage)
for feature in features as! [CIQRCodeFeature] {
if let messageString = feature.messageString {
qrCodeLink += messageString
}
}
if qrCodeLink == "" {
print(" -No Code Message")
} else {
print(" -Code Message: \(qrCodeLink)")
}
}
}
How can I convert the image into an AVMetadataObject and then read it from there? Or is there a better approach?
Swift 4.1, using the Vision Framework (No 3rd party stuff or Pods)
Try this. It works for QR and for other types (Code39 in this example):
func startDetection() {
let request = VNDetectBarcodesRequest(completionHandler: self.detectHandler)
request.symbologies = [VNBarcodeSymbology.code39] // or use .QR, etc
self.requests = [request]
}
func detectHandler(request: VNRequest, error: Error?) {
guard let observations = request.results else {
//print("no result")
return
}
let results = observations.map({$0 as? VNBarcodeObservation})
for result in results {
print(result!.payloadStringValue!)
}
}
And then in:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return
}
var requestOptions:[VNImageOption:Any] = [:]
if let camData = CMGetAttachment(sampleBuffer, kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix, nil) {
requestOptions = [.cameraIntrinsics:camData]
}
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: CGImagePropertyOrientation(rawValue: 6)!, options: requestOptions)
do {
try imageRequestHandler.perform(self.requests)
} catch {
print(error)
}
}
The rest of the implementation is the regular AVCaptureDevice and AVCaptureSession stuff. You will also need to conform to AVCaptureVideoDataOutputSampleBufferDelegate.
import AVFoundation
import Vision
var captureDevice: AVCaptureDevice!
var session = AVCaptureSession()
var requests = [VNRequest]()
override func viewDidLoad() {
super.viewDidLoad()
self.setupVideo()
self.startDetection()
}
func setupVideo() {
session.sessionPreset = AVCaptureSession.Preset.photo
captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
let deviceInput = try! AVCaptureDeviceInput(device: captureDevice!)
let deviceOutput = AVCaptureVideoDataOutput()
deviceOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
deviceOutput.setSampleBufferDelegate(self, queue: DispatchQueue.global(qos: DispatchQoS.QoSClass.default))
session.addInput(deviceInput)
session.addOutput(deviceOutput)
let imageLayer = AVCaptureVideoPreviewLayer(session: session)
imageLayer.frame = imageView.bounds
imageView.layer.addSublayer(imageLayer)
session.startRunning()
}
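For orientation, the fragments above could all live in one view controller that declares the delegate conformance; a minimal sketch (the class name and imageView outlet are my own assumptions):
// Hypothetical container for the snippets in this answer; names are assumptions.
class BarcodeScanViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    @IBOutlet weak var imageView: UIImageView! // hosts the preview layer

    var captureDevice: AVCaptureDevice!
    var session = AVCaptureSession()
    var requests = [VNRequest]()

    // viewDidLoad, setupVideo, startDetection, detectHandler and
    // captureOutput from the answer go here unchanged.
}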