Swift: generate a QR code [duplicate]

This question already has answers here:
What does "Fatal error: Unexpectedly found nil while unwrapping an Optional value" mean?
(16 answers)
Closed 2 years ago.
I tried to generate a QR code, but it fails with this error:
Thread 1: Fatal error: Unexpectedly found nil while unwrapping an Optional value
let myString = "ggigiuui"
let data = myString.data(using: .ascii, allowLossyConversion: false)
let filter = CIFilter(name: "CIQRCodeGenerator")
filter?.setValue(data, forKey: "inputMessage")
let img = UIImage(ciImage: (filter?.outputImage)!)
qponImage.image = img
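For reference, the crash most likely comes from the force unwrap of the filter's output. A minimal safe shape of the same snippet (qponImage is the outlet from the question):

let myString = "ggigiuui"
let data = myString.data(using: .ascii, allowLossyConversion: false)
if let filter = CIFilter(name: "CIQRCodeGenerator") {
    filter.setValue(data, forKey: "inputMessage")
    // outputImage is nil when the filter cannot produce a QR code,
    // so unwrap it instead of forcing with `!`.
    if let output = filter.outputImage {
        qponImage.image = UIImage(ciImage: output)
    }
}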

I have used the following code, and it works perfectly. Here, self.imgQRCode is the image view in which you want to display the QR code.
func generateQRCode(from string: String) -> UIImage? {
    let data = string.data(using: String.Encoding.ascii)
    if let filter = CIFilter(name: "CIQRCodeGenerator") {
        filter.setValue(data, forKey: "inputMessage")
        guard let qrImage = filter.outputImage else { return nil }
        // Scale the tiny generated code up to the image view's size.
        let scaleX = self.imgQRCode.frame.size.width / qrImage.extent.size.width
        let scaleY = self.imgQRCode.frame.size.height / qrImage.extent.size.height
        let transform = CGAffineTransform(scaleX: scaleX, y: scaleY)
        return UIImage(ciImage: qrImage.transformed(by: transform))
    }
    return nil
}
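A typical call site, assuming the method and the imgQRCode outlet live in the same view controller. Since the method reads imgQRCode's frame, call it once layout has settled:

override func viewDidLayoutSubviews() {
    super.viewDidLayoutSubviews()
    // The URL string here is illustrative.
    imgQRCode.image = generateQRCode(from: "https://example.com")
}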

Please try this:
func generateQRCode(from string: String) -> UIImage? {
    let data = string.data(using: String.Encoding.ascii)
    if let filter = CIFilter(name: "CIQRCodeGenerator") {
        filter.setValue(data, forKey: "inputMessage")
        let transform = CGAffineTransform(scaleX: 3, y: 3)
        if let output = filter.outputImage?.transformed(by: transform) {
            return UIImage(ciImage: output)
        }
    }
    return nil
}
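A variant worth knowing (a sketch, not part of the original answer): rendering through a CIContext gives the returned UIImage a CGImage backing, which avoids the cases where a UIImage created with UIImage(ciImage:) does not display in a UIImageView:

let context = CIContext()

func generateQRCode(from string: String) -> UIImage? {
    guard let data = string.data(using: .ascii),
          let filter = CIFilter(name: "CIQRCodeGenerator") else { return nil }
    filter.setValue(data, forKey: "inputMessage")
    guard let output = filter.outputImage?.transformed(by: CGAffineTransform(scaleX: 3, y: 3)),
          // Rendering into a CGImage here is what gives the UIImage a real bitmap backing.
          let cgImage = context.createCGImage(output, from: output.extent) else { return nil }
    return UIImage(cgImage: cgImage)
}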

This is how you can generate a QR code and display it in a UIImageView.
First of all, create a new Cocoa Touch Class .swift file and import these two frameworks:
import UIKit
import CoreImage
And the second step: you just need to add extensions of URL and CIImage in the same .swift file.
Extensions:
extension URL {

    /// Creates a QR code for the current URL in the given color.
    func qrImage(using color: UIColor, logo: UIImage? = nil) -> CIImage? {
        let tintedQRImage = qrImage?.tinted(using: color)
        guard let logo = logo?.cgImage else {
            return tintedQRImage
        }
        return tintedQRImage?.combined(with: CIImage(cgImage: logo))
    }

    /// Returns a black and white QR code for this URL.
    var qrImage: CIImage? {
        guard let qrFilter = CIFilter(name: "CIQRCodeGenerator") else { return nil }
        let qrData = absoluteString.data(using: String.Encoding.ascii)
        qrFilter.setValue(qrData, forKey: "inputMessage")
        let qrTransform = CGAffineTransform(scaleX: 12, y: 12)
        return qrFilter.outputImage?.transformed(by: qrTransform)
    }
}
extension CIImage {

    /// Inverts the colors and creates a transparent image by converting the mask to alpha.
    /// Input image should be black and white.
    var transparent: CIImage? {
        return inverted?.blackTransparent
    }

    /// Inverts the colors.
    var inverted: CIImage? {
        guard let invertedColorFilter = CIFilter(name: "CIColorInvert") else { return nil }
        invertedColorFilter.setValue(self, forKey: "inputImage")
        return invertedColorFilter.outputImage
    }

    /// Converts all black to transparent.
    var blackTransparent: CIImage? {
        guard let blackTransparentFilter = CIFilter(name: "CIMaskToAlpha") else { return nil }
        blackTransparentFilter.setValue(self, forKey: "inputImage")
        return blackTransparentFilter.outputImage
    }

    /// Applies the given color as a tint color.
    func tinted(using color: UIColor) -> CIImage? {
        guard
            let transparentQRImage = transparent,
            let filter = CIFilter(name: "CIMultiplyCompositing"),
            let colorFilter = CIFilter(name: "CIConstantColorGenerator") else { return nil }
        let ciColor = CIColor(color: color)
        colorFilter.setValue(ciColor, forKey: kCIInputColorKey)
        let colorImage = colorFilter.outputImage
        filter.setValue(colorImage, forKey: kCIInputImageKey)
        filter.setValue(transparentQRImage, forKey: kCIInputBackgroundImageKey)
        // No need to force-unwrap; the return type is already optional.
        return filter.outputImage
    }

    /// Combines the current image with the given image centered.
    func combined(with image: CIImage) -> CIImage? {
        guard let combinedFilter = CIFilter(name: "CISourceOverCompositing") else { return nil }
        let centerTransform = CGAffineTransform(translationX: extent.midX - (image.extent.size.width / 2),
                                                y: extent.midY - (image.extent.size.height / 2))
        combinedFilter.setValue(image.transformed(by: centerTransform), forKey: "inputImage")
        combinedFilter.setValue(self, forKey: "inputBackgroundImage")
        return combinedFilter.outputImage
    }
}
And the third step: bind the outlet of the image view in which you want to display the generated QR code.
Your ViewController.swift file will look something like this:
// Desired color of the QR code.
let OrangeColor = UIColor(red: 0.93, green: 0.31, blue: 0.23, alpha: 1.00)
// App logo, or whatever UIImage you want to set in the center.
let Logo = UIImage(named: "logo_which_you_want_to_set_in_the center_of_the_QRCode")!
@IBOutlet weak var imgQRImage: UIImageView!
And the last and final step: assign the QR code to imgQRImage, and put the code in your viewDidLoad():
override func viewDidLoad() {
    super.viewDidLoad()
    let QRLink = "https://www.peerbits.com/"
    guard let qrURLImage = URL(string: QRLink)?.qrImage(using: self.OrangeColor, logo: self.Logo) else { return }
    self.imgQRImage.image = UIImage(ciImage: qrURLImage)
}

As mentioned in the docs, we can use CIQRCodeGenerator:
// Presumably declared in an extension on String, since it calls data(using:) on self.
func qrCode(_ outputSize: CGSize) -> UIImage? {
    if let data = data(using: .isoLatin1),
       let outputImage = CIFilter(
           name: "CIQRCodeGenerator",
           parameters: [
               "inputMessage": data,
               "inputCorrectionLevel": "Q"
           ]
       )?.outputImage {
        let size: CGRect = outputImage.extent.integral
        let format = UIGraphicsImageRendererFormat()
        format.scale = UIScreen.main.scale
        return UIGraphicsImageRenderer(size: outputSize, format: format)
            .image { _ in
                outputImage
                    .transformed(
                        by: .init(
                            scaleX: outputSize.width / size.width,
                            y: outputSize.height / size.height
                        )
                    )
                    .uiimage
                    .draw(in: .init(origin: .zero, size: outputSize))
            }
    } else {
        return nil
    }
}
extension CIImage {
    var uiimage: UIImage {
        .init(ciImage: self)
    }
}
This is a slightly modified version of this post.
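Assuming qrCode(_:) lives in an extension on String as noted above, a call site would look something like this; the URL string and size are illustrative:

// Returns nil if the string cannot be encoded or the filter fails.
let qr = "https://example.com".qrCode(CGSize(width: 240, height: 240))
imageView.image = qr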
And in case you need to parse a QR code image for its content:
func decodeQRCode(_ image: UIImage?) -> [CIQRCodeFeature]? {
    if let image = image,
       let ciImage = CIImage(image: image) {
        let context = CIContext()
        var options: [String: Any] = [
            CIDetectorAccuracy: CIDetectorAccuracyHigh
        ]
        let qrDetector = CIDetector(
            ofType: CIDetectorTypeQRCode,
            context: context,
            options: options
        )
        // Pass the image's orientation to the detector, if it has one.
        if ciImage.properties.keys
            .contains((kCGImagePropertyOrientation as String)) {
            options = [
                CIDetectorImageOrientation: ciImage
                    .properties[(kCGImagePropertyOrientation as String)] as Any
            ]
        } else {
            options = [CIDetectorImageOrientation: 1]
        }
        let features = qrDetector?.features(in: ciImage, options: options)
        return features?
            .compactMap({ $0 as? CIQRCodeFeature })
    }
    return nil
}
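And a hypothetical call site for the decoder (the asset name is made up); messageString on CIQRCodeFeature carries the decoded payload:

if let features = decodeQRCode(UIImage(named: "qr_sample")) {
    // Each feature corresponds to one QR code detected in the image.
    let payloads = features.compactMap { $0.messageString }
    print(payloads)
}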

Related

Get image resolution from a URL in the background thread

I'm trying to get the resolution of an image from a URL without actually downloading it, so I have a function for that:
public func resolutionForImage(url: String) -> CGSize? {
    guard let url = URL(string: url) else { return nil }
    guard let source = CGImageSourceCreateWithURL(url as CFURL, nil) else {
        return nil
    }
    let propertiesOptions = [kCGImageSourceShouldCache: false] as CFDictionary
    guard let properties = CGImageSourceCopyPropertiesAtIndex(source, 0, propertiesOptions) as? [CFString: Any] else {
        return nil
    }
    if let width = properties[kCGImagePropertyPixelWidth] as? CGFloat,
       let height = properties[kCGImagePropertyPixelHeight] as? CGFloat {
        return CGSize(width: width, height: height)
    } else {
        return nil
    }
}
It works fine, but I need to run it on a background thread; on the main thread it blocks the UI.
This function is called from another function in a collection view cell, so in the end the calculateImageHeight output should be delivered on the main thread. Could anyone help me manage this? I'm still not good at threading.
public func calculateImageHeight(ratio: Double = 0.0) -> CGFloat {
    var calculatedRatio = ratio
    if ratio == 0.0 {
        if let size = resolutionForImage(url: imageUrl) {
            calculatedRatio = size.height/size.width
        }
    }
    let height = imageViewWidth * calculatedRatio
    return height.isFinite ? height : 100
}
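One way to structure this, as a sketch: do the CGImageSource work on a background queue and hand the finished height back on the main queue through a completion handler. The completion-based signature is an assumption, not part of the original code, and it presumes these methods live on the same type as above:

public func calculateImageHeight(ratio: Double = 0.0,
                                 completion: @escaping (CGFloat) -> Void) {
    // Fast path: a known ratio needs no I/O, so complete immediately.
    if ratio != 0.0 {
        completion(imageViewWidth * CGFloat(ratio))
        return
    }
    DispatchQueue.global(qos: .userInitiated).async {
        // resolutionForImage(url:) blocks while CGImageSource reads the
        // image headers, so it runs off the main thread here.
        let size = self.resolutionForImage(url: self.imageUrl)
        let calculatedRatio = size.map { $0.height / $0.width } ?? 0
        let height = self.imageViewWidth * calculatedRatio
        DispatchQueue.main.async {
            // Deliver on the main thread so the cell can update its layout.
            completion(height.isFinite ? height : 100)
        }
    }
}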

Tensor (.tflite) Model inference returning nil using Firebase SDK on Swift

Preface:
My ML (specifically NN) knowledge is very limited, and I'm really only getting more familiar as time goes on.
Essentially, I have a model that accepts input [1, H, W, 3] (1 image, height, width, 3 channels) and SHOULD output [1, H, W, 2] (1 image, height, width, 2 channels). The idea is that I'll be able to grab image data from one of the output channels and convert it to an actual image, which should indicate, and sort of highlight, whether a certain "something" existed in the input image, using that one color channel (or the other).
The model author is actively working on the model so it's nothing close to a perfect model.
So, with that:
I was initially using the TensorFlowLite SDK to do everything, but I found that the official documentation, examples, and open source work weren't even close to comparable with the Firebase SDK. Plus, the actual project (I'm currently testing this in a test environment) already uses the Firebase SDK. Anyway, I was able to get some form of output, but I wasn't normalizing the image properly, so the output wasn't as expected, but at least there was SOMETHING.
Using this guide on Firebase, I am trying to run an inference on a tflite model.
From the code below you'll see that I have TensorFlowLite as a dependency, but I'm not actually ACTIVELY using it. I have a function that uses it, but that function isn't called.
So essentially you can ignore parseOutputTensor, coordinateToIndex, and the Constants enum.
Theories:
My modelInputs aren't set up properly.
I'm not correctly looking at the output.
I'm not resizing and processing the image correctly before I use it to set the input data for inference.
I don't know what I'm doing and I'm way off. D:
Below is my code:
import UIKit
import Firebase
import AVFoundation
import TensorFlowLite

class ViewController: UIViewController {

    var captureSesssion: AVCaptureSession!
    var cameraOutput: AVCapturePhotoOutput!
    var previewLayer: AVCaptureVideoPreviewLayer!

    @objc let device = AVCaptureDevice.default(for: .video)!
    private var previousInferenceTimeMs: TimeInterval = Date.distantPast.timeIntervalSince1970 * 1000
    private let delayBetweenInferencesMs: Double = 1000

    @IBOutlet var imageView: UIImageView!

    private var button1: UIButton = {
        var button = UIButton()
        button.setTitle("button lol", for: .normal)
        button.translatesAutoresizingMaskIntoConstraints = false
        button.addTarget(self, action: #selector(buttonClicked), for: .touchDown)
        return button
    }()

    override func viewDidLoad() {
        super.viewDidLoad()
        startCamera()
        view.addSubview(button1)
        view.bringSubviewToFront(button1)
        button1.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
        button1.titleLabel?.font = UIFont(name: "Helvetica", size: 25)
        button1.widthAnchor.constraint(equalToConstant: view.frame.width/3).isActive = true
        button1.centerXAnchor.constraint(equalTo: view.centerXAnchor).isActive = true
    }

    @objc func buttonClicked() {
        cameraPressed()
    }

    private func configureLocalModel() -> CustomLocalModel {
        guard let modelPath = Bundle.main.path(forResource: "modelName", ofType: "tflite") else { fatalError("Couldn't find the modelPath") }
        return CustomLocalModel(modelPath: modelPath)
    }

    private func createInterpreter(customLocalModel: CustomLocalModel) -> ModelInterpreter {
        return ModelInterpreter.modelInterpreter(localModel: customLocalModel)
    }

    private func setModelInputOutput() -> ModelInputOutputOptions? {
        let ioOptions = ModelInputOutputOptions()
        do {
            try ioOptions.setInputFormat(index: 0, type: .float32, dimensions: [1, 512, 512, 3])
            try ioOptions.setOutputFormat(index: 0, type: .float32, dimensions: [1, 512, 512, 2])
        } catch let error as NSError {
            print("Failed to set input or output format with error: \(error.localizedDescription)")
            return nil
        }
        return ioOptions
    }

    private func inputDataForInference(theImage: CGImage) -> ModelInputs? {
        let image: CGImage = theImage
        guard let context = CGContext(
            data: nil,
            width: image.width, height: image.height,
            bitsPerComponent: 8, bytesPerRow: image.width * 4,
            space: CGColorSpaceCreateDeviceRGB(),
            bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue
        ) else { fatalError("Context issues") }
        context.draw(image, in: CGRect(x: 0, y: 0, width: image.width, height: image.height))
        guard let imageData = context.data else { fatalError("Context issues") }

        let inputs = ModelInputs()
        var inputData = Data()
        do {
            for row in 0 ..< 512 {
                for col in 0 ..< 512 {
                    let offset = 4 * (col * context.width + row)
                    // (Ignore offset 0, the unused alpha channel.)
                    let red = imageData.load(fromByteOffset: offset+1, as: UInt8.self)
                    let green = imageData.load(fromByteOffset: offset+2, as: UInt8.self)
                    let blue = imageData.load(fromByteOffset: offset+3, as: UInt8.self)
                    // Normalize channel values to [0.0, 1.0]. This requirement varies
                    // by model. For example, some models might require values to be
                    // normalized to the range [-1.0, 1.0] instead, and others might
                    // require fixed-point values or the original bytes.
                    var normalizedRed = Float32(red) / 255.0
                    var normalizedGreen = Float32(green) / 255.0
                    var normalizedBlue = Float32(blue) / 255.0
                    // Append normalized values to the Data object in RGB order.
                    let elementSize = MemoryLayout.size(ofValue: normalizedRed)
                    var bytes = [UInt8](repeating: 0, count: elementSize)
                    memcpy(&bytes, &normalizedRed, elementSize)
                    inputData.append(&bytes, count: elementSize)
                    memcpy(&bytes, &normalizedGreen, elementSize)
                    inputData.append(&bytes, count: elementSize)
                    memcpy(&bytes, &normalizedBlue, elementSize)
                    inputData.append(&bytes, count: elementSize)
                }
            }
            try inputs.addInput(inputData)
        } catch let error {
            print("Failed to add input: \(error)")
            return nil
        }
        return inputs
    }

    private func runInterpreter(interpreter: ModelInterpreter, inputs: ModelInputs, ioOptions: ModelInputOutputOptions) {
        interpreter.run(inputs: inputs, options: ioOptions) { outputs, error in
            guard error == nil, let outputs = outputs else { fatalError("interpreter run error is nil or outputs is nil") }
            let output = try? outputs.output(index: 0) as? [[NSNumber]]
            print()
            print("output?[0]: \(output?[0])")
            print("output?.count: \(output?.count)")
            print("output?.description: \(output?.description)")
        }
    }

    private func gotImage(cgImage: CGImage) {
        let configuredModel = configureLocalModel()
        let interpreter = createInterpreter(customLocalModel: configuredModel)
        guard let modelioOptions = setModelInputOutput() else { fatalError("modelioOptions got image error") }
        guard let modelInputs = inputDataForInference(theImage: cgImage) else { fatalError("modelInputs got image error") }
        runInterpreter(interpreter: interpreter, inputs: modelInputs, ioOptions: modelioOptions)
    }

    private func resizeImage(image: UIImage, targetSize: CGSize) -> UIImage {
        let newSize = CGSize(width: targetSize.width, height: targetSize.height)
        // This is the rect that we've calculated out, and it is what is actually used below.
        let rect = CGRect(x: 0, y: 0, width: targetSize.width, height: targetSize.height)
        // Actually do the resizing to the rect using the ImageContext stuff.
        UIGraphicsBeginImageContextWithOptions(newSize, false, 1.0)
        image.draw(in: rect)
        let newImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return newImage!
    }
}
extension ViewController: AVCapturePhotoCaptureDelegate {

    func startCamera() {
        captureSesssion = AVCaptureSession()
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSesssion)
        captureSesssion.sessionPreset = AVCaptureSession.Preset.photo
        cameraOutput = AVCapturePhotoOutput()
        previewLayer.frame = CGRect(x: view.frame.origin.x, y: view.frame.origin.y, width: view.frame.width, height: view.frame.height)
        previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
        do {
            try device.lockForConfiguration()
        } catch {
            return
        }
        device.focusMode = .continuousAutoFocus
        device.unlockForConfiguration()
        print("startcamera")
        if let input = try? AVCaptureDeviceInput(device: device) {
            if captureSesssion.canAddInput(input) {
                captureSesssion.addInput(input)
                if captureSesssion.canAddOutput(cameraOutput) {
                    captureSesssion.addOutput(cameraOutput)
                    view.layer.addSublayer(previewLayer)
                    captureSesssion.startRunning()
                }
            } else {
                print("issue here : captureSesssion.canAddInput")
                _ = UIAlertController(title: "Your camera doesn't seem to be working :(", message: "Please make sure your camera works", preferredStyle: .alert)
            }
        } else {
            fatalError("TBPVC -> startCamera() : AVCaptureDeviceInput Error")
        }
    }

    func cameraPressed() {
        let outputFormat = [kCVPixelBufferPixelFormatTypeKey as String: kCMPixelFormat_32BGRA]
        let settings = AVCapturePhotoSettings(format: outputFormat)
        cameraOutput.capturePhoto(with: settings, delegate: self)
    }

    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        print("got image")
        // guard let cgImageFromPhoto = photo.cgImageRepresentation()?.takeRetainedValue() else { fatalError("cgImageRepresentation()?.takeRetainedValue error") }
        guard let imageData = photo.fileDataRepresentation() else {
            fatalError("Error while generating image from photo capture data.")
        }
        guard let uiImage = UIImage(data: imageData) else {
            fatalError("Unable to generate UIImage from image data.")
        }
        let tempImage = resizeImage(image: uiImage, targetSize: CGSize(width: 512, height: 512))
        // Generate a corresponding CGImage.
        guard let tempCgImage = tempImage.cgImage else {
            fatalError("Error generating CGImage")
        }
        gotImage(cgImage: tempCgImage)
    }

    @objc func image(_ image: UIImage, didFinishSavingWithError error: Error?, contextInfo: UnsafeRawPointer) {
        if let error = error {
            let ac = UIAlertController(title: "Save error", message: error.localizedDescription, preferredStyle: .alert)
            ac.addAction(UIAlertAction(title: "OK", style: .default))
            present(ac, animated: true)
        } else {
            let ac = UIAlertController(title: "Saved!", message: "Your altered image has been saved to your photos.", preferredStyle: .alert)
            ac.addAction(UIAlertAction(title: "OK", style: .default))
            present(ac, animated: true)
        }
    }
}
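One detail worth checking, based on how the Firebase model interpreter nests its output to match the declared dimensions (this is an assumption about the cause, not a confirmed fix): for an output declared as [1, 512, 512, 2], the result is a four-level nested array, so the cast to [[NSNumber]] in runInterpreter would fail and yield nil. A sketch of the deeper cast:

let output = try? outputs.output(index: 0) as? [[[[NSNumber]]]]
// Batch 0, row 0, column 0, channel 0.
let firstValue = output?[0][0][0][0]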

Why is CIFalseColor recoloring more than requested?

In my ViewController.viewWillAppear:
override func viewWillAppear(_ animated: Bool) {
    super.viewWillAppear(animated)
    var image = UIImage(named: "colorful")!
    let imageView = UIImageView(image: image)
    imageView.frame.origin.x = 67.0
    imageView.frame.origin.y = 324.0 - image.size.height - 20.0
    view.addSubview(imageView)
    var image2 = ImagePainter.paint(image: image, from: UIColor(red: 243.0/255.0, green: 209.0/255.0, blue: 0.0/255.0, alpha: 1.0), to: UIColor.blue)!
    let image2View = UIImageView(image: image2)
    image2View.frame.origin.x = 67.0
    image2View.frame.origin.y = 324.0
    view.addSubview(image2View)
}
ImagePainter:
import UIKit
import CoreImage

class ImagePainter {
    static let context = CIContext()

    static func paint(image: UIImage?, from: UIColor, to: UIColor) -> UIImage? {
        guard let image = image,
              let inputImage = CIImage(image: image) else {
            return nil
        }
        let initFilter = CIFilter(name: "CIFalseColor")
        initFilter?.setValue(inputImage, forKey: "inputImage")
        initFilter?.setValue(CIColor(color: from), forKey: "inputColor1")
        initFilter?.setValue(CIColor(color: to), forKey: "inputColor0")
        guard let filter = initFilter,
              let filterOutput = filter.outputImage else {
            return nil
        }
        let cgimg = context.createCGImage(filterOutput, from: filterOutput.extent)
        let processed = UIImage(cgImage: cgimg!, scale: UIScreen.main.scale, orientation: .up)
        return processed
    }
}
Before/After CIFalseColor:
This screenshot shows the original image (with the "from" color boxed in blue), and the result of calling CIFalseColor. Nothing is colored as I expected. Clearly I'm doing something wrong, but what?

Taking a square photo with Camera App

I am currently building a camera app and want the camera to take a square 375x375 image, like Instagram, and save it like that.
I am able to square off the viewfinder of the camera, but it is not taking the picture the right way; also, when I save it, it saves in full view. I looked around the other Q&As on here, but none of them seem to work with my code.
Can someone please help me figure this out?
import Foundation
import UIKit
import AVFoundation

class CameraViewController: UIViewController {

    var captureSession = AVCaptureSession()
    var frontCameraDeviceInput: AVCaptureDeviceInput?
    var backCameraDeviceInput: AVCaptureDeviceInput?
    var currentCamera: AVCaptureDevice?
    var photoOutput: AVCapturePhotoOutput?
    var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
    var image: UIImage?

    override func viewDidLoad() {
        super.viewDidLoad()
        setupCaptureSession()
        setupDevice()
        setupInputOutput()
        setupPreviewLayer()
        startRunningCaptureSession()
    }

    func setupCaptureSession() {
        captureSession.sessionPreset = AVCaptureSession.Preset.photo
    }

    func setupDevice() {
        let frontCamera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front)
        let backCamera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
        frontCameraDeviceInput = try? AVCaptureDeviceInput(device: frontCamera!)
        backCameraDeviceInput = try? AVCaptureDeviceInput(device: backCamera!)
    }

    func setupInputOutput() {
        captureSession.addInput(backCameraDeviceInput!)
        photoOutput = AVCapturePhotoOutput()
        photoOutput?.isHighResolutionCaptureEnabled = true
        photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
        captureSession.addOutput(photoOutput!)
    }

    func setupPreviewLayer() {
        cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
        cameraPreviewLayer?.frame = self.view.frame
        self.view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
    }

    func startRunningCaptureSession() {
        captureSession.startRunning()
    }

    @IBAction func camerButton(_ sender: Any) {
        let settings = AVCapturePhotoSettings()
        photoOutput?.capturePhoto(with: settings, delegate: self)
    }

    @IBAction func switchCamera(_ sender: Any) {
        captureSession.beginConfiguration()
        // Change camera device inputs from back to front or the opposite.
        if captureSession.inputs.contains(frontCameraDeviceInput!) == true {
            captureSession.removeInput(frontCameraDeviceInput!)
            captureSession.addInput(backCameraDeviceInput!)
        } else if captureSession.inputs.contains(backCameraDeviceInput!) == true {
            captureSession.removeInput(backCameraDeviceInput!)
            captureSession.addInput(frontCameraDeviceInput!)
        }
        // Commit all the configuration changes at once.
        captureSession.commitConfiguration()
    }

    override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
        if segue.identifier == "previewCameraPhoto" {
            let previewVC = segue.destination as! PreviewViewController
            previewVC.image = self.image
        }
    }
}

extension CameraViewController: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        if let imageData = photo.fileDataRepresentation() {
            image = UIImage(data: imageData)
            performSegue(withIdentifier: "previewCameraPhoto", sender: nil)
        }
    }

    override var prefersStatusBarHidden: Bool {
        return true
    }
}
The lines of code below are used to capture the image. I execute them when the capture button is tapped; in your case, that is func camerButton(_ sender: Any). The definitions of the methods used are also below.
DispatchQueue.global(qos: .default).async {
    let videoConnection = self.imageOutput.connection(with: AVMediaType.video)
    let orientation: UIDeviceOrientation = UIDevice.current.orientation
    switch orientation {
    case .portrait:
        videoConnection?.videoOrientation = .portrait
    case .portraitUpsideDown:
        videoConnection?.videoOrientation = .portraitUpsideDown
    case .landscapeRight:
        videoConnection?.videoOrientation = .landscapeLeft
    case .landscapeLeft:
        videoConnection?.videoOrientation = .landscapeRight
    default:
        videoConnection?.videoOrientation = .portrait
    }
    self.imageOutput.captureStillImageAsynchronously(from: videoConnection!) { buffer, _ in
        self.session.stopRunning()
        guard let b = buffer
        else { return }
        let data = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(b)
        if var image = UIImage(data: data!) {
            // Crop the image if the output needs to be square.
            if self.configuration.onlySquareImagesFromCamera {
                image = self.cropImageToSquare(image)
            }
            // Flip the image if it was taken from the front camera.
            if let device = self.device, device.position == .front {
                image = self.flipImage(image: image)
            }
            DispatchQueue.main.async {
                self.didCapturePhoto?(image)
            }
        }
    }
}
The two methods used in this function:
func cropImageToSquare(_ image: UIImage) -> UIImage {
    let orientation: UIDeviceOrientation = UIDevice.current.orientation
    var imageWidth = image.size.width
    var imageHeight = image.size.height
    switch orientation {
    case .landscapeLeft, .landscapeRight:
        // Swap width and height if the orientation is landscape.
        imageWidth = image.size.height
        imageHeight = image.size.width
    default:
        break
    }
    // The center coordinate along the Y axis.
    let rcy = imageHeight * 0.5
    let rect = CGRect(x: rcy - imageWidth * 0.5, y: 0, width: imageWidth, height: imageWidth)
    let imageRef = image.cgImage?.cropping(to: rect)
    return UIImage(cgImage: imageRef!, scale: 1.0, orientation: image.imageOrientation)
}

// Used when the image is taken from the front camera.
func flipImage(image: UIImage!) -> UIImage! {
    let imageSize: CGSize = image.size
    UIGraphicsBeginImageContextWithOptions(imageSize, true, 1.0)
    let ctx = UIGraphicsGetCurrentContext()!
    ctx.rotate(by: CGFloat(Double.pi/2.0))
    ctx.translateBy(x: 0, y: -imageSize.width)
    ctx.scaleBy(x: imageSize.height/imageSize.width, y: imageSize.width/imageSize.height)
    ctx.draw(image.cgImage!, in: CGRect(x: 0.0,
                                        y: 0.0,
                                        width: imageSize.width,
                                        height: imageSize.height))
    let newImage: UIImage = UIGraphicsGetImageFromCurrentImageContext()!
    UIGraphicsEndImageContext()
    return newImage
}
I should not forget to give credit to the developers of this library: https://github.com/Yummypets/YPImagePicker/blob/2.5.1/Source/Camera/YPCameraVC.swift
Just add this to the image picker and the user will get the option of choosing their preferred crop ratio. The default will be as you wanted: a square photo.
self.ImagePicker.allowsEditing = true
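For context, a minimal UIImagePickerController setup where that flag applies; the property name above is the poster's, so this standalone sketch uses a local constant instead:

let picker = UIImagePickerController()
picker.sourceType = .camera
// Lets the user crop the capture; the edit box is square by default.
picker.allowsEditing = true
// Requires conforming to UIImagePickerControllerDelegate & UINavigationControllerDelegate.
picker.delegate = self
present(picker, animated: true)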

Errors with CIFilter and Swift 2: '_' is not convertible to 'String' + Type of expression is ambiguous without more context

I used this piece of code in Swift 1.2 (adapted from an Objective-C snippet found on SO) to apply a blur effect to an image.
It was working OK, but I can't convert it to Swift 2. I'm not sure I understand the error messages, and I can't find documentation about what has changed.
Error message:
'_' is not convertible to 'String'
And:
Type of expression is ambiguous without more context
Extension that worked with 1.2:
extension NSImage {
    func gaussianBlurOfRadius(radius: CGFloat) -> NSImage {
        let image = self
        image.lockFocus()
        let beginImage = CIImage(data: image.TIFFRepresentation!)
        // The error message highlights the next line.
        let params = [kCIInputImageKey: beginImage, kCIInputRadiusKey: radius]
        let filter = CIFilter(name: "CIGaussianBlur", withInputParameters: params)
        var output = filter.valueForKey("outputImage") as! CIImage
        let rect = NSMakeRect(0, 0, self.size.width, self.size.height)
        output.drawInRect(rect, fromRect: rect, operation: NSCompositingOperation.CompositeSourceOver, fraction: 1)
        image.unlockFocus()
        return image
    }
}
UPDATE:
Following Duncan's idea, I've added an explicit type to the declaration:
let params: [String: AnyObject] = [kCIInputImageKey: beginImage, kCIInputRadiusKey: radius]
but that doesn't fix it, though it removes the error message about type ambiguity.
extension NSImage {
    func gaussianBlurOfRadius(radius: CGFloat) -> NSImage {
        let image = self
        image.lockFocus()
        let beginImage = CIImage(data: image.TIFFRepresentation!)!
        let params = [kCIInputImageKey: beginImage, kCIInputRadiusKey: radius]
        let filter = CIFilter(name: "CIGaussianBlur", withInputParameters: params)!
        let output = filter.valueForKey("outputImage") as! CIImage
        let rect = NSMakeRect(0, 0, size.width, size.height)
        output.drawInRect(rect, fromRect: rect, operation: NSCompositingOperation.CompositeSourceOver, fraction: 1)
        image.unlockFocus()
        return image
    }
}
or
extension NSImage {
    func gaussianBlurOfRadius(radius: CGFloat) -> NSImage {
        let image = self
        image.lockFocus()
        let beginImage = CIImage(data: image.TIFFRepresentation!)
        let filter = CIFilter(name: "CIGaussianBlur")!
        filter.setValue(beginImage, forKey: kCIInputImageKey)
        filter.setValue(radius, forKey: kCIInputRadiusKey)
        let output = filter.valueForKey("outputImage") as! CIImage
        let rect = NSMakeRect(0, 0, size.width, size.height)
        output.drawInRect(rect, fromRect: rect, operation: NSCompositingOperation.CompositeSourceOver, fraction: 1)
        image.unlockFocus()
        return image
    }
}
At a guess, Swift is trying to create a Dictionary object with fixed types. Try:
let params: [String: AnyObject] =
    [kCIInputImageKey: beginImage, kCIInputRadiusKey: radius]
Or perhaps:
let params: NSDictionary =
    [kCIInputImageKey: beginImage, kCIInputRadiusKey: radius]