I'm getting Thread 1: EXC_BAD_ACCESS (code=EXC_I386_GPFLT) when layer.render(in: context) is called, while trying to create a snapshot image from a custom window:
let renderer = UIGraphicsImageRenderer(bounds: bounds, format: .init(for: traitCollection))
return renderer.image { action in
    let context = action.cgContext
    layer.render(in: context)
}
You can use the extension below to get it:
private var rendererKey: UInt8 = 0

extension UIView {
    // Lazily created UIGraphicsImageRenderer, stored via an associated object.
    var renderer: UIGraphicsImageRenderer! {
        get {
            guard let rendererInstance = objc_getAssociatedObject(self, &rendererKey) as? UIGraphicsImageRenderer else {
                self.renderer = UIGraphicsImageRenderer(bounds: bounds)
                return self.renderer
            }
            return rendererInstance
        }
        set {
            objc_setAssociatedObject(self, &rendererKey, newValue, objc_AssociationPolicy.OBJC_ASSOCIATION_RETAIN)
        }
    }

    func snapImageView() -> UIImageView {
        let img: UIImage = renderer.image { ctx in
            // Render synchronously. Dispatching this asynchronously (as in the
            // original post) would run after the renderer has already produced
            // an empty image, with a context that is no longer valid.
            // Call this on the main thread.
            layer.render(in: ctx.cgContext)
        }
        let imageView = UIImageView(image: img)
        imageView.frame = renderer.format.bounds
        imageView.clipsToBounds = true
        return imageView
    }
}
// Generate image and image view of any view instance
let anImageView = yourView.snapImageView()
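If the crash persists, one alternative worth trying (my suggestion, not part of the original answer) is drawHierarchy(in:afterScreenUpdates:), which snapshots the view without going through layer.render(in:) and must be called on the main thread:

extension UIView {
    // Minimal alternative sketch; call on the main thread.
    func snapshotImage() -> UIImage {
        let renderer = UIGraphicsImageRenderer(bounds: bounds)
        return renderer.image { _ in
            drawHierarchy(in: bounds, afterScreenUpdates: true)
        }
    }
}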
I'm trying to get the resolution of an image from URL without actually downloading it. So I have a function for that:
public func resolutionForImage(url: String) -> CGSize? {
    guard let url = URL(string: url) else { return nil }
    guard let source = CGImageSourceCreateWithURL(url as CFURL, nil) else {
        return nil
    }
    let propertiesOptions = [kCGImageSourceShouldCache: false] as CFDictionary
    guard let properties = CGImageSourceCopyPropertiesAtIndex(source, 0, propertiesOptions) as? [CFString: Any] else {
        return nil
    }
    if let width = properties[kCGImagePropertyPixelWidth] as? CGFloat,
       let height = properties[kCGImagePropertyPixelHeight] as? CGFloat {
        return CGSize(width: width, height: height)
    } else {
        return nil
    }
}
It works fine, but I need to run it on a background thread; on the main thread it blocks the UI.
This function is called from another function in a collection view cell, so in the end the calculateImageHeight output should be delivered on the main thread. Could anyone help me manage this? I'm still not good with threading.
public func calculateImageHeight(ratio: Double = 0.0) -> CGFloat {
    var calculatedRatio = ratio
    if ratio == 0.0 {
        if let size = resolutionForImage(url: imageUrl) {
            calculatedRatio = size.height / size.width
        }
    }
    let height = imageViewWidth * calculatedRatio
    return height.isFinite ? height : 100
}
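One common pattern is to do the CGImageSource work on a background queue and hop back to the main queue before touching the UI. A minimal sketch, assuming imageUrl and imageViewWidth exist as in the question; the completion-based signature is my own, not part of the original code:

public func calculateImageHeight(ratio: Double = 0.0, completion: @escaping (CGFloat) -> Void) {
    if ratio != 0.0 {
        let height = imageViewWidth * CGFloat(ratio)
        completion(height.isFinite ? height : 100)
        return
    }
    // The I/O-bound CGImageSource work runs off the main thread...
    DispatchQueue.global(qos: .userInitiated).async {
        let size = self.resolutionForImage(url: self.imageUrl)
        // ...and the result is delivered back on the main thread for the cell.
        DispatchQueue.main.async {
            var calculatedRatio: CGFloat = 0
            if let size = size {
                calculatedRatio = size.height / size.width
            }
            let height = self.imageViewWidth * calculatedRatio
            completion(height.isFinite ? height : 100)
        }
    }
}

The caller in the cell would then update its layout inside the completion block instead of using a returned value directly.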
I'm working on a photo editor and I just switched from a UIImageView to an MTKView for performance reasons. It works great, except that on some images there is a red box for some reason; when displaying a new image, the image size changes and the red box gets bigger.
I am using Mac Catalyst for this.
This is my MTKView code:
public let colorSpace = CGColorSpaceCreateDeviceRGB()

public lazy var commandQueue: MTLCommandQueue = { [unowned self] in
    if self.device == nil {
        self.device = MTLCreateSystemDefaultDevice()
    }
    return self.device!.makeCommandQueue()!
}()

public lazy var ciContext: CIContext = { [unowned self] in
    if self.device == nil {
        self.device = MTLCreateSystemDefaultDevice()
    }
    let context = CIContext(mtlDevice: self.device!, options: [CIContextOption.highQualityDownsample: true, CIContextOption.priorityRequestLow: false])
    return context
}()

public override init(frame frameRect: CGRect, device: MTLDevice? = MTLCreateSystemDefaultDevice()) {
    super.init(frame: frameRect, device: device ?? MTLCreateSystemDefaultDevice())
    if super.device == nil {
        fatalError("Device doesn't support Metal")
    }
    self.framebufferOnly = false
}

required public init(coder aDecoder: NSCoder) {
    super.init(coder: aDecoder)
    self.framebufferOnly = false
    self.device = MTLCreateSystemDefaultDevice()
}

/// The image to display
public var image: CIImage? {
    didSet {
        renderImage()
    }
}

public var orientation: CGImagePropertyOrientation? {
    didSet {
        renderImage()
    }
}

func renderImage() {
    guard var image = self.image,
          let targetTexture = self.currentDrawable?.texture else {
        print("No texture/image")
        return
    }
    self.drawableSize = image.extent.size
    if let orientation = orientation {
        image = image.oriented(orientation)
    }
    let commandBuffer = self.commandQueue.makeCommandBuffer()
    let bounds = CGRect(origin: CGPoint.zero, size: self.drawableSize)
    let originX = image.extent.origin.x
    let originY = image.extent.origin.y
    let scaleX = self.drawableSize.width / image.extent.width
    let scaleY = self.drawableSize.height / image.extent.height
    let scale = min(scaleX, scaleY)
    let scaledImage = image
        .transformed(by: CGAffineTransform(translationX: -originX, y: -originY))
        .transformed(by: CGAffineTransform(scaleX: scale, y: scale))
    self.ciContext.render(scaledImage,
                          to: targetTexture,
                          commandBuffer: commandBuffer,
                          bounds: bounds,
                          colorSpace: self.colorSpace)
    commandBuffer?.present(self.currentDrawable!)
    commandBuffer?.commit()
    self.draw()
    self.releaseDrawables()
}
I am currently building a camera app and want the camera to take a square 375x375 image, like Instagram, and save it like that.
I am able to square off the viewfinder of the camera, but it is not taking the picture the right way; also, when I save it, it saves the full view. I looked around the other Q&As on here but none of them seem to work with my code.
Can someone please help me figure this out?
import Foundation
import UIKit
import AVFoundation

class CameraViewController: UIViewController {
    var captureSession = AVCaptureSession()
    var frontCameraDeviceInput: AVCaptureDeviceInput?
    var backCameraDeviceInput: AVCaptureDeviceInput?
    var currentCamera: AVCaptureDevice?
    var photoOutput: AVCapturePhotoOutput?
    var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
    var image: UIImage?

    override func viewDidLoad() {
        super.viewDidLoad()
        setupCaptureSession()
        setupDevice()
        setupInputOutput()
        setupPreviewLayer()
        startRunningCaptureSession()
    }

    func setupCaptureSession() {
        captureSession.sessionPreset = AVCaptureSession.Preset.photo
    }

    func setupDevice() {
        let frontCamera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front)
        let backCamera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
        frontCameraDeviceInput = try? AVCaptureDeviceInput(device: frontCamera!)
        backCameraDeviceInput = try? AVCaptureDeviceInput(device: backCamera!)
    }

    func setupInputOutput() {
        captureSession.addInput(backCameraDeviceInput!)
        photoOutput = AVCapturePhotoOutput()
        photoOutput?.isHighResolutionCaptureEnabled = true
        photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
        captureSession.addOutput(photoOutput!)
    }

    func setupPreviewLayer() {
        cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
        cameraPreviewLayer?.frame = self.view.frame
        self.view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
    }

    func startRunningCaptureSession() {
        captureSession.startRunning()
    }

    @IBAction func camerButton(_ sender: Any) {
        let settings = AVCapturePhotoSettings()
        photoOutput?.capturePhoto(with: settings, delegate: self)
    }

    @IBAction func switchCamera(_ sender: Any) {
        captureSession.beginConfiguration()
        // Change camera device inputs from back to front or the opposite
        if captureSession.inputs.contains(frontCameraDeviceInput!) == true {
            captureSession.removeInput(frontCameraDeviceInput!)
            captureSession.addInput(backCameraDeviceInput!)
        } else if captureSession.inputs.contains(backCameraDeviceInput!) == true {
            captureSession.removeInput(backCameraDeviceInput!)
            captureSession.addInput(frontCameraDeviceInput!)
        }
        // Commit all the configuration changes at once
        captureSession.commitConfiguration()
    }

    override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
        if segue.identifier == "previewCameraPhoto" {
            let previewVC = segue.destination as! PreviewViewController
            previewVC.image = self.image
        }
    }
}

extension CameraViewController: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        if let imageData = photo.fileDataRepresentation() {
            image = UIImage(data: imageData)
            performSegue(withIdentifier: "previewCameraPhoto", sender: nil)
        }
    }

    override var prefersStatusBarHidden: Bool {
        return true
    }
}
The lines of code below are used to capture the image; I execute them when the capture button is tapped. In your case that is
func camerButton(_ sender: Any)
The definitions of the methods used are also below.
DispatchQueue.global(qos: .default).async {
    let videoConnection = self.imageOutput.connection(with: AVMediaType.video)
    let orientation: UIDeviceOrientation = UIDevice.current.orientation
    switch orientation {
    case .portrait:
        videoConnection?.videoOrientation = .portrait
    case .portraitUpsideDown:
        videoConnection?.videoOrientation = .portraitUpsideDown
    case .landscapeRight:
        videoConnection?.videoOrientation = .landscapeLeft
    case .landscapeLeft:
        videoConnection?.videoOrientation = .landscapeRight
    default:
        videoConnection?.videoOrientation = .portrait
    }
    self.imageOutput.captureStillImageAsynchronously(from: videoConnection!) { buffer, _ in
        self.session.stopRunning()
        guard let b = buffer else { return }
        let data = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(b)
        if var image = UIImage(data: data!) {
            // Crop the image if the output needs to be square.
            if self.configuration.onlySquareImagesFromCamera {
                image = self.cropImageToSquare(image)
            }
            // Flip the image if taken from the front camera.
            if let device = self.device, device.position == .front {
                image = self.flipImage(image: image)
            }
            DispatchQueue.main.async {
                self.didCapturePhoto?(image)
            }
        }
    }
}
The two methods used in this function:
func cropImageToSquare(_ image: UIImage) -> UIImage {
    let orientation: UIDeviceOrientation = UIDevice.current.orientation
    var imageWidth = image.size.width
    var imageHeight = image.size.height
    switch orientation {
    case .landscapeLeft, .landscapeRight:
        // Swap width and height if orientation is landscape
        imageWidth = image.size.height
        imageHeight = image.size.width
    default:
        break
    }
    // The center coordinate along Y axis
    let rcy = imageHeight * 0.5
    let rect = CGRect(x: rcy - imageWidth * 0.5, y: 0, width: imageWidth, height: imageWidth)
    let imageRef = image.cgImage?.cropping(to: rect)
    return UIImage(cgImage: imageRef!, scale: 1.0, orientation: image.imageOrientation)
}

// Used when image is taken from the front camera.
func flipImage(image: UIImage!) -> UIImage! {
    let imageSize: CGSize = image.size
    UIGraphicsBeginImageContextWithOptions(imageSize, true, 1.0)
    let ctx = UIGraphicsGetCurrentContext()!
    ctx.rotate(by: CGFloat(Double.pi / 2.0))
    ctx.translateBy(x: 0, y: -imageSize.width)
    ctx.scaleBy(x: imageSize.height / imageSize.width, y: imageSize.width / imageSize.height)
    ctx.draw(image.cgImage!, in: CGRect(x: 0.0,
                                        y: 0.0,
                                        width: imageSize.width,
                                        height: imageSize.height))
    let newImage: UIImage = UIGraphicsGetImageFromCurrentImageContext()!
    UIGraphicsEndImageContext()
    return newImage
}
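Note that AVCaptureStillImageOutput is deprecated since iOS 10. With the AVCapturePhotoOutput already used in the question, the same crop can be applied inside the existing delegate callback; a sketch of my adaptation, not part of the library code:

func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
    guard let imageData = photo.fileDataRepresentation(),
          var capturedImage = UIImage(data: imageData) else { return }
    // Reuse the library's cropping helper to square the photo.
    capturedImage = cropImageToSquare(capturedImage)
    self.image = capturedImage
    performSegue(withIdentifier: "previewCameraPhoto", sender: nil)
}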
I should not forget to give credit to the developers of this library - https://github.com/Yummypets/YPImagePicker/blob/2.5.1/Source/Camera/YPCameraVC.swift
Just add this to the image picker and the user will get the option of choosing their preferred crop ratio. The default will be as you wanted: a square photo.
self.ImagePicker.allowsEditing = true
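For context, a minimal sketch of the UIImagePickerController setup this refers to; the property name imagePicker and the surrounding view controller are assumptions:

import UIKit

class PickerViewController: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {
    let imagePicker = UIImagePickerController()

    func presentPicker() {
        imagePicker.delegate = self
        imagePicker.sourceType = .camera
        imagePicker.allowsEditing = true // shows the built-in square crop UI after capture
        present(imagePicker, animated: true)
    }

    func imagePickerController(_ picker: UIImagePickerController,
                               didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey: Any]) {
        // .editedImage is the user-cropped (square) image.
        let image = (info[.editedImage] ?? info[.originalImage]) as? UIImage
        picker.dismiss(animated: true)
        _ = image // hand the image off here
    }
}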
I am using the below code in my project. After updating to Swift 4 and running, my app crashed. How can I fix it?
Code:
func from(systemItem: UIBarButtonSystemItem) -> UIImage? {
    let tempItem = UIBarButtonItem(barButtonSystemItem: systemItem, target: nil, action: nil)
    // add to toolbar and render it
    UIToolbar().setItems([tempItem], animated: false)
    // get image from the real UIButton
    let itemView = tempItem.value(forKey: "view") as! UIView
    for view in itemView.subviews {
        if let button = view as? UIButton, let imageView = button.imageView {
            return imageView.image
        }
    }
    return nil
}
}
extension UITextView {
    static let ScrollModeBottom = "UITextFieldScrollModeBottom"
    static let ScrollModeUp = "UITextFieldScrollModeUp"
    static let ScrollModeMiddle = "UITextFieldScrollModeMiddle"

    func scrollToBotom() {
        let range = NSMakeRange((text as NSString).length - 1, 1)
        scrollRangeToVisible(range)
    }

    var scrollMode: String {
        let scrollViewHeight: Float = Float(frame.size.height)
        let scrollContentSizeHeight: Float = Float(contentSize.height)
        let scrollOffset: Float = Float(contentOffset.y)
        if scrollOffset == 0 {
            return UITextView.ScrollModeUp
        } else if scrollOffset + scrollViewHeight == scrollContentSizeHeight {
            return UITextView.ScrollModeBottom
        } else {
            return UITextView.ScrollModeMiddle
        }
    }
}
Thread 1: EXC_BAD_INSTRUCTION (code=EXC_I386_INVOP, subcode=0x0)
The bar button item's underlying view is not created until the toolbar has actually been rendered, so on Swift 4 / iOS 11 value(forKey: "view") returns nil and the forced cast crashes. Forcing a snapshot of the toolbar makes it render first:

func from(systemItem: UIBarButtonSystemItem) -> UIImage? {
    let tempItem = UIBarButtonItem(barButtonSystemItem: systemItem, target: nil, action: nil)
    // Add the item to a toolbar; snapshotting forces the render pass
    // that creates the item's private view.
    let bar = UIToolbar()
    bar.setItems([tempItem], animated: false)
    bar.snapshotView(afterScreenUpdates: true)
    // Get the image from the real UIButton inside the item's view.
    guard let itemView = tempItem.value(forKey: "view") as? UIView else { return nil }
    for view in itemView.subviews {
        if let button = view as? UIButton,
           let image = button.imageView?.image {
            return image.withRenderingMode(.alwaysTemplate)
        }
    }
    return nil
}
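A hypothetical call site, assuming the function lives in the same extension as in the question:

let plusIcon = from(systemItem: .add) // template image of the system "+" item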
I am using the Vision framework on iOS 11 to detect text in an image.
The text is getting detected successfully, but how can we get the detected text?
Recognizing text in an image
VNRecognizeTextRequest works starting from iOS 13.0 and macOS 10.15 and higher.
In Apple Vision you can easily extract text from an image using the VNRecognizeTextRequest class, which lets you make an image analysis request that finds and recognizes text in an image.
Here's a SwiftUI solution showing you how to do it (tested in Xcode 13.4, iOS 15.5):
import SwiftUI
import Vision

struct ContentView: View {
    var body: some View {
        ZStack {
            Color.black.ignoresSafeArea()
            Image("imageText").scaleEffect(0.5)
            SomeText()
        }
    }
}
The logic is the following:
struct SomeText: UIViewRepresentable {
    let label = UITextView(frame: .zero)

    func makeUIView(context: Context) -> UITextView {
        label.backgroundColor = .clear
        label.textColor = .systemYellow
        label.textAlignment = .center
        label.font = .boldSystemFont(ofSize: 25)
        return label
    }

    func updateUIView(_ uiView: UITextView, context: Context) {
        let path = Bundle.main.path(forResource: "imageText", ofType: "png")
        let url = URL(fileURLWithPath: path!)
        let requestHandler = VNImageRequestHandler(url: url, options: [:])
        let request = VNRecognizeTextRequest { (request, _) in
            guard let obs = request.results as? [VNRecognizedTextObservation]
            else { return }
            for observation in obs {
                let topCan: [VNRecognizedText] = observation.topCandidates(1)
                if let recognizedText: VNRecognizedText = topCan.first {
                    label.text = recognizedText.string
                }
            }
        }
        // Non-realtime but accurate text recognition.
        request.recognitionLevel = VNRequestTextRecognitionLevel.accurate
        // Alternatively, nearly realtime but less accurate recognition.
        // Assigning it here would override the line above, so pick one:
        // request.recognitionLevel = VNRequestTextRecognitionLevel.fast
        try? requestHandler.perform([request])
    }
}
If you want to know the list of supported languages for recognition, please read this post.
Not exactly a dupe but similar to: Converting a Vision VNTextObservation to a String
You need to either use CoreML or another library to perform OCR (SwiftOCR, etc.)
This will return an overlay image with rectangle boxes on the detected text.
Here is the full Xcode project:
https://github.com/cyruslok/iOS11-Vision-Framework-Demo
Hope it is helpful.
// Text Detect
func textDetect(dectect_image: UIImage, display_image_view: UIImageView) -> UIImage {
    let handler = VNImageRequestHandler(cgImage: dectect_image.cgImage!)
    var result_img = UIImage()
    let request = VNDetectTextRectanglesRequest(completionHandler: { (request, error) in
        if error != nil {
            print("Got an error running the text detect request")
        } else {
            result_img = self.drawRectangleForTextDectect(image: dectect_image,
                                                          results: request.results as! [VNTextObservation])
        }
    })
    request.reportCharacterBoxes = true
    do {
        try handler.perform([request])
        return result_img
    } catch {
        return result_img
    }
}
func drawRectangleForTextDectect(image: UIImage, results: [VNTextObservation]) -> UIImage {
    let renderer = UIGraphicsImageRenderer(size: image.size)
    // Vision bounding boxes are normalized with a flipped Y axis; this
    // transform maps them into the image's pixel space.
    var t = CGAffineTransform.identity
    t = t.scaledBy(x: image.size.width, y: -image.size.height)
    t = t.translatedBy(x: 0, y: -1)
    let img = renderer.image { ctx in
        for textObservation in results {
            // Green box around each detected text region.
            ctx.cgContext.setFillColor(UIColor.clear.cgColor)
            ctx.cgContext.setStrokeColor(UIColor.green.cgColor)
            ctx.cgContext.setLineWidth(1)
            ctx.cgContext.addRect(textObservation.boundingBox.applying(t))
            ctx.cgContext.drawPath(using: .fillStroke)
            // Red box around each character.
            for characterBox in textObservation.characterBoxes! {
                ctx.cgContext.setFillColor(UIColor.clear.cgColor)
                ctx.cgContext.setStrokeColor(UIColor.red.cgColor)
                ctx.cgContext.setLineWidth(1)
                ctx.cgContext.addRect(characterBox.boundingBox.applying(t))
                ctx.cgContext.drawPath(using: .fillStroke)
            }
        }
    }
    return img
}
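For reference, a hypothetical call site (note that, as shown, the display_image_view parameter is not actually used inside textDetect):

let overlay = textDetect(dectect_image: photo, display_image_view: photoImageView)
photoImageView.image = overlay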