Setting image quality using UIGraphicsPDFRenderer - Swift

I'm trying to add an Aztec barcode to a PDF using UIGraphicsPDFRenderer. My issue is that the result is blurry; I thought the fix was setting interpolationQuality, but it has no effect. Thanks for your help.
let renderer = UIGraphicsPDFRenderer(bounds: CGRect(x: 0, y: 0, width: 70.8661, height: 70.8661))
return renderer.pdfData { ctx in
    ctx.beginPage()
    ctx.cgContext.interpolationQuality = .none // doesn't do anything
    let barcode = generateQRCode(from: UUID().description)
    barcode.draw(in: CGRect(x: 0, y: 0, width: 70.8661, height: 70.8661))
}

func generateQRCode(from string: String) -> UIImage {
    // `filter` and `context` are defined elsewhere in the original project,
    // presumably a Core Image barcode generator filter and a CIContext.
    filter.message = Data(string.utf8)
    if let outputImage = filter.outputImage {
        if let cgimg = context.createCGImage(outputImage, from: outputImage.extent) {
            return UIImage(cgImage: cgimg)
        }
    }
    return UIImage()
}
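For context: the generated barcode's CIImage is only a few dozen pixels on a side, so any later drawing has to interpolate it up. A common fix (a sketch, assuming a Core Image generator filter such as CIFilter.aztecCodeGenerator from CoreImage.CIFilterBuiltins; the helper name is hypothetical) is to scale the CIImage itself before rasterizing, so the resulting CGImage is already at the target resolution:

import CoreImage.CIFilterBuiltins
import UIKit

// Hypothetical helper: renders the Aztec code at `scale` times its native
// size so no interpolation is needed when the UIImage is drawn into the PDF.
func generateAztecCode(from string: String, scale: CGFloat = 10) -> UIImage? {
    let filter = CIFilter.aztecCodeGenerator()
    filter.message = Data(string.utf8)
    guard let output = filter.outputImage?
        .transformed(by: CGAffineTransform(scaleX: scale, y: scale)) else { return nil }
    let context = CIContext()
    guard let cgImage = context.createCGImage(output, from: output.extent) else { return nil }
    return UIImage(cgImage: cgImage)
}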

Related

Why does the app crash when changing UIImage color?

I have changed the color of a UIImage, but the app crashes. I run this code in a loop. I have added an autoreleasepool, but it did not solve my problem.
// This method is an extension of UIImage
func withColor(_ color: UIColor) -> UIImage {
    UIGraphicsBeginImageContextWithOptions(size, false, scale)
    // defer guarantees the context is ended even on the early returns below
    defer { UIGraphicsEndImageContext() }
    guard let ctx = UIGraphicsGetCurrentContext(), let cgImage = cgImage else { return self }
    color.setFill()
    // flip the coordinate system so the CGImage mask is not drawn upside down
    ctx.translateBy(x: 0, y: size.height)
    ctx.scaleBy(x: 1.0, y: -1.0)
    ctx.clip(to: CGRect(x: 0, y: 0, width: size.width, height: size.height), mask: cgImage)
    ctx.fill(CGRect(x: 0, y: 0, width: size.width, height: size.height))
    guard let colored = UIGraphicsGetImageFromCurrentImageContext() else { return self }
    return colored
}
func getImage() {
    // arrImages, elementData and UIColor(hexString:) come from elsewhere in the project
    let view = UIView(frame: CGRect(x: 0, y: 0, width: 600, height: 600))
    for i in 0..<50 {
        let url = arrImages[i]
        let elementView = UIImageView(frame: CGRect(x: 0, y: 0, width: 600, height: 600))
        elementView.image = UIImage(named: url)
        if let imgColor = elementData.bgColor, imgColor != "" {
            elementView.image = elementView.image?.withColor(UIColor(hexString: imgColor))
        }
        view.addSubview(elementView)
    }
}
You have mentioned that you used an autorelease pool, but I could not find it in your code. Try the following and see if it solves the problem.
In your getImage function, wrap the body of the for loop in autoreleasepool, like below.
for i in 0..<50 {
    autoreleasepool {
        let url = arrImages[i]
        let elementView = UIImageView(frame: CGRect(x: 0, y: 0, width: 600, height: 600))
        elementView.image = UIImage(named: url)
        if let imgColor = elementData.bgColor, imgColor != "" {
            elementView.image = elementView.image?.withColor(UIColor(hexString: imgColor))
        }
        view.addSubview(elementView)
    }
}
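For what it's worth, if the crash is memory pressure from the legacy UIGraphicsBeginImageContext API, a minimal sketch of an alternative (assuming iOS 10+; the method name `tinted(with:)` is hypothetical) is UIGraphicsImageRenderer, which manages the context's lifetime for you:

import UIKit

extension UIImage {
    // Sketch: tint the opaque pixels of the image with `color` using
    // UIGraphicsImageRenderer instead of the legacy context API.
    func tinted(with color: UIColor) -> UIImage {
        let renderer = UIGraphicsImageRenderer(size: size)
        return renderer.image { ctx in
            guard let cgImage = cgImage else { return }
            let rect = CGRect(origin: .zero, size: size)
            // flip so the CGImage mask is right side up
            ctx.cgContext.translateBy(x: 0, y: size.height)
            ctx.cgContext.scaleBy(x: 1, y: -1)
            ctx.cgContext.clip(to: rect, mask: cgImage)
            color.setFill()
            ctx.cgContext.fill(rect)
        }
    }
}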

NSImage getting resized when I draw text on it

I have the following code to draw text over an NSImage, but the resulting image gets resized to a smaller one when I save it to disk.
What am I doing wrong? Please advise.
func drawText(image: NSImage) -> NSImage {
    let text = "Sample Text"
    let font = NSFont.boldSystemFont(ofSize: 18)
    let imageRect = CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height)
    let textRect = CGRect(x: 5, y: 5, width: image.size.width - 5, height: image.size.height - 5)
    let textStyle = NSMutableParagraphStyle.default().mutableCopy() as! NSMutableParagraphStyle
    let textFontAttributes = [
        NSFontAttributeName: font,
        NSForegroundColorAttributeName: NSColor.white,
        NSParagraphStyleAttributeName: textStyle
    ]
    let im = NSImage(size: image.size)
    let rep = NSBitmapImageRep(bitmapDataPlanes: nil, pixelsWide: Int(image.size.width),
                               pixelsHigh: Int(image.size.height), bitsPerSample: 8,
                               samplesPerPixel: 4, hasAlpha: true, isPlanar: false,
                               colorSpaceName: NSCalibratedRGBColorSpace,
                               bytesPerRow: 0, bitsPerPixel: 0)!
    im.addRepresentation(rep)
    im.lockFocus()
    image.draw(in: imageRect)
    text.draw(in: textRect, withAttributes: textFontAttributes)
    im.unlockFocus()
    return im
}
This is a different approach: use a temporary NSView to draw the image and the text, then cache the result in a new image (code is Swift 4). The benefit is that you don't need to deal with pixels.
class ImageView: NSView {
    var image: NSImage
    var text: String

    init(image: NSImage, text: String) {
        self.image = image
        self.text = text
        super.init(frame: NSRect(origin: NSZeroPoint, size: image.size))
    }

    required init?(coder decoder: NSCoder) { fatalError() }

    override func draw(_ dirtyRect: NSRect) {
        let font = NSFont.boldSystemFont(ofSize: 18)
        let textRect = CGRect(x: 5, y: 5, width: image.size.width - 5, height: image.size.height - 5)
        image.draw(in: bounds) // draw the full image, not just the dirty rect
        text.draw(in: textRect, withAttributes: [.font: font, .foregroundColor: NSColor.white])
    }

    var outputImage: NSImage {
        let imageRep = bitmapImageRepForCachingDisplay(in: frame)!
        cacheDisplay(in: frame, to: imageRep)
        let tiffData = imageRep.tiffRepresentation!
        return NSImage(data: tiffData)!
    }
}
To use it, initialize a view
let image = ... // get some image
let view = ImageView(image: image, text: "Sample Text")
and get the new image
let imageWithText = view.outputImage
Note:
The paragraph style is not used at all, but if you want to create a mutable paragraph style just write
let textStyle = NSMutableParagraphStyle()
Mixed up pixels vs. points? Depending on your screen (2x or 3x), the image comes out 2 or 3 times smaller.
Here is more detailed info (scroll down to "Converting between pixels and points"):
http://blog.fluidui.com/designing-for-mobile-101-pixels-points-and-resolutions/
But keep in mind that:
NSImage is resolution aware and uses a HiDPI graphics context when you lockFocus on a system with retina screen.
The image dimensions you pass to your NSBitmapImageRep initializer are in points (not pixels). A 150.0-point-wide image therefore uses 300 horizontal pixels in a @2x context.
Source:
How to save PNG file from NSImage (retina issues)
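A sketch of the usual workaround, assuming you want the output bitmap to match explicit pixel dimensions (the helper name `annotate` is hypothetical): create the NSBitmapImageRep yourself at pixel size, set its point size, and draw into a graphics context backed by that rep instead of using lockFocus.

import AppKit

// Hypothetical helper: draws `text` over `image` into a bitmap whose pixel
// dimensions are explicit, so a Retina (@2x) context cannot resize the result.
func annotate(image: NSImage, text: String, pixelSize: NSSize) -> NSImage? {
    guard let rep = NSBitmapImageRep(bitmapDataPlanes: nil,
                                     pixelsWide: Int(pixelSize.width),
                                     pixelsHigh: Int(pixelSize.height),
                                     bitsPerSample: 8,
                                     samplesPerPixel: 4,
                                     hasAlpha: true,
                                     isPlanar: false,
                                     colorSpaceName: .calibratedRGB,
                                     bytesPerRow: 0,
                                     bitsPerPixel: 0),
          let ctx = NSGraphicsContext(bitmapImageRep: rep) else { return nil }
    rep.size = pixelSize // 1 point == 1 pixel in this rep

    NSGraphicsContext.saveGraphicsState()
    NSGraphicsContext.current = ctx
    image.draw(in: NSRect(origin: .zero, size: pixelSize))
    text.draw(at: NSPoint(x: 5, y: 5),
              withAttributes: [.font: NSFont.boldSystemFont(ofSize: 18),
                               .foregroundColor: NSColor.white])
    NSGraphicsContext.restoreGraphicsState()

    let result = NSImage(size: pixelSize)
    result.addRepresentation(rep)
    return result
}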
The following simple app works for me. Enjoy ;)
import Cocoa

class ViewController: NSViewController {

    func save(image: NSImage, imageURL: String, format: String) -> Bool {
        let bMImg = NSBitmapImageRep(data: (image.tiffRepresentation)!)
        switch format {
        case ".png":
            let filepath = URL(fileURLWithPath: imageURL + ".png")
            let dataToSave = bMImg?.representation(using: NSBitmapImageRep.FileType.png,
                                                   properties: [NSBitmapImageRep.PropertyKey.compressionFactor: 1])
            do {
                try dataToSave?.write(to: filepath)
                return true
            } catch {
                return false
            }
        default:
            return false
        }
    }

    func draw(text: String, image: NSImage) -> NSImage {
        let font = NSFont.boldSystemFont(ofSize: 18)
        let imageRect = CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height)
        let textRect = CGRect(x: 5, y: 5, width: image.size.width - 5, height: image.size.height - 5)
        let textStyle = NSMutableParagraphStyle.default.mutableCopy() as! NSMutableParagraphStyle
        let textFontAttributes = [
            NSAttributedStringKey.font: font,
            NSAttributedStringKey.foregroundColor: NSColor.white,
            NSAttributedStringKey.paragraphStyle: textStyle
        ]
        let im = NSImage(size: image.size)
        let rep = NSBitmapImageRep(bitmapDataPlanes: nil, pixelsWide: Int(image.size.width),
                                   pixelsHigh: Int(image.size.height), bitsPerSample: 8,
                                   samplesPerPixel: 4, hasAlpha: true, isPlanar: false,
                                   colorSpaceName: NSColorSpaceName.calibratedRGB,
                                   bytesPerRow: 0, bitsPerPixel: 0)!
        im.addRepresentation(rep)
        im.lockFocus()
        image.draw(in: imageRect)
        text.draw(in: textRect, withAttributes: textFontAttributes)
        im.unlockFocus()
        return im
    }

    @IBAction func action(_ sender: NSButton) {
        let dialog = NSOpenPanel()
        dialog.title = "Choose an image..."
        dialog.showsResizeIndicator = true
        dialog.showsHiddenFiles = false
        dialog.canChooseDirectories = true
        dialog.canCreateDirectories = true
        dialog.allowsMultipleSelection = false
        dialog.allowedFileTypes = ["png", "jpg"]
        if dialog.runModal() == NSApplication.ModalResponse.OK {
            guard let url = dialog.url,
                  let imageCIImage = CIImage(contentsOf: url) else {
                return
            }
            let rep = NSCIImageRep(ciImage: imageCIImage)
            let nsImage = NSImage(size: rep.size)
            nsImage.addRepresentation(rep)
            let imageWithText = draw(text: "ABC", image: nsImage)
            if save(image: imageWithText, imageURL: "imageWithText", format: ".png") {
                print("Success")
            } else {
                print("ERROR: Failed to save image")
            }
        } else {
            // User clicked on "Cancel"
            return
        }
    }
}

Write Text on an NSImage in Swift-OSX

I have the following Objective-C code to write text onto an image. I'm new to Swift; how can I do this in Swift?
float width = 10.0;
float height = 10.0;
NSImage *finalImage = [[NSImage alloc] initWithSize:NSMakeSize(width, height)];

// obtain images - your sources may vary
NSImage *overlay = [[NSImage alloc] initWithData:[NSData dataWithContentsOfFile:@"/path/to/overlay_image.jpg"]];
NSImage *mainImage = [[NSImage alloc] initWithData:[NSData dataWithContentsOfFile:@"/path/to/main_image.jpg"]];

[finalImage lockFocus];

// draw the base image
[mainImage drawInRect:NSMakeRect(0, 0, width, height)
             fromRect:NSZeroRect operation:NSCompositeSourceOver fraction:1.0];

// draw the overlay image at some offset point
[overlay drawInRect:NSMakeRect(10, 10, [overlay size].width, [overlay size].height)
           fromRect:NSZeroRect operation:NSCompositeSourceOver fraction:1.0];

[finalImage unlockFocus];

NSData *finalData = [finalImage TIFFRepresentation];
[[[NSBitmapImageRep imageRepWithData:finalData] representationUsingType:NSJPEGFileType properties:nil] writeToFile:[NSString stringWithFormat:@"/path/to/folder/new_image.jpg"] atomically:YES];
UPDATE:
I have the following method to draw a string onto an image, but when I use it I get cropped-out image portions. Something seems to be wrong with it. Please advise.
func drawText(image: NSImage) -> NSImage {
    let text = "Sample Text"
    let font = NSFont.boldSystemFont(ofSize: 18)
    let imageRect = CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height)
    let textRect = CGRect(x: 5, y: 5, width: image.size.width - 5, height: image.size.height - 5)
    let textStyle = NSMutableParagraphStyle.default().mutableCopy() as! NSMutableParagraphStyle
    let textFontAttributes = [
        NSFontAttributeName: font,
        NSForegroundColorAttributeName: NSColor.white,
        NSParagraphStyleAttributeName: textStyle
    ]
    let im = NSImage(size: image.size)
    let rep = NSBitmapImageRep(bitmapDataPlanes: nil, pixelsWide: Int(image.size.width),
                               pixelsHigh: Int(image.size.height), bitsPerSample: 8,
                               samplesPerPixel: 4, hasAlpha: true, isPlanar: false,
                               colorSpaceName: NSCalibratedRGBColorSpace,
                               bytesPerRow: 0, bitsPerPixel: 0)!
    im.addRepresentation(rep)
    im.lockFocus()
    image.draw(in: imageRect)
    text.draw(in: textRect, withAttributes: textFontAttributes)
    im.unlockFocus()
    return im
}
Here's this code in Swift 3
let width: CGFloat = 10.0
let height: CGFloat = 10.0
let finalImage = NSImage(size: NSMakeSize(width, height))

// obtain images - your sources may vary
// note: use file URLs for paths on disk; URL(string:) would produce a
// schemeless URL that Data(contentsOf:) cannot read
var overlay: NSImage?
var mainImage: NSImage?
let overlayURL = URL(fileURLWithPath: "/path/to/overlay_image.jpg")
do {
    let data = try Data(contentsOf: overlayURL)
    overlay = NSImage(data: data)
} catch {
    print("Unable to get data")
}
let mainImageURL = URL(fileURLWithPath: "/path/to/main_image.jpg")
do {
    let data = try Data(contentsOf: mainImageURL)
    mainImage = NSImage(data: data)
} catch {
    print("Unable to get data")
}

finalImage.lockFocus()
// draw the base image
mainImage?.draw(in: NSMakeRect(0, 0, width, height), from: NSZeroRect,
                operation: NSCompositingOperation.sourceOver, fraction: 1.0)
// draw the overlay image at some offset point
if let overlay = overlay {
    overlay.draw(in: NSMakeRect(10, 10, overlay.size.width, overlay.size.height),
                 from: NSZeroRect, operation: NSCompositingOperation.sourceOver, fraction: 1.0)
}
finalImage.unlockFocus()

if let finalData = finalImage.tiffRepresentation {
    let outputURL = URL(fileURLWithPath: "/path/to/folder/new_image.jpg")
    do {
        try NSBitmapImageRep(data: finalData)?.representation(using: NSJPEGFileType, properties: [:])?.write(to: outputURL)
    } catch {
        print("Failed to write")
    }
}
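A small note: in Swift 4 and later the NSJPEGFileType constant was renamed. A sketch of the save step with the current names (same hypothetical path as above):

if let finalData = finalImage.tiffRepresentation {
    let outputURL = URL(fileURLWithPath: "/path/to/folder/new_image.jpg")
    // NSBitmapImageRep.FileType.jpeg replaces the old NSJPEGFileType constant
    try? NSBitmapImageRep(data: finalData)?
        .representation(using: .jpeg, properties: [:])?
        .write(to: outputURL)
}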

Is there an easier way to set up a pixel buffer for CoreML? [duplicate]

I am trying to get Apple's sample Core ML models that were demoed at WWDC 2017 to function correctly. I am using GoogLeNet to try to classify images (see the Apple Machine Learning page). The model takes a CVPixelBuffer as input. I have an image called imageSample.jpg that I'm using for this demo. My code is below:
var sample = UIImage(named: "imageSample")?.cgImage
let bufferThree = getCVPixelBuffer(sample!)

let model = GoogLeNetPlaces()
guard let output = try? model.prediction(input: GoogLeNetPlacesInput(sceneImage: bufferThree!)) else {
    fatalError("Unexpected runtime error.")
}

print(output.sceneLabel)
I am always getting the unexpected runtime error in the output rather than an image classification. My code to convert the image is below:
func getCVPixelBuffer(_ image: CGImage) -> CVPixelBuffer? {
    let imageWidth = Int(image.width)
    let imageHeight = Int(image.height)
    let attributes: [NSObject: AnyObject] = [
        kCVPixelBufferCGImageCompatibilityKey: true as AnyObject,
        kCVPixelBufferCGBitmapContextCompatibilityKey: true as AnyObject
    ]

    var pxbuffer: CVPixelBuffer? = nil
    CVPixelBufferCreate(kCFAllocatorDefault,
                        imageWidth,
                        imageHeight,
                        kCVPixelFormatType_32ARGB,
                        attributes as CFDictionary?,
                        &pxbuffer)

    if let _pxbuffer = pxbuffer {
        let flags = CVPixelBufferLockFlags(rawValue: 0)
        CVPixelBufferLockBaseAddress(_pxbuffer, flags)
        let pxdata = CVPixelBufferGetBaseAddress(_pxbuffer)

        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGContext(data: pxdata,
                                width: imageWidth,
                                height: imageHeight,
                                bitsPerComponent: 8,
                                bytesPerRow: CVPixelBufferGetBytesPerRow(_pxbuffer),
                                space: rgbColorSpace,
                                bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)

        if let _context = context {
            _context.draw(image, in: CGRect(x: 0, y: 0, width: imageWidth, height: imageHeight))
        } else {
            CVPixelBufferUnlockBaseAddress(_pxbuffer, flags)
            return nil
        }

        CVPixelBufferUnlockBaseAddress(_pxbuffer, flags)
        return _pxbuffer
    }

    return nil
}
I got this code from a previous StackOverflow post (last answer here). I recognize that the code may not be correct, but I have no idea how to do this myself. I believe this is the section that contains the error. The model calls for the following input type: Image<RGB, 224, 224>
You don't need to do a bunch of image mangling yourself to use a Core ML model with an image — the new Vision framework can do that for you.
import Vision
import CoreML

let model = try VNCoreMLModel(for: MyCoreMLGeneratedModelClass().model)
let request = VNCoreMLRequest(model: model, completionHandler: myResultsMethod)
let handler = VNImageRequestHandler(url: myImageURL)
try handler.perform([request])

func myResultsMethod(request: VNRequest, error: Error?) {
    guard let results = request.results as? [VNClassificationObservation]
        else { fatalError("huh") }
    for classification in results {
        print(classification.identifier, // the scene label
              classification.confidence)
    }
}
The WWDC17 session on Vision should have a bit more info — it's tomorrow afternoon.
You can use pure Core ML, but you should resize the image to 224×224 first:
DispatchQueue.global(qos: .userInitiated).async {
    // Resnet50 expects a 224 x 224 image, so we resize and crop the source image
    let inputImageSize: CGFloat = 224.0
    let minLen = min(image.size.width, image.size.height)
    let resizedImage = image.resize(to: CGSize(width: inputImageSize * image.size.width / minLen,
                                               height: inputImageSize * image.size.height / minLen))
    let croppedToSquareImage = resizedImage.cropToSquare()

    guard let pixelBuffer = croppedToSquareImage?.pixelBuffer() else {
        fatalError()
    }
    guard let classifierOutput = try? self.classifier.prediction(image: pixelBuffer) else {
        fatalError()
    }

    DispatchQueue.main.async {
        self.title = classifierOutput.classLabel
    }
}

// ...

extension UIImage {
    func resize(to newSize: CGSize) -> UIImage {
        UIGraphicsBeginImageContextWithOptions(CGSize(width: newSize.width, height: newSize.height), true, 1.0)
        self.draw(in: CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height))
        let resizedImage = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()
        return resizedImage
    }

    func cropToSquare() -> UIImage? {
        guard let cgImage = self.cgImage else {
            return nil
        }
        // shrink the longer side to the shorter one
        var imageHeight = self.size.height
        var imageWidth = self.size.width
        if imageHeight > imageWidth {
            imageHeight = imageWidth
        } else {
            imageWidth = imageHeight
        }

        let size = CGSize(width: imageWidth, height: imageHeight)
        let x = ((CGFloat(cgImage.width) - size.width) / 2).rounded()
        let y = ((CGFloat(cgImage.height) - size.height) / 2).rounded()

        let cropRect = CGRect(x: x, y: y, width: size.width, height: size.height)
        if let croppedCgImage = cgImage.cropping(to: cropRect) {
            return UIImage(cgImage: croppedCgImage, scale: self.scale, orientation: self.imageOrientation)
        }
        return nil
    }

    func pixelBuffer() -> CVPixelBuffer? {
        let width = self.size.width
        let height = self.size.height
        let attrs = [kCVPixelBufferCGImageCompatibilityKey: kCFBooleanTrue,
                     kCVPixelBufferCGBitmapContextCompatibilityKey: kCFBooleanTrue] as CFDictionary
        var pixelBuffer: CVPixelBuffer?
        let status = CVPixelBufferCreate(kCFAllocatorDefault,
                                         Int(width),
                                         Int(height),
                                         kCVPixelFormatType_32ARGB,
                                         attrs,
                                         &pixelBuffer)
        guard let resultPixelBuffer = pixelBuffer, status == kCVReturnSuccess else {
            return nil
        }

        CVPixelBufferLockBaseAddress(resultPixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
        let pixelData = CVPixelBufferGetBaseAddress(resultPixelBuffer)

        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        guard let context = CGContext(data: pixelData,
                                      width: Int(width),
                                      height: Int(height),
                                      bitsPerComponent: 8,
                                      bytesPerRow: CVPixelBufferGetBytesPerRow(resultPixelBuffer),
                                      space: rgbColorSpace,
                                      bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue) else {
            return nil
        }

        // flip the context and draw the UIImage into the buffer
        context.translateBy(x: 0, y: height)
        context.scaleBy(x: 1.0, y: -1.0)

        UIGraphicsPushContext(context)
        self.draw(in: CGRect(x: 0, y: 0, width: width, height: height))
        UIGraphicsPopContext()

        CVPixelBufferUnlockBaseAddress(resultPixelBuffer, CVPixelBufferLockFlags(rawValue: 0))

        return resultPixelBuffer
    }
}
You can find the expected input image size in the .mlmodel file:
You can find a demo project that uses both the pure Core ML and the Vision variants here: https://github.com/handsomecode/iOS11-Demos/tree/coreml_vision/CoreML/CoreMLDemo
If the input is a UIImage rather than a URL, and you want to use VNImageRequestHandler, you can use CIImage:
func updateClassifications(for image: UIImage) {
    let orientation = CGImagePropertyOrientation(image.imageOrientation)
    guard let ciImage = CIImage(image: image) else { return }
    let handler = VNImageRequestHandler(ciImage: ciImage, orientation: orientation)
    // ... then perform the VNCoreMLRequest with this handler, as shown above
}
From Classifying Images with Vision and Core ML
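One detail worth knowing: CGImagePropertyOrientation has no built-in initializer taking a UIImage.Orientation; Apple's sample code defines the bridging initializer itself. A sketch of that mapping:

import UIKit
import ImageIO

extension CGImagePropertyOrientation {
    // Maps UIKit's image orientation to the EXIF-style CGImagePropertyOrientation.
    init(_ uiOrientation: UIImage.Orientation) {
        switch uiOrientation {
        case .up: self = .up
        case .upMirrored: self = .upMirrored
        case .down: self = .down
        case .downMirrored: self = .downMirrored
        case .left: self = .left
        case .leftMirrored: self = .leftMirrored
        case .right: self = .right
        case .rightMirrored: self = .rightMirrored
        @unknown default: self = .up
        }
    }
}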

Cropping a captured still image

I want a specific part of my captured image to be cropped into a circular shape right before it's displayed.
Here is the code for capturing still images:
@IBAction func takePhoto(sender: UIButton) {
    self.stillImageOutput!.captureStillImageAsynchronouslyFromConnection(self.stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo)) { (buffer: CMSampleBuffer!, error: NSError!) -> Void in
        let image = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(buffer)
        let data_image = UIImage(data: image)
        self.previewImage.image = data_image
    }
}
Thank you
if let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(buffer) {
    if let image = UIImage(data: imageData) {
        // use the shorter side so the square fits inside the image
        let square = image.size.width < image.size.height
            ? CGSize(width: image.size.width, height: image.size.width)
            : CGSize(width: image.size.height, height: image.size.height)
        let imageView = UIImageView(frame: CGRect(origin: CGPoint(x: 0, y: 0), size: square))
        imageView.contentMode = UIViewContentMode.ScaleAspectFill
        imageView.image = image
        // a corner radius of half the width masks the view to a circle
        imageView.layer.cornerRadius = square.width / 2
        imageView.layer.masksToBounds = true
        UIGraphicsBeginImageContext(imageView.bounds.size)
        imageView.layer.renderInContext(UIGraphicsGetCurrentContext()!)
        let result = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        self.previewImage.image = result
    }
}
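An alternative sketch (not from the original answer, written in current Swift; the helper name `circularCrop` is hypothetical) that avoids the throwaway UIImageView: clip a bitmap context with a circular UIBezierPath and draw the image directly.

import UIKit

// Hypothetical helper: crops `image` to a centered circle.
func circularCrop(_ image: UIImage) -> UIImage? {
    let side = min(image.size.width, image.size.height)
    let square = CGRect(x: 0, y: 0, width: side, height: side)
    UIGraphicsBeginImageContextWithOptions(square.size, false, image.scale)
    defer { UIGraphicsEndImageContext() }
    // everything drawn after addClip() is confined to the circle
    UIBezierPath(ovalIn: square).addClip()
    // center the image inside the square before drawing
    image.draw(at: CGPoint(x: (side - image.size.width) / 2,
                           y: (side - image.size.height) / 2))
    return UIGraphicsGetImageFromCurrentImageContext()
}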