I have the following code to draw text over an NSImage, but the resulting image gets resized smaller when I save it to disk.
What am I doing wrong? Please advise.
func drawText(image: NSImage) -> NSImage {
    let text = "Sample Text"
    let font = NSFont.boldSystemFont(ofSize: 18)
    let imageRect = CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height)
    let textRect = CGRect(x: 5, y: 5, width: image.size.width - 5, height: image.size.height - 5)
    let textStyle = NSMutableParagraphStyle.default().mutableCopy() as! NSMutableParagraphStyle
    let textFontAttributes = [
        NSFontAttributeName: font,
        NSForegroundColorAttributeName: NSColor.white,
        NSParagraphStyleAttributeName: textStyle
    ]
    let im = NSImage(size: image.size)
    let rep = NSBitmapImageRep(bitmapDataPlanes: nil,
                               pixelsWide: Int(image.size.width),
                               pixelsHigh: Int(image.size.height),
                               bitsPerSample: 8,
                               samplesPerPixel: 4,
                               hasAlpha: true,
                               isPlanar: false,
                               colorSpaceName: NSCalibratedRGBColorSpace,
                               bytesPerRow: 0,
                               bitsPerPixel: 0)!
    im.addRepresentation(rep)
    im.lockFocus()
    image.draw(in: imageRect)
    text.draw(in: textRect, withAttributes: textFontAttributes)
    im.unlockFocus()
    return im
}
This is a different approach: use a temporary NSView to draw the image and the text, then cache the result in a new image (code is Swift 4). The benefit is that you don't need to deal with pixels.
class ImageView: NSView {
    var image: NSImage
    var text: String

    init(image: NSImage, text: String) {
        self.image = image
        self.text = text
        super.init(frame: NSRect(origin: NSZeroPoint, size: image.size))
    }

    required init?(coder decoder: NSCoder) { fatalError() }

    override func draw(_ dirtyRect: NSRect) {
        let font = NSFont.boldSystemFont(ofSize: 18)
        let textRect = CGRect(x: 5, y: 5, width: image.size.width - 5, height: image.size.height - 5)
        image.draw(in: dirtyRect)
        text.draw(in: textRect, withAttributes: [.font: font, .foregroundColor: NSColor.white])
    }

    var outputImage: NSImage {
        let imageRep = bitmapImageRepForCachingDisplay(in: frame)!
        cacheDisplay(in: frame, to: imageRep)
        let tiffData = imageRep.tiffRepresentation!
        return NSImage(data: tiffData)!
    }
}
To use it, initialize a view
let image = ... // get some image
let view = ImageView(image: image, text: "Sample Text")
and get the new image
let imageWithText = view.outputImage
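Since the original problem was about saving to disk, here is a short sketch of writing the result out as PNG (the output path is just an example):

// A sketch: encode the composed image as PNG and write it to disk.
if let tiff = imageWithText.tiffRepresentation,
   let png = NSBitmapImageRep(data: tiff)?.representation(using: .png, properties: [:]) {
    try? png.write(to: URL(fileURLWithPath: "/tmp/imageWithText.png"))
}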
Note:
The paragraph style is not used at all, but if you want to create a mutable paragraph style just write
let textStyle = NSMutableParagraphStyle()
Are you mixing up pixels and points?
Depending on whether your screen is 2x or 3x, the image comes out 2 or 3 times smaller.
Here is more detailed info (scroll down to "Converting between pixels and points"):
http://blog.fluidui.com/designing-for-mobile-101-pixels-points-and-resolutions/
But keep in mind that:
NSImage is resolution aware and uses a HiDPI graphics context when you lockFocus on a system with a retina screen.
The image dimensions you pass to your NSBitmapImageRep initializer are in points (not pixels). A 150.0-point-wide image therefore uses 300 horizontal pixels in a @2x context.
Source:
How to save PNG file from NSImage (retina issues)
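Putting the quoted points together, one way to keep exact pixel dimensions is to draw into the bitmap rep through its own NSGraphicsContext instead of relying on lockFocus. This is a minimal sketch, assuming you want the output to keep the source's pixel dimensions; the helper name is mine:

import Cocoa

// Sketch: size the rep in real pixels taken from the source's first
// representation, then draw through a context bound to that rep so the
// screen's backing scale never enters the picture.
func annotate(image: NSImage, with text: String) -> NSImage {
    let pixelsWide = image.representations.first?.pixelsWide ?? Int(image.size.width)
    let pixelsHigh = image.representations.first?.pixelsHigh ?? Int(image.size.height)
    let rep = NSBitmapImageRep(bitmapDataPlanes: nil,
                               pixelsWide: pixelsWide,
                               pixelsHigh: pixelsHigh,
                               bitsPerSample: 8,
                               samplesPerPixel: 4,
                               hasAlpha: true,
                               isPlanar: false,
                               colorSpaceName: .calibratedRGB,
                               bytesPerRow: 0,
                               bitsPerPixel: 0)!
    rep.size = image.size // point size unchanged; pixel size fixed above
    NSGraphicsContext.saveGraphicsState()
    NSGraphicsContext.current = NSGraphicsContext(bitmapImageRep: rep)
    image.draw(in: NSRect(origin: .zero, size: image.size))
    text.draw(in: NSRect(x: 5, y: 5, width: image.size.width - 5, height: image.size.height - 5),
              withAttributes: [.font: NSFont.boldSystemFont(ofSize: 18),
                               .foregroundColor: NSColor.white])
    NSGraphicsContext.restoreGraphicsState()
    let result = NSImage(size: image.size)
    result.addRepresentation(rep)
    return result
}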
The following simple app works for me. Enjoy ;)
import Cocoa

class ViewController: NSViewController {
    func save(image: NSImage, imageURL: String, format: String) -> Bool {
        let bMImg = NSBitmapImageRep(data: (image.tiffRepresentation)!)
        switch format {
        case ".png":
            let filepath = URL(fileURLWithPath: imageURL + ".png")
            let dataToSave = bMImg?.representation(using: NSBitmapImageRep.FileType.png, properties: [NSBitmapImageRep.PropertyKey.compressionFactor: 1])
            do {
                try dataToSave?.write(to: filepath)
                return true
            } catch {
                return false
            }
        default:
            return false
        }
    }

    func draw(text: String, image: NSImage) -> NSImage {
        let font = NSFont.boldSystemFont(ofSize: 18)
        let imageRect = CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height)
        let textRect = CGRect(x: 5, y: 5, width: image.size.width - 5, height: image.size.height - 5)
        let textStyle = NSMutableParagraphStyle.default.mutableCopy() as! NSMutableParagraphStyle
        let textFontAttributes = [
            NSAttributedStringKey.font: font,
            NSAttributedStringKey.foregroundColor: NSColor.white,
            NSAttributedStringKey.paragraphStyle: textStyle
        ]
        let im = NSImage(size: image.size)
        let rep = NSBitmapImageRep(bitmapDataPlanes: nil,
                                   pixelsWide: Int(image.size.width),
                                   pixelsHigh: Int(image.size.height),
                                   bitsPerSample: 8,
                                   samplesPerPixel: 4,
                                   hasAlpha: true,
                                   isPlanar: false,
                                   colorSpaceName: NSColorSpaceName.calibratedRGB,
                                   bytesPerRow: 0,
                                   bitsPerPixel: 0)!
        im.addRepresentation(rep)
        im.lockFocus()
        image.draw(in: imageRect)
        text.draw(in: textRect, withAttributes: textFontAttributes)
        im.unlockFocus()
        return im
    }
    @IBAction func action(_ sender: NSButton) {
        let dialog = NSOpenPanel()
        dialog.title = "Choose an image..."
        dialog.showsResizeIndicator = true
        dialog.showsHiddenFiles = false
        dialog.canChooseDirectories = true
        dialog.canCreateDirectories = true
        dialog.allowsMultipleSelection = false
        dialog.allowedFileTypes = ["png", "jpg"]
        if dialog.runModal() == NSApplication.ModalResponse.OK {
            guard let url = dialog.url,
                let imageCIImage = CIImage(contentsOf: url) else {
                    return
            }
            let rep = NSCIImageRep(ciImage: imageCIImage)
            let nsImage = NSImage(size: rep.size)
            nsImage.addRepresentation(rep)
            let imageWithText = draw(text: "ABC", image: nsImage)
            if save(image: imageWithText, imageURL: "imageWithText", format: ".png") {
                print("Success")
            } else {
                print("ERROR: Failed to save image")
            }
        } else {
            // User clicked "Cancel"
            return
        }
    }
}
Related
I'm trying to add an Aztec barcode to a PDF using UIGraphicsPDFRenderer. My issue is that the result is blurry; I thought the fix was setting interpolationQuality. Thanks for your help.
let renderer = UIGraphicsPDFRenderer(bounds: CGRect(x: 0, y: 0, width: 70.8661, height: 70.8661))
return renderer.pdfData { ctx in
    ctx.beginPage()
    ctx.cgContext.interpolationQuality = .none // Doesn't do anything
    let barcode = generateQRCode(from: UUID().description)
    barcode.draw(in: CGRect(x: 0, y: 0, width: 70.8661, height: 70.8661))
}
func generateQRCode(from string: String) -> UIImage {
    // `filter` and `context` are properties defined elsewhere in the question's code
    filter.message = Data(string.utf8)
    if let outputImage = filter.outputImage {
        if let cgimg = context.createCGImage(outputImage, from: outputImage.extent) {
            return UIImage(cgImage: cgimg)
        }
    }
    return UIImage()
}
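A common fix for blurry generator output (a sketch, not from the original thread): scale the CIImage up with a transform before rasterizing it, so the barcode modules stay sharp instead of being interpolated from a tiny bitmap. The aztecCodeGenerator filter (CoreImage.CIFilterBuiltins, iOS 13+) and the scale factor below are assumptions:

import UIKit
import CoreImage.CIFilterBuiltins

// Sketch: enlarge the CIImage before creating the CGImage. The factor of 10
// is arbitrary; pick one that yields at least the pixel size the PDF needs.
func generateSharpAztecCode(from string: String, scale: CGFloat = 10) -> UIImage? {
    let filter = CIFilter.aztecCodeGenerator()
    filter.message = Data(string.utf8)
    guard let output = filter.outputImage?
            .transformed(by: CGAffineTransform(scaleX: scale, y: scale)),
          let cgImage = CIContext().createCGImage(output, from: output.extent) else {
        return nil
    }
    return UIImage(cgImage: cgImage)
}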
This is for Swift 5 on macOS
I am trying to write some text to a generated PDF. I am able to load a background image onto the pages, but when I call my drawText method, the text does not make it onto either page.
I tried drawing an NSString into the context via its draw() method, and that did not work either. I'm hoping to get this working so I can add more text, including text boxes, etc.
What am I doing wrong? Thanks for any pointers.
import Cocoa
import CoreText
import Quartz

extension NSImage {
    /*
     Converts an NSImage to a CGImage for rendering in a CGContext
     Credit - Xue Yu
     - https://gist.github.com/KrisYu/83d7d97cae35a0b10fd238e5c86d288f
     */
    var toCGImage: CGImage {
        var imageRect = NSRect(x: 0, y: 0, width: pageWidth, height: pageHeight)
        guard let image = cgImage(forProposedRect: &imageRect, context: nil, hints: nil) else {
            abort()
        }
        return image
    }
}

class PDFText {
    /*
     Create a non-nil CGContext
     Credit - hmali - 3/15/2019
     https://stackoverflow.com/questions/41100895/empty-cgcontext
     */
    var pdfContext = CGContext(data: nil,
                               width: 0,
                               height: 0,
                               bitsPerComponent: 1,
                               bytesPerRow: 1,
                               space: CGColorSpace.init(name: CGColorSpace.sRGB)!,
                               bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)

    let textRect = CGRect(x: 295, y: 350, width: 100, height: 100)

    func createPDF() {
        let filePath = "/Users/Shared/Text.pdf"
        let fileURL = NSURL(fileURLWithPath: filePath)
        pdfContext = CGContext(fileURL, mediaBox: &backgroundRect, nil)
        pdfContext!.beginPDFPage(nil)
        drawBackground()
        drawText("This is page 1")
        pdfContext!.endPDFPage()
        pdfContext!.beginPDFPage(nil)
        drawBackground()
        drawText("This is page 1")
        pdfContext!.endPDFPage()
        pdfContext!.closePDF()
    }

    func drawBackground() {
        let cgImage = NSImage(contentsOfFile: "/Users/Shared/background.png")?.toCGImage
        pdfContext?.draw(cgImage!, in: CGRect(x: 0, y: 0, width: Int(72*8.5), height: Int(72*11)))
    }

    func drawText(_ text: String) {
        let style = NSMutableParagraphStyle()
        style.alignment = .center
        let attr = [NSAttributedString.Key.font: NSFont(name: "Helvetica", size: 16.0),
                    NSAttributedString.Key.foregroundColor: NSColor.purple,
                    NSAttributedString.Key.backgroundColor: NSColor.clear,
                    NSAttributedString.Key.paragraphStyle: style]
        let attrText = NSAttributedString(string: text, attributes: attr as [NSAttributedString.Key: Any])
        pdfContext?.saveGState()
        pdfContext?.translateBy(x: attrText.size().width, y: attrText.size().height)
        attrText.draw(with: textRect)
        pdfContext?.restoreGState()
    }
}
Closing an open question that I got worked out (complete code).
Swift 5.4 on macOS
import Cocoa
import CoreText
import Quartz

var pageWidth: CGFloat = 72*8.5
var pageHeight: CGFloat = 72*11.0
var pageRect: CGRect = CGRect(x: 0, y: 0, width: pageWidth, height: pageHeight)

extension NSImage {
    /*
     Converts an NSImage to a CGImage for rendering in a CGContext
     Credit - Xue Yu
     - https://gist.github.com/KrisYu/83d7d97cae35a0b10fd238e5c86d288f
     */
    var toCGImage: CGImage {
        var imageRect = NSRect(x: 0, y: 0, width: pageWidth, height: pageHeight)
        guard let image = cgImage(forProposedRect: &imageRect, context: nil, hints: nil) else {
            abort()
        }
        return image
    }
}
class PDFText {
    /*
     Create a non-nil empty CGContext
     Credit - hmali - 3/15/2019
     https://stackoverflow.com/questions/41100895/empty-cgcontext
     */
    var pdfContext = CGContext(data: nil,
                               width: 0,
                               height: 0,
                               bitsPerComponent: 1,
                               bytesPerRow: 1,
                               space: CGColorSpace.init(name: CGColorSpace.sRGB)!,
                               bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)

    // Set a rectangle to be in the center of the page
    let textRect = CGRect(x: pageRect.midX - 50, y: pageRect.midY - 50, width: 100, height: 100)

    func createPDF() {
        let filePath = "/Users/Shared/Text.pdf"
        let fileURL = NSURL(fileURLWithPath: filePath)
        pdfContext = CGContext(fileURL, mediaBox: &pageRect, nil)
        // This must be called to begin a page in a PDF document
        pdfContext!.beginPDFPage(nil)
        drawBackground()
        drawText(text: "This is page 1")
        // This has to be called prior to writing another page to the PDF document
        pdfContext!.endPDFPage()
        pdfContext!.beginPDFPage(nil)
        drawBackground()
        drawText(text: "This is page 2")
        // Call this before closing the document.
        pdfContext!.endPDFPage()
        pdfContext!.closePDF()
    }

    func drawBackground() {
        // Draws an image into the graphics context.
        // NOTE: If the image is not sized for the specified rectangle it will be
        // scaled (up/down) automatically to fit within the rectangle.
        let cgImage = NSImage(contentsOfFile: "/Users/Shared/background.png")?.toCGImage
        pdfContext?.draw(cgImage!, in: pageRect)
    }

    func drawText(text: String) {
        // Credit: Nutchaphon Rewik, https://github.com/nRewik/SimplePDF
        // Create a paragraph style to be used with the attributed string
        let paragraphStyle = NSMutableParagraphStyle()
        paragraphStyle.alignment = .center
        // Set up the attributes to be applied to the attributed text
        let stringAttributes = [NSAttributedString.Key.font: NSFont(name: "Helvetica", size: 16.0),
                                NSAttributedString.Key.foregroundColor: NSColor.purple,
                                NSAttributedString.Key.backgroundColor: NSColor.clear,
                                NSAttributedString.Key.paragraphStyle: paragraphStyle]
        // Create the attributed string
        let attributedString = NSAttributedString(string: text, attributes: stringAttributes as [NSAttributedString.Key: Any])
        // Set up a CoreText framesetter that encloses the attributed string
        let frameSetter = CTFramesetterCreateWithAttributedString(attributedString)
        // Get the frame size for the attributed string (use the UTF-16 length,
        // which is what CoreText expects)
        let frameSize = CTFramesetterSuggestFrameSizeWithConstraints(frameSetter, CFRangeMake(0, attributedString.length), nil, textRect.size, nil)
        // Save the graphics state of the context
        pdfContext!.saveGState()
        // Put the text matrix into a known state. This ensures that no old scaling
        // factors are left in place.
        pdfContext!.textMatrix = CGAffineTransform.identity
        // Create a path object to enclose the text.
        let framePath = CGPath(rect: CGRect(x: textRect.minX, y: textRect.midY - frameSize.height/2, width: textRect.width, height: frameSize.height), transform: nil)
        // Get the frame that will do the rendering. The CFRange specifies
        // only the starting point. The framesetter lays out as much text as will fit into
        // the frame or until it runs out of text.
        let frameRef = CTFramesetterCreateFrame(frameSetter, CFRange(location: 0, length: 0), framePath, nil)
        // Draw the CoreText frame (which includes the text) into the graphics context.
        CTFrameDraw(frameRef, pdfContext!)
        // Restore the previous graphics state.
        pdfContext?.restoreGState()
    }
}
let pdf = PDFText()
pdf.createPDF()
I'm trying to place an icon (in the form of an image) next to text in a UILabel. The icons are imported into the assets in all three sizes and are not blurry at all when I simply place them in a normal UIImageView.
However, within the NSTextAttachment they suddenly become extremely blurry and too big as well.
I've already tried several things on my own and also tried nearly every snippet I could find online; nothing helps. This is what I'm left with:
func updateWinnableCoins(coins: Int) {
    let attachImg = NSTextAttachment()
    attachImg.image = resizeImage(image: #imageLiteral(resourceName: "geld"), targetSize: CGSize(width: 17.0, height: 17.0))
    attachImg.setImageHeight(height: 17.0)
    let imageOffsetY: CGFloat = -3.0
    attachImg.bounds = CGRect(x: 0, y: imageOffsetY, width: attachImg.image!.size.width, height: attachImg.image!.size.height)
    let attchStr = NSAttributedString(attachment: attachImg)
    let completeText = NSMutableAttributedString(string: "")
    let tempText = NSMutableAttributedString(string: "You can win " + String(coins) + " ")
    completeText.append(tempText)
    completeText.append(attchStr)
    self.lblWinnableCoins.textAlignment = .left
    self.lblWinnableCoins.attributedText = completeText
}
func resizeImage(image: UIImage, targetSize: CGSize) -> UIImage {
    let newRect = CGRect(x: 0, y: 0, width: targetSize.width, height: targetSize.height).integral
    UIGraphicsBeginImageContextWithOptions(targetSize, false, 0)
    let context = UIGraphicsGetCurrentContext()
    // Set the quality level to use when rescaling
    context!.interpolationQuality = CGInterpolationQuality.default
    let flipVertical = CGAffineTransform(a: 1, b: 0, c: 0, d: -1, tx: 0, ty: targetSize.height)
    context!.concatenate(flipVertical)
    // Draw into the context; this scales the image
    context?.draw(image.cgImage!, in: CGRect(x: 0.0, y: 0.0, width: newRect.width, height: newRect.height))
    // Get the resized image from the context as a UIImage
    let newImageRef = context!.makeImage()! as CGImage
    let newImage = UIImage(cgImage: newImageRef)
    UIGraphicsEndImageContext()
    return newImage
}
extension NSTextAttachment {
    func setImageHeight(height: CGFloat) {
        guard let image = image else { return }
        let ratio = image.size.width / image.size.height
        bounds = CGRect(x: bounds.origin.x, y: bounds.origin.y, width: ratio * height, height: height)
    }
}
And this is how it looks:
The font size of the UILabel is 17, so I set the text attachment to 17 as well. When I set it to 9 it fits, but it's still very blurry.
What can I do about that?
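One approach worth trying (a sketch under assumptions, not a confirmed fix from this thread): skip the bitmap-context resize entirely and let the attachment's bounds do the scaling, so the @2x/@3x detail of the original asset is preserved. The capHeight-based sizing is my assumption; "geld" is the asset name from the question:

import UIKit

// Sketch: attach the original asset and size it via bounds instead of
// resampling it into a new bitmap (which is where the scale detail gets lost).
func makeCoinText(coins: Int, font: UIFont) -> NSAttributedString {
    let attachment = NSTextAttachment()
    guard let image = UIImage(named: "geld") else { return NSAttributedString() }
    attachment.image = image
    let height = font.capHeight // assumption: match the font's cap height
    let width = height * image.size.width / image.size.height
    attachment.bounds = CGRect(x: 0, y: 0, width: width, height: height)
    let text = NSMutableAttributedString(string: "You can win \(coins) ", attributes: [.font: font])
    text.append(NSAttributedString(attachment: attachment))
    return text
}

Usage would be something like lblWinnableCoins.attributedText = makeCoinText(coins: coins, font: lblWinnableCoins.font).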
I have the following Objective-C code to write text onto an image. I'm new to Swift. How can I do this in Swift?
float width = 10.0;
float height = 10.0;
NSImage *finalImage = [[NSImage alloc] initWithSize:NSMakeSize(width, height)];

// obtain images - your sources may vary
NSImage *overlay = [[NSImage alloc] initWithData:[NSData dataWithContentsOfFile:@"/path/to/overlay_image.jpg"]];
NSImage *mainImage = [[NSImage alloc] initWithData:[NSData dataWithContentsOfFile:@"/path/to/main_image.jpg"]];

[finalImage lockFocus];
// draw the base image
[mainImage drawInRect:NSMakeRect(0, 0, width, height)
             fromRect:NSZeroRect operation:NSCompositeSourceOver fraction:1.0];
// draw the overlay image at some offset point
[overlay drawInRect:NSMakeRect(10, 10, [overlay size].width, [overlay size].height)
           fromRect:NSZeroRect operation:NSCompositeSourceOver fraction:1.0];
[finalImage unlockFocus];

NSData *finalData = [finalImage TIFFRepresentation];
[[[NSBitmapImageRep imageRepWithData:finalData] representationUsingType:NSJPEGFileType properties:nil] writeToFile:[NSString stringWithFormat:@"/path/to/folder/new_image.jpg"] atomically:YES];
UPDATE:
I have the following method to draw a string onto an image, but when I use it, portions of the image get cropped out. Something seems wrong with it. Please advise.
func drawText(image: NSImage) -> NSImage {
    let text = "Sample Text"
    let font = NSFont.boldSystemFont(ofSize: 18)
    let imageRect = CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height)
    let textRect = CGRect(x: 5, y: 5, width: image.size.width - 5, height: image.size.height - 5)
    let textStyle = NSMutableParagraphStyle.default().mutableCopy() as! NSMutableParagraphStyle
    let textFontAttributes = [
        NSFontAttributeName: font,
        NSForegroundColorAttributeName: NSColor.white,
        NSParagraphStyleAttributeName: textStyle
    ]
    let im = NSImage(size: image.size)
    let rep = NSBitmapImageRep(bitmapDataPlanes: nil,
                               pixelsWide: Int(image.size.width),
                               pixelsHigh: Int(image.size.height),
                               bitsPerSample: 8,
                               samplesPerPixel: 4,
                               hasAlpha: true,
                               isPlanar: false,
                               colorSpaceName: NSCalibratedRGBColorSpace,
                               bytesPerRow: 0,
                               bitsPerPixel: 0)!
    im.addRepresentation(rep)
    im.lockFocus()
    image.draw(in: imageRect)
    text.draw(in: textRect, withAttributes: textFontAttributes)
    im.unlockFocus()
    return im
}
Here's this code in Swift 3
let width: CGFloat = 10.0
let height: CGFloat = 10.0
let finalImage = NSImage(size: NSMakeSize(width, height))

// obtain images - your sources may vary
// (note: these are file paths, so use URL(fileURLWithPath:), not URL(string:))
var overlay: NSImage?
var mainImage: NSImage?
do {
    let data = try Data(contentsOf: URL(fileURLWithPath: "/path/to/overlay_image.jpg"))
    overlay = NSImage(data: data)
} catch {
    print("Unable to get data")
}
do {
    let data = try Data(contentsOf: URL(fileURLWithPath: "/path/to/main_image.jpg"))
    mainImage = NSImage(data: data)
} catch {
    print("Unable to get data")
}

finalImage.lockFocus()
// draw the base image
mainImage?.draw(in: NSMakeRect(0, 0, width, height), from: NSZeroRect, operation: NSCompositingOperation.sourceOver, fraction: 1.0)
// draw the overlay image at some offset point
if let overlay = overlay {
    overlay.draw(in: NSMakeRect(10, 10, overlay.size.width, overlay.size.height), from: NSZeroRect, operation: NSCompositingOperation.sourceOver, fraction: 1.0)
}
finalImage.unlockFocus()

if let finalData = finalImage.tiffRepresentation {
    let outputURL = URL(fileURLWithPath: "/path/to/folder/new_image.jpg")
    do {
        try NSBitmapImageRep(data: finalData)?.representation(using: NSJPEGFileType, properties: [:])?.write(to: outputURL)
    } catch {
        print("Failed to write")
    }
}
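Note that in Swift 4 and later several of these names changed (for example, NSJPEGFileType became NSBitmapImageRep.FileType.jpeg); the final save step would look roughly like this:

// Swift 4+ spelling of the same save step
if let finalData = finalImage.tiffRepresentation,
   let jpegData = NSBitmapImageRep(data: finalData)?.representation(using: .jpeg, properties: [:]) {
    try? jpegData.write(to: URL(fileURLWithPath: "/path/to/folder/new_image.jpg"))
}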
I am trying to get Apple's sample Core ML Models that were demoed at the 2017 WWDC to function correctly. I am using the GoogLeNet to try and classify images (see the Apple Machine Learning Page). The model takes a CVPixelBuffer as an input. I have an image called imageSample.jpg that I'm using for this demo. My code is below:
var sample = UIImage(named: "imageSample")?.cgImage
let bufferThree = getCVPixelBuffer(sample!)

let model = GoogLeNetPlaces()
guard let output = try? model.prediction(input: GoogLeNetPlacesInput.init(sceneImage: bufferThree!)) else {
    fatalError("Unexpected runtime error.")
}
print(output.sceneLabel)
I am always getting the unexpected runtime error in the output rather than an image classification. My code to convert the image is below:
func getCVPixelBuffer(_ image: CGImage) -> CVPixelBuffer? {
    let imageWidth = Int(image.width)
    let imageHeight = Int(image.height)
    let attributes: [NSObject: AnyObject] = [
        kCVPixelBufferCGImageCompatibilityKey: true as AnyObject,
        kCVPixelBufferCGBitmapContextCompatibilityKey: true as AnyObject
    ]
    var pxbuffer: CVPixelBuffer? = nil
    CVPixelBufferCreate(kCFAllocatorDefault,
                        imageWidth,
                        imageHeight,
                        kCVPixelFormatType_32ARGB,
                        attributes as CFDictionary?,
                        &pxbuffer)
    if let _pxbuffer = pxbuffer {
        let flags = CVPixelBufferLockFlags(rawValue: 0)
        CVPixelBufferLockBaseAddress(_pxbuffer, flags)
        let pxdata = CVPixelBufferGetBaseAddress(_pxbuffer)
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGContext(data: pxdata,
                                width: imageWidth,
                                height: imageHeight,
                                bitsPerComponent: 8,
                                bytesPerRow: CVPixelBufferGetBytesPerRow(_pxbuffer),
                                space: rgbColorSpace,
                                bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
        if let _context = context {
            _context.draw(image, in: CGRect(x: 0, y: 0, width: imageWidth, height: imageHeight))
        } else {
            CVPixelBufferUnlockBaseAddress(_pxbuffer, flags)
            return nil
        }
        CVPixelBufferUnlockBaseAddress(_pxbuffer, flags)
        return _pxbuffer
    }
    return nil
}
I got this code from a previous StackOverflow post (last answer here). I recognize that the code may not be correct, but I have no idea how to do this myself. I believe this is the section that contains the error. The model calls for the following input type: Image<RGB,224,224>
You don't need to do a bunch of image mangling yourself to use a Core ML model with an image — the new Vision framework can do that for you.
import Vision
import CoreML

let model = try VNCoreMLModel(for: MyCoreMLGeneratedModelClass().model)
let request = VNCoreMLRequest(model: model, completionHandler: myResultsMethod)
let handler = VNImageRequestHandler(url: myImageURL)
try handler.perform([request]) // perform(_:) throws

func myResultsMethod(request: VNRequest, error: Error?) {
    guard let results = request.results as? [VNClassificationObservation]
        else { fatalError("huh") }
    for classification in results {
        print(classification.identifier, // the scene label
              classification.confidence)
    }
}
The WWDC17 session on Vision should have a bit more info — it's tomorrow afternoon.
You can use pure Core ML, but you should resize the image to (224, 224):
DispatchQueue.global(qos: .userInitiated).async {
    // Resnet50 expects an image 224 x 224, so we should resize and crop the source image
    let inputImageSize: CGFloat = 224.0
    let minLen = min(image.size.width, image.size.height)
    let resizedImage = image.resize(to: CGSize(width: inputImageSize * image.size.width / minLen, height: inputImageSize * image.size.height / minLen))
    let croppedToSquareImage = resizedImage.cropToSquare()

    guard let pixelBuffer = croppedToSquareImage?.pixelBuffer() else {
        fatalError()
    }
    guard let classifierOutput = try? self.classifier.prediction(image: pixelBuffer) else {
        fatalError()
    }

    DispatchQueue.main.async {
        self.title = classifierOutput.classLabel
    }
}
// ...
extension UIImage {
    func resize(to newSize: CGSize) -> UIImage {
        UIGraphicsBeginImageContextWithOptions(CGSize(width: newSize.width, height: newSize.height), true, 1.0)
        self.draw(in: CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height))
        let resizedImage = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()
        return resizedImage
    }

    func cropToSquare() -> UIImage? {
        guard let cgImage = self.cgImage else {
            return nil
        }
        var imageHeight = self.size.height
        var imageWidth = self.size.width
        if imageHeight > imageWidth {
            imageHeight = imageWidth
        } else {
            imageWidth = imageHeight
        }
        let size = CGSize(width: imageWidth, height: imageHeight)
        let x = ((CGFloat(cgImage.width) - size.width) / 2).rounded()
        let y = ((CGFloat(cgImage.height) - size.height) / 2).rounded()
        let cropRect = CGRect(x: x, y: y, width: size.width, height: size.height)
        if let croppedCgImage = cgImage.cropping(to: cropRect) {
            return UIImage(cgImage: croppedCgImage, scale: self.scale, orientation: self.imageOrientation)
        }
        return nil
    }

    func pixelBuffer() -> CVPixelBuffer? {
        let width = self.size.width
        let height = self.size.height
        let attrs = [kCVPixelBufferCGImageCompatibilityKey: kCFBooleanTrue,
                     kCVPixelBufferCGBitmapContextCompatibilityKey: kCFBooleanTrue] as CFDictionary
        var pixelBuffer: CVPixelBuffer?
        let status = CVPixelBufferCreate(kCFAllocatorDefault,
                                         Int(width),
                                         Int(height),
                                         kCVPixelFormatType_32ARGB,
                                         attrs,
                                         &pixelBuffer)
        guard let resultPixelBuffer = pixelBuffer, status == kCVReturnSuccess else {
            return nil
        }
        CVPixelBufferLockBaseAddress(resultPixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
        let pixelData = CVPixelBufferGetBaseAddress(resultPixelBuffer)
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        guard let context = CGContext(data: pixelData,
                                      width: Int(width),
                                      height: Int(height),
                                      bitsPerComponent: 8,
                                      bytesPerRow: CVPixelBufferGetBytesPerRow(resultPixelBuffer),
                                      space: rgbColorSpace,
                                      bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue) else {
            return nil
        }
        // Flip the coordinate system so UIKit drawing lands right side up.
        context.translateBy(x: 0, y: height)
        context.scaleBy(x: 1.0, y: -1.0)
        UIGraphicsPushContext(context)
        self.draw(in: CGRect(x: 0, y: 0, width: width, height: height))
        UIGraphicsPopContext()
        CVPixelBufferUnlockBaseAddress(resultPixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
        return resultPixelBuffer
    }
}
You can find the expected input image size in the .mlmodel file:
A demo project that uses both pure CoreML and Vision variants you can find here: https://github.com/handsomecode/iOS11-Demos/tree/coreml_vision/CoreML/CoreMLDemo
If the input is a UIImage rather than a URL, and you want to use VNImageRequestHandler, you can use CIImage:
func updateClassifications(for image: UIImage) {
    // Note: CGImagePropertyOrientation(_: UIImage.Orientation) is a convenience
    // initializer defined in an extension in Apple's sample code.
    let orientation = CGImagePropertyOrientation(image.imageOrientation)
    guard let ciImage = CIImage(image: image) else { return }
    let handler = VNImageRequestHandler(ciImage: ciImage, orientation: orientation)
}
From Classifying Images with Vision and Core ML
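Note that the snippet above only builds the handler; in Apple's sample the request is then performed on a background queue, roughly like this (assuming request is the VNCoreMLRequest created as shown earlier):

// Sketch: perform(_:) throws, so wrap it in do/catch.
DispatchQueue.global(qos: .userInitiated).async {
    do {
        try handler.perform([request])
    } catch {
        print("Failed to perform classification: \(error)")
    }
}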