Take a snapshot of the current screen with Metal in Swift

I tried:
let scale = UIScreen.mainScreen().scale
UIGraphicsBeginImageContextWithOptions(metalLayer.bounds.size, false, scale)
// metalLayer.renderInContext(UIGraphicsGetCurrentContext()!)
// self.view.layer ...
metalLayer.presentationLayer()!.renderInContext(UIGraphicsGetCurrentContext()!)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
But the result is an empty screenshot. Any help would be nice!
Please keep in mind that I want to take a snapshot of a CAMetalLayer.

To make a screenshot, you need to get the MTLTexture of the framebuffer.
1. If you use MTKView:
let texture = view.currentDrawable!.texture
2. If you don't use MTKView
Here's what I would do: keep a property that holds the last drawable presented to the screen (it has to be a var, since it gets updated every frame):
var lastDrawableDisplayed: CAMetalDrawable?
Then, when you present a drawable to the screen, update it:
let commandBuffer = commandQueue.commandBuffer()
commandBuffer.addCompletedHandler { buffer in
    self.lastDrawableDisplayed = drawable
}
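For context, a rough sketch of how that handler might sit inside a typical per-frame draw call (metalLayer, commandQueue and the render encoding are assumed to be part of your own renderer; this keeps the pre-Swift-4 API used above):
func draw() {
    guard let drawable = metalLayer.nextDrawable() else { return }
    let commandBuffer = commandQueue.commandBuffer()
    // ... encode your render pass into commandBuffer here ...
    commandBuffer.addCompletedHandler { buffer in
        // Remember the drawable that was just rendered so it can be read back later.
        self.lastDrawableDisplayed = drawable
    }
    commandBuffer.presentDrawable(drawable)
    commandBuffer.commit()
}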
Now, whenever you need to take a screenshot, you can get the texture like this:
let texture = lastDrawableDisplayed?.texture
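Note that reading a drawable's texture back on the CPU with getBytes generally requires the layer (or MTKView) not to be framebuffer-only, so you may need to set that up front:
metalLayer.framebufferOnly = false // or metalView.framebufferOnly = false when using MTKView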
OK, now that you have an MTLTexture, you can convert it to a CGImage and then to a UIImage or NSImage.
Here's the code for an OS X playground (MetalKit's MTKTextureLoader is not available in iOS playgrounds), in which I convert an MTLTexture to a CGImage.
I made a small extension on MTLTexture for this.
import Metal
import MetalKit
import Cocoa
let device = MTLCreateSystemDefaultDevice()!
let textureLoader = MTKTextureLoader(device: device)
let path = "path/to/your/image.jpg"
let data = NSData(contentsOfFile: path)!
let texture = try! textureLoader.newTextureWithData(data, options: nil)
extension MTLTexture {

    // Copies the texture's pixel data into a newly malloc'd buffer (4 bytes per pixel).
    func bytes() -> UnsafeMutablePointer<Void> {
        let width = self.width
        let height = self.height
        let rowBytes = self.width * 4
        let p = malloc(width * height * 4)
        self.getBytes(p, bytesPerRow: rowBytes, fromRegion: MTLRegionMake2D(0, 0, width, height), mipmapLevel: 0)
        return p
    }

    // This assumes a .bgra8Unorm texture (the default drawable format),
    // hence the little-endian byte order and skip-first alpha below.
    func toImage() -> CGImage? {
        let p = bytes()
        let pColorSpace = CGColorSpaceCreateDeviceRGB()
        let rawBitmapInfo = CGImageAlphaInfo.NoneSkipFirst.rawValue | CGBitmapInfo.ByteOrder32Little.rawValue
        let bitmapInfo: CGBitmapInfo = CGBitmapInfo(rawValue: rawBitmapInfo)
        let textureSize = self.width * self.height * 4
        let rowBytes = self.width * 4
        let provider = CGDataProviderCreateWithData(nil, p, textureSize, nil)
        let cgImageRef = CGImageCreate(self.width, self.height, 8, 32, rowBytes, pColorSpace, bitmapInfo, provider, nil, true, CGColorRenderingIntent.RenderingIntentDefault)!
        return cgImageRef
    }
}
if let imageRef = texture.toImage() {
    let image = NSImage(CGImage: imageRef, size: NSSize(width: texture.width, height: texture.height))
}

For Swift 4.0, just converting the code provided by haawa:
let lastDrawableDisplayed = metalView?.currentDrawable?.texture
if let imageRef = lastDrawableDisplayed?.toImage() {
    let uiImage: UIImage = UIImage(cgImage: imageRef)
}
extension MTLTexture {

    // Copies the texture's pixel data into a newly malloc'd buffer (4 bytes per pixel).
    func bytes() -> UnsafeMutableRawPointer {
        let width = self.width
        let height = self.height
        let rowBytes = self.width * 4
        let p = malloc(width * height * 4)
        self.getBytes(p!, bytesPerRow: rowBytes, from: MTLRegionMake2D(0, 0, width, height), mipmapLevel: 0)
        return p!
    }

    // This assumes a .bgra8Unorm texture (the default drawable format),
    // hence the little-endian byte order and skip-first alpha below.
    func toImage() -> CGImage? {
        let p = bytes()
        let pColorSpace = CGColorSpaceCreateDeviceRGB()
        let rawBitmapInfo = CGImageAlphaInfo.noneSkipFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue
        let bitmapInfo: CGBitmapInfo = CGBitmapInfo(rawValue: rawBitmapInfo)
        let textureSize = self.width * self.height * 4
        let rowBytes = self.width * 4
        let releaseMaskImagePixelData: CGDataProviderReleaseDataCallback = { (info: UnsafeMutableRawPointer?, data: UnsafeRawPointer, size: Int) -> () in
            // Note: this empty callback leaks the malloc'd buffer; see the memory fix further down.
            return
        }
        let provider = CGDataProvider(dataInfo: nil, data: p, size: textureSize, releaseData: releaseMaskImagePixelData)
        let cgImageRef = CGImage(width: self.width, height: self.height, bitsPerComponent: 8, bitsPerPixel: 32, bytesPerRow: rowBytes, space: pColorSpace, bitmapInfo: bitmapInfo, provider: provider!, decode: nil, shouldInterpolate: true, intent: CGColorRenderingIntent.defaultIntent)!
        return cgImageRef
    }
}
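To tie this back to the original question, the resulting UIImage can then be written to the photo library just as in the question. A minimal sketch (metalView is assumed to be your MTKView; a real app should also check photo-library authorization and avoid the implicit unwraps):
if let texture = metalView?.currentDrawable?.texture, let imageRef = texture.toImage() {
    let uiImage = UIImage(cgImage: imageRef)
    UIImageWriteToSavedPhotosAlbum(uiImage, nil, nil, nil)
}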

I didn't manage to get the accepted answer to work in Swift 4 / Metal 2 with Xcode 9.1 on an iPhone 6s. Therefore I used a slightly different approach, assuming lastDrawableDisplayed is saved as described in the accepted answer. Quick and dirty, without any error handling:
let context = CIContext()
let texture = self.lastDrawableDisplayed!.texture
let cImg = CIImage(mtlTexture: texture, options: nil)!
let cgImg = context.createCGImage(cImg, from: cImg.extent)!
let uiImg = UIImage(cgImage: cgImg)
This is based on the documentation of the CIImage initializer that is used:
init(mtlTexture:options:) Initializes an image object with data supplied by a Metal texture.
and on CIImage Processing, which describes how to create a CGImage using a CIContext:
CIContext() Create[s] a CIContext object (with default options) [...] context.createCGImage Render[s] the output image to a Core Graphics image that you can display or save to a file.
Hope that helps anyone using Swift 4.
Edit: Additionally, I have multiple overlapping CAMetalLayers in my project and want to combine them into a single UIImage. To do that, I keep a reference to the last CAMetalDrawable of each layer: before a new layer is added (and therefore becomes the provider of nextDrawable()), I simply append its lastDrawableDisplayed to a [CAMetalDrawable] array. When "exporting" the layers, I draw all the resulting UIImages one after another into a bitmap-based graphics context and get the final image with UIGraphicsGetImageFromCurrentImageContext(), as sketched below.
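A minimal sketch of that compositing step, assuming layerImages is an array of UIImage already produced from the saved drawables (ordered back to front) and that they all share the same size:
UIGraphicsBeginImageContextWithOptions(layerImages[0].size, false, 0.0)
for image in layerImages {
    // Draw each layer's snapshot on top of the previous ones.
    image.draw(in: CGRect(origin: .zero, size: layerImages[0].size))
}
let combinedImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()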
Edit: If you are having trouble with orientation, try the following:
let uiImg = UIImage(cgImage: cgImg, scale: 1.0, orientation: UIImageOrientation.downMirrored)

The toImage() method on MTLTexture above needs to release the pixel buffer's memory in the release-data callback; otherwise every call leaks the malloc'd buffer:
let releaseMaskImagePixelData: CGDataProviderReleaseDataCallback = { (info: UnsafeMutableRawPointer?, data: UnsafeRawPointer, size: Int) -> () in
    // The buffer was created with malloc() in bytes(), so free it when the data provider is done with it.
    free(UnsafeMutableRawPointer(mutating: data))
}

Swift 4.2
extension MTLTexture {

    // Copies the texture's pixel data into a newly malloc'd buffer (4 bytes per pixel).
    func bytes() -> UnsafeMutableRawPointer {
        let width = self.width
        let height = self.height
        let rowBytes = self.width * 4
        let p = malloc(width * height * 4)
        self.getBytes(p!, bytesPerRow: rowBytes, from: MTLRegionMake2D(0, 0, width, height), mipmapLevel: 0)
        return p!
    }

    // This assumes a .bgra8Unorm texture (the default drawable format),
    // hence the little-endian byte order and skip-first alpha below.
    func toImage() -> CGImage? {
        let p = bytes()
        let pColorSpace = CGColorSpaceCreateDeviceRGB()
        let rawBitmapInfo = CGImageAlphaInfo.noneSkipFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue
        let bitmapInfo: CGBitmapInfo = CGBitmapInfo(rawValue: rawBitmapInfo)
        let textureSize = self.width * self.height * 4
        let rowBytes = self.width * 4
        let releaseMaskImagePixelData: CGDataProviderReleaseDataCallback = { (info: UnsafeMutableRawPointer?, data: UnsafeRawPointer, size: Int) -> () in
            // Free the malloc'd buffer (incorporating the memory fix noted above).
            free(UnsafeMutableRawPointer(mutating: data))
        }
        let provider = CGDataProvider(dataInfo: nil, data: p, size: textureSize, releaseData: releaseMaskImagePixelData)
        let cgImageRef = CGImage(width: self.width, height: self.height, bitsPerComponent: 8, bitsPerPixel: 32, bytesPerRow: rowBytes, space: pColorSpace, bitmapInfo: bitmapInfo, provider: provider!, decode: nil, shouldInterpolate: true, intent: CGColorRenderingIntent.defaultIntent)!
        return cgImageRef
    }
}

Related

Incorrect saving of transparent UIImage to Photo Library as png with UIImageWriteToSavedPhotosAlbum

I have a function cropAlpha() that trims away the extra transparent space around an image.
func cropAlpha() -> UIImage {
let cgImage = self.cgImage!
let width = cgImage.width
let height = cgImage.height
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bytesPerPixel:Int = 4
let bytesPerRow = bytesPerPixel * width
let bitsPerComponent = 8
let bitmapInfo: UInt32 = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Big.rawValue
guard let context = CGContext(data: nil, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo),
let ptr = context.data?.assumingMemoryBound(to: UInt8.self)
else { return self }
context.draw(self.cgImage!, in: CGRect(x: 0, y: 0, width: width, height: height))
var minX = width
var minY = height
var maxX: Int = 0
var maxY: Int = 0
for x in 1 ..< width {
for y in 1 ..< height {
let i = bytesPerRow * Int(y) + bytesPerPixel * Int(x)
let a = CGFloat(ptr[i + 3]) / 255.0
if a == 1 {
if (x < minX) { minX = x }
if (x > maxX) { maxX = x }
if (y < minY) { minY = y }
if (y > maxY) { maxY = y }
}
}
}
let rect = CGRect(x: CGFloat(minX),y: CGFloat(minY), width: CGFloat(maxX - minX), height: CGFloat(maxY-minY))
let croppedImage = self.cgImage!.cropping(to: rect)!
let ret = UIImage(cgImage: croppedImage)
return ret
}
The image returned by this function has transparent elements, and I put it into the image view: presenterImageView.image = imagePNG. It works as it should. But when I try to save the UIImage to the photo library, the transparent background turns white.
let image = maskedImage?.cropAlpha()
let imagePNGData = image!.pngData()
let imagePNG = UIImage(data: imagePNGData!)
UIImageWriteToSavedPhotosAlbum(imagePNG!, nil, nil, nil)
If I don't use that function, I get the result I want, but the image has too much wasted space. I don't understand what could be the reason. Any ideas?
The problem is that UIImageWriteToSavedPhotosAlbum does not properly handle saving a UIImage with premultiplied alpha (or at least the result of saving such an image is not what you expect), and your cropping method uses the premultipliedLast format. You also can't simply change CGImageAlphaInfo to a non-premultiplied format, because that combination is not supported there (you will see the error CGBitmapContextCreate: unsupported parameter combination if you try). What you can do is convert the cropped image to a CIImage, unpremultiply the alpha, and convert back to a UIImage. Your saving code could then look like the code below (however, I recommend removing the force unwrapping from this code if you plan to use it in a final app):
let image = maskedImage?.cropAlpha()
let ciImage = CIImage(image: image!)!.unpremultiplyingAlpha()
let uiImage = UIImage(ciImage: ciImage)
let imagePNGData = uiImage.pngData()
let imagePNG = UIImage(data: imagePNGData!)
UIImageWriteToSavedPhotosAlbum(imagePNG!, nil, nil, nil)
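For reference, a version of the same saving code without force unwrapping might look roughly like this (same steps, just wrapped in a guard):
guard let croppedImage = maskedImage?.cropAlpha(),
    let ciImage = CIImage(image: croppedImage)?.unpremultiplyingAlpha(),
    let imagePNGData = UIImage(ciImage: ciImage).pngData(),
    let imagePNG = UIImage(data: imagePNGData) else { return }
UIImageWriteToSavedPhotosAlbum(imagePNG, nil, nil, nil)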

Convert raw image data to JPEG in Swift

I am trying to convert raw image data to JPEG in Swift, but unfortunately the resulting JPEG image is skewed. Please find below the code used for the conversion.
let rawPtr = (rawImgData as NSData).bytes
let mutableRawPtr = UnsafeMutableRawPointer.init(mutating: rawPtr)
let context = CGContext.init(data: mutableRawPtr,
width: 750,
height: 1334,
bitsPerComponent: 32,
bytesPerRow: (8 * 750)/8,
space: CGColorSpaceCreateDeviceRGB(),
bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)
let imageRef = CGContext.makeImage(context!)
let imageRep = NSBitmapImageRep(cgImage: imageRef()!)
let finalData = imageRep.representation(using: .jpeg,
properties: [NSBitmapImageRep.PropertyKey.compressionFactor : 0.5])
The converted JPEG image (attached in the original post) comes out visibly skewed.
Any help or pointers would be greatly appreciated. Thanks in advance!
Finally figured out the answer
var width:size_t?
var height:size_t?
let bitsPerComponent:size_t = 8
var bytesPerRow:size_t?
var bitmapInfo: UInt32
if #available(OSX 10.12, *) {
bitmapInfo = UInt32(CGImageAlphaInfo.premultipliedFirst.rawValue) | UInt32(CGImageByteOrderInfo.order32Little.rawValue)
} else {
bitmapInfo = UInt32(CGImageAlphaInfo.premultipliedFirst.rawValue)
}
let colorSpace = CGColorSpaceCreateDeviceRGB()
do {
let streamAttributesFuture = self.bitmapStream?.streamAttributes()
streamAttributesFuture?.onQueue(.main, notifyOfCompletion: { (streamAttributesFut) in
})
try streamAttributesFuture?.await()
let dict = streamAttributesFuture?.result
width = (dict?.attributes["width"] as! Int)
height = (dict?.attributes["height"] as! Int)
bytesPerRow = (dict?.attributes["row_size"] as! Int)
} catch let frameError{
print("frame error = \(frameError)")
return
}
let rawPtr = (data as NSData).bytes
let mutableRawPtr = UnsafeMutableRawPointer.init(mutating: rawPtr)
let context = CGContext.init(data: mutableRawPtr, width: width!, height: height!, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow!, space: colorSpace, bitmapInfo: bitmapInfo)
if context == nil {
return
}
let imageRef = context?.makeImage()
let scaledWidth = Int(Double(width!) / 1.5)
let scaledHeight = Int(Double(height!) / 1.5)
let resizeCtx = CGContext.init(data: nil, width: scaledWidth, height: scaledHeight, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow!, space: colorSpace, bitmapInfo: bitmapInfo)
resizeCtx?.draw(imageRef!, in: CGRect(x: 0, y: 0, width: scaledWidth, height: scaledHeight))
resizeCtx?.interpolationQuality = .low
let resizedImgRef = resizeCtx?.makeImage()
let imageRep = NSBitmapImageRep(cgImage: resizedImgRef!)
let finalData = imageRep.representation(using: .jpeg, properties: [NSBitmapImageRep.PropertyKey.compressionFactor : compression])
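If you then want to write the JPEG data to a file, a minimal sketch (the destination URL is just an example) would be:
if let finalData = finalData {
    let outputURL = URL(fileURLWithPath: NSTemporaryDirectory()).appendingPathComponent("converted.jpg")
    try? finalData.write(to: outputURL)
}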
Try this: change imageName to whatever you want, and save it in any directory.
func convertToJPEG(image: UIImage) {
    if let jpegImage = UIImageJPEGRepresentation(image, 1.0) {
        do {
            // Convert and write to the temporary directory.
            let tempDirectoryURL = URL(fileURLWithPath: NSTemporaryDirectory(), isDirectory: true)
            // imageName is assumed to be a String defined elsewhere.
            let newFileName = imageName + ".JPG"
            let targetURL = tempDirectoryURL.appendingPathComponent(newFileName)
            try jpegImage.write(to: targetURL, options: [])
        } catch {
            print(error.localizedDescription)
            print("FAILED")
        }
    } else {
        print("FAILED")
    }
}

Is there an easier way to setup a pixel buffer for CoreML? [duplicate]

I am trying to get Apple's sample Core ML Models that were demoed at the 2017 WWDC to function correctly. I am using the GoogLeNet to try and classify images (see the Apple Machine Learning Page). The model takes a CVPixelBuffer as an input. I have an image called imageSample.jpg that I'm using for this demo. My code is below:
var sample = UIImage(named: "imageSample")?.cgImage
let bufferThree = getCVPixelBuffer(sample!)
let model = GoogLeNetPlaces()
guard let output = try? model.prediction(input: GoogLeNetPlacesInput.init(sceneImage: bufferThree!)) else {
fatalError("Unexpected runtime error.")
}
print(output.sceneLabel)
I am always getting the unexpected runtime error in the output rather than an image classification. My code to convert the image is below:
func getCVPixelBuffer(_ image: CGImage) -> CVPixelBuffer? {
let imageWidth = Int(image.width)
let imageHeight = Int(image.height)
let attributes : [NSObject:AnyObject] = [
kCVPixelBufferCGImageCompatibilityKey : true as AnyObject,
kCVPixelBufferCGBitmapContextCompatibilityKey : true as AnyObject
]
var pxbuffer: CVPixelBuffer? = nil
CVPixelBufferCreate(kCFAllocatorDefault,
imageWidth,
imageHeight,
kCVPixelFormatType_32ARGB,
attributes as CFDictionary?,
&pxbuffer)
if let _pxbuffer = pxbuffer {
let flags = CVPixelBufferLockFlags(rawValue: 0)
CVPixelBufferLockBaseAddress(_pxbuffer, flags)
let pxdata = CVPixelBufferGetBaseAddress(_pxbuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB();
let context = CGContext(data: pxdata,
width: imageWidth,
height: imageHeight,
bitsPerComponent: 8,
bytesPerRow: CVPixelBufferGetBytesPerRow(_pxbuffer),
space: rgbColorSpace,
bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
if let _context = context {
_context.draw(image, in: CGRect.init(x: 0, y: 0, width: imageWidth, height: imageHeight))
}
else {
CVPixelBufferUnlockBaseAddress(_pxbuffer, flags);
return nil
}
CVPixelBufferUnlockBaseAddress(_pxbuffer, flags);
return _pxbuffer;
}
return nil
}
I got this code from a previous StackOverflow post (last answer here). I recognize that the code may not be correct, but I have no idea how to do this myself. I believe this is the section that contains the error. The model calls for the following type of input: Image<RGB,224,224>
You don't need to do a bunch of image mangling yourself to use a Core ML model with an image — the new Vision framework can do that for you.
import Vision
import CoreML
let model = try VNCoreMLModel(for: MyCoreMLGeneratedModelClass().model)
let request = VNCoreMLRequest(model: model, completionHandler: myResultsMethod)
let handler = VNImageRequestHandler(url: myImageURL)
try? handler.perform([request]) // perform(_:) can throw
func myResultsMethod(request: VNRequest, error: Error?) {
guard let results = request.results as? [VNClassificationObservation]
else { fatalError("huh") }
for classification in results {
print(classification.identifier, // the scene label
classification.confidence)
}
}
The WWDC17 session on Vision should have a bit more info — it's tomorrow afternoon.
You can use pure Core ML, but you should resize the image to (224, 224):
DispatchQueue.global(qos: .userInitiated).async {
// Resnet50 expects an image 224 x 224, so we should resize and crop the source image
let inputImageSize: CGFloat = 224.0
let minLen = min(image.size.width, image.size.height)
let resizedImage = image.resize(to: CGSize(width: inputImageSize * image.size.width / minLen, height: inputImageSize * image.size.height / minLen))
let croppedToSquareImage = resizedImage.cropToSquare()
guard let pixelBuffer = croppedToSquareImage?.pixelBuffer() else {
fatalError()
}
guard let classifierOutput = try? self.classifier.prediction(image: pixelBuffer) else {
fatalError()
}
DispatchQueue.main.async {
self.title = classifierOutput.classLabel
}
}
// ...
extension UIImage {
func resize(to newSize: CGSize) -> UIImage {
UIGraphicsBeginImageContextWithOptions(CGSize(width: newSize.width, height: newSize.height), true, 1.0)
self.draw(in: CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height))
let resizedImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return resizedImage
}
func cropToSquare() -> UIImage? {
guard let cgImage = self.cgImage else {
return nil
}
var imageHeight = self.size.height
var imageWidth = self.size.width
if imageHeight > imageWidth {
imageHeight = imageWidth
}
else {
imageWidth = imageHeight
}
let size = CGSize(width: imageWidth, height: imageHeight)
let x = ((CGFloat(cgImage.width) - size.width) / 2).rounded()
let y = ((CGFloat(cgImage.height) - size.height) / 2).rounded()
let cropRect = CGRect(x: x, y: y, width: size.width, height: size.height)
if let croppedCgImage = cgImage.cropping(to: cropRect) {
return UIImage(cgImage: croppedCgImage, scale: self.scale, orientation: self.imageOrientation)
}
return nil
}
func pixelBuffer() -> CVPixelBuffer? {
let width = self.size.width
let height = self.size.height
let attrs = [kCVPixelBufferCGImageCompatibilityKey: kCFBooleanTrue,
kCVPixelBufferCGBitmapContextCompatibilityKey: kCFBooleanTrue] as CFDictionary
var pixelBuffer: CVPixelBuffer?
let status = CVPixelBufferCreate(kCFAllocatorDefault,
Int(width),
Int(height),
kCVPixelFormatType_32ARGB,
attrs,
&pixelBuffer)
guard let resultPixelBuffer = pixelBuffer, status == kCVReturnSuccess else {
return nil
}
CVPixelBufferLockBaseAddress(resultPixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
let pixelData = CVPixelBufferGetBaseAddress(resultPixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
guard let context = CGContext(data: pixelData,
width: Int(width),
height: Int(height),
bitsPerComponent: 8,
bytesPerRow: CVPixelBufferGetBytesPerRow(resultPixelBuffer),
space: rgbColorSpace,
bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue) else {
return nil
}
context.translateBy(x: 0, y: height)
context.scaleBy(x: 1.0, y: -1.0)
UIGraphicsPushContext(context)
self.draw(in: CGRect(x: 0, y: 0, width: width, height: height))
UIGraphicsPopContext()
CVPixelBufferUnlockBaseAddress(resultPixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
return resultPixelBuffer
}
}
You can find the expected input image size in the .mlmodel file.
A demo project that uses both pure CoreML and Vision variants you can find here: https://github.com/handsomecode/iOS11-Demos/tree/coreml_vision/CoreML/CoreMLDemo
If the input is a UIImage rather than a URL, and you want to use VNImageRequestHandler, you can use a CIImage:
func updateClassifications(for image: UIImage) {
let orientation = CGImagePropertyOrientation(image.imageOrientation)
guard let ciImage = CIImage(image: image) else { return }
let handler = VNImageRequestHandler(ciImage: ciImage, orientation: orientation)
}
From Classifying Images with Vision and Core ML
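Note that the CGImagePropertyOrientation(_:) initializer used above is not part of the SDK; it comes from that same Apple sample code. A sketch of the mapping (using the pre-Swift-4.2 UIImageOrientation spelling to match the rest of this page):
extension CGImagePropertyOrientation {
    init(_ uiOrientation: UIImageOrientation) {
        switch uiOrientation {
        case .up: self = .up
        case .upMirrored: self = .upMirrored
        case .down: self = .down
        case .downMirrored: self = .downMirrored
        case .left: self = .left
        case .leftMirrored: self = .leftMirrored
        case .right: self = .right
        case .rightMirrored: self = .rightMirrored
        }
    }
}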

Swift blur extension on UIImage crashes

I have written an extension for UIImage and get nil for the context. Any idea what could be the problem?
public func blur(size: Float) -> UIImage! {
let boxSize = size - (size % 2) + 1
let image = self.CGImage
let inProvider = CGImageGetDataProvider(image)
let height = vImagePixelCount(CGImageGetHeight(image))
let width = vImagePixelCount(CGImageGetWidth(image))
let rowBytes = CGImageGetBytesPerRow(image)
let inBitmapData = CGDataProviderCopyData(inProvider)
let inData = UnsafeMutablePointer<Void>(CFDataGetBytePtr(inBitmapData))
var inBuffer = vImage_Buffer(data: inData, height: height, width: width, rowBytes: rowBytes)
let outData = malloc(CGImageGetBytesPerRow(image) * CGImageGetHeight(image))
var outBuffer = vImage_Buffer(data: outData, height: height, width: width, rowBytes: rowBytes)
let _ = vImageBoxConvolve_ARGB8888(&inBuffer, &outBuffer, nil, 0, 0, UInt32(boxSize), UInt32(boxSize), nil, vImage_Flags(kvImageEdgeExtend))
let colorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGBitmapContextCreate(outBuffer.data, Int(outBuffer.width), Int(outBuffer.height), 8, outBuffer.rowBytes, colorSpace, CGImageGetBitmapInfo(image).rawValue)
let imageRef = CGBitmapContextCreateImage(context)!
let bluredImage = UIImage(CGImage: imageRef)
free(outData)
return bluredImage
}
The solution was to perform the blur on the CGImage rather than on the UIImage directly:
import UIKit
import Accelerate

extension UIImage {
    class func blurEffect(cgImage: CGImageRef) -> UIImage! {
        return UIImage(CGImage: cgImage)
    }
    func blurEffect(boxSize: CGFloat) -> UIImage! {
        return UIImage(CGImage: blurredCGImage(boxSize))
    }
    func blurredCGImage(boxSize: CGFloat) -> CGImageRef! {
        return CGImage!.blurEffect(boxSize)
    }
}

extension CGImage {
    func blurEffect(boxSize: CGFloat) -> CGImageRef! {
        // The box size must be odd for vImageBoxConvolve.
        let boxSize = boxSize - (boxSize % 2) + 1
        let inProvider = CGImageGetDataProvider(self)
        let height = vImagePixelCount(CGImageGetHeight(self))
        let width = vImagePixelCount(CGImageGetWidth(self))
        let rowBytes = CGImageGetBytesPerRow(self)
        let inBitmapData = CGDataProviderCopyData(inProvider)
        let inData = UnsafeMutablePointer<Void>(CFDataGetBytePtr(inBitmapData))
        var inBuffer = vImage_Buffer(data: inData, height: height, width: width, rowBytes: rowBytes)
        let outData = malloc(CGImageGetBytesPerRow(self) * CGImageGetHeight(self))
        var outBuffer = vImage_Buffer(data: outData, height: height, width: width, rowBytes: rowBytes)
        vImageBoxConvolve_ARGB8888(&inBuffer, &outBuffer, nil, 0, 0, UInt32(boxSize), UInt32(boxSize), nil, vImage_Flags(kvImageEdgeExtend))
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGBitmapContextCreate(outBuffer.data, Int(outBuffer.width), Int(outBuffer.height), 8, outBuffer.rowBytes, colorSpace, CGImageGetBitmapInfo(self).rawValue)
        let imageRef = CGBitmapContextCreateImage(context)
        free(outData)
        return imageRef
    }
}
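A hypothetical call site (someImage and imageView are assumed to exist; this keeps the pre-Swift-3 syntax used above):
let blurredImage = someImage.blurEffect(9) // larger box sizes give a stronger blur
imageView.image = blurredImage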

How to reconstruct grayscale image from intensity values?

It is commonly required to get the pixel data from an image or reconstruct that image from pixel data. How can I take an image, convert it to an array of pixel values and then reconstruct it using the pixel array in Swift using CoreGraphics?
The quality of the answers to this question has been all over the place, so I'd like a canonical answer.
Get pixel values as an array
This function can easily be extended to a color image. For simplicity I'm using grayscale, but I have commented the changes to get RGB.
func pixelValuesFromImage(imageRef: CGImage?) -> (pixelValues: [UInt8]?, width: Int, height: Int)
{
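// Note: for an RGB image the changes would roughly be: use CGColorSpaceCreateDeviceRGB(),
// allocate width * height * 4 bytes, pass bytesPerRow: width * 4, and use
// bitmapInfo: CGImageAlphaInfo.noneSkipLast.rawValue instead of 0.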
var width = 0
var height = 0
var pixelValues: [UInt8]?
if let imageRef = imageRef {
let totalBytes = imageRef.width * imageRef.height
let colorSpace = CGColorSpaceCreateDeviceGray()
pixelValues = [UInt8](repeating: 0, count: totalBytes)
pixelValues?.withUnsafeMutableBytes({
width = imageRef.width
height = imageRef.height
let contextRef = CGContext(data: $0.baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: width, space: colorSpace, bitmapInfo: 0)
let drawRect = CGRect(x: 0.0, y:0.0, width: CGFloat(width), height: CGFloat(height))
contextRef?.draw(imageRef, in: drawRect)
})
}
return (pixelValues, width, height)
}
Get image from pixel values
This reconstructs an image, in this case grayscale at 8 bits per pixel, back into a CGImage.
func imageFromPixelValues(pixelValues: [UInt8]?, width: Int, height: Int) -> CGImage?
{
var imageRef: CGImage?
if let pixelValues = pixelValues {
let bitsPerComponent = 8
let bytesPerPixel = 1
let bitsPerPixel = bytesPerPixel * bitsPerComponent
let bytesPerRow = bytesPerPixel * width
let totalBytes = width * height
let unusedCallback: CGDataProviderReleaseDataCallback = { optionalPointer, pointer, valueInt in }
let providerRef = CGDataProvider(dataInfo: nil, data: pixelValues, size: totalBytes, releaseData: unusedCallback)
let bitmapInfo: CGBitmapInfo = [CGBitmapInfo(rawValue: CGImageAlphaInfo.none.rawValue), CGBitmapInfo(rawValue: CGImageByteOrderInfo.orderDefault.rawValue)]
imageRef = CGImage(width: width,
height: height,
bitsPerComponent: bitsPerComponent,
bitsPerPixel: bitsPerPixel,
bytesPerRow: bytesPerRow,
space: CGColorSpaceCreateDeviceGray(),
bitmapInfo: bitmapInfo,
provider: providerRef!,
decode: nil,
shouldInterpolate: false,
intent: .defaultIntent)
}
return imageRef
}
Demoing the code in a Playground
You'll need an image copied into the Playground's Resources folder and then change the filename and extension below to match. The result on the last line is a UIImage constructed from the CGImage.
import Foundation
import CoreGraphics
import UIKit
import PlaygroundSupport
let URL = playgroundSharedDataDirectory.appendingPathComponent("zebra.jpg")
print("URL \(URL)")
var image: UIImage? = nil
if FileManager().fileExists(atPath: URL.path) {
do {
try NSData(contentsOf: URL, options: .mappedIfSafe)
} catch let error as NSError {
print ("Error: \(error.localizedDescription)")
}
image = UIImage(contentsOfFile: URL.path)
} else {
print("File not found")
}
let (intensityValues, width, height) = pixelValuesFromImage(imageRef: image?.cgImage)
let roundTrippedImage = imageFromPixelValues(pixelValues: intensityValues, width: width, height: height)
let zebra = UIImage(cgImage: roundTrippedImage!)
I was having trouble getting Cameron's code above to work, so I wanted to test another method. I found Vacawama's code, which relies on ARGB pixels. You can use that solution and convert each grayscale value to an ARGB value by simply mapping over each value:
/// Assuming grayscale pixels contains floats in the range 0...1
let grayscalePixels: [Float] = ...
let pixels = grayscalePixels.map { gray -> PixelData in
    // Scale the 0...1 value up to the 0...255 range.
    let intensity = UInt8(round(gray * Float(UInt8.max)))
    return PixelData(a: UInt8.max, r: intensity, g: intensity, b: intensity)
}
let image = UIImage(pixels: pixels, width: width, height: height)
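For completeness, here is a sketch of the PixelData type and the UIImage(pixels:width:height:) initializer that the snippet above relies on, modeled on the referenced answer (treat it as illustrative rather than a verbatim copy):
struct PixelData {
    var a: UInt8
    var r: UInt8
    var g: UInt8
    var b: UInt8
}
extension UIImage {
    convenience init?(pixels: [PixelData], width: Int, height: Int) {
        guard width > 0, height > 0, pixels.count == width * height else { return nil }
        var data = pixels
        // Wrap the ARGB pixel array in a data provider; premultipliedFirst with the default
        // big-endian byte order matches the A, R, G, B field order of PixelData.
        guard let providerRef = CGDataProvider(data: Data(bytes: &data, count: data.count * MemoryLayout<PixelData>.size) as CFData) else { return nil }
        guard let cgImage = CGImage(width: width,
                                    height: height,
                                    bitsPerComponent: 8,
                                    bitsPerPixel: 32,
                                    bytesPerRow: width * MemoryLayout<PixelData>.size,
                                    space: CGColorSpaceCreateDeviceRGB(),
                                    bitmapInfo: CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue),
                                    provider: providerRef,
                                    decode: nil,
                                    shouldInterpolate: true,
                                    intent: .defaultIntent) else { return nil }
        self.init(cgImage: cgImage)
    }
}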