Creating a CGImage/UIImage from a grayscale matrix - Swift

I have a matrix of grayscale image pixels, for example:
[ [ 0,  0, 125],
  [10, 50, 255],
  [90,  0, 255] ]
My goal is to apply a tint to it (UIColor) and export a CGImage/UIImage from the structure that holds it.
public typealias Pixel = UInt8

extension UIColor {
    var red: Float { return Float(CIColor(color: self).red * 255) }
    var green: Float { return Float(CIColor(color: self).green * 255) }
    var blue: Float { return Float(CIColor(color: self).blue * 255) }
    var alpha: Float { return Float(CIColor(color: self).alpha * 255) }
}

public struct PixelData {
    let r: UInt8
    let g: UInt8
    let b: UInt8
    let a: UInt8
}

public struct Map {
    let pixelCount: UInt
    let pixels: [Pixel]    // all pixels of an image, linear
    let dimension: UInt    // square root of pixel count
    let tintColor: UIColor = UIColor(red: 9/255, green: 133/255, blue: 61/255, alpha: 1)

    public var image: UIImage? {
        var pixelsData = [PixelData]()
        pixelsData.reserveCapacity(Int(pixelCount) * 3)

        let alpha = UInt8(tintColor.alpha)
        let redValue = tintColor.red
        let greenValue = tintColor.green
        let blueValue = tintColor.blue

        let red: [PixelData] = pixels.map {
            let redInt: UInt8 = UInt8((Float($0) / 255.0) * redValue)
            return PixelData(r: redInt, g: 0, b: 0, a: alpha)
        }
        let green: [PixelData] = pixels.map {
            let greenInt: UInt8 = UInt8((Float($0) / 255.0) * greenValue)
            return PixelData(r: 0, g: greenInt, b: 0, a: alpha)
        }
        let blue: [PixelData] = pixels.map {
            let blueInt: UInt8 = UInt8((Float($0) / 255.0) * blueValue)
            return PixelData(r: 0, g: 0, b: blueInt, a: alpha)
        }

        pixelsData.append(contentsOf: red)
        pixelsData.append(contentsOf: green)
        pixelsData.append(contentsOf: blue)

        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue)
        let bitsPerComponent = 8
        let bitsPerPixel = 32
        let dimension: Int = Int(self.dimension)

        var data = pixelsData
        guard let providerRef = CGDataProvider(
            data: NSData(bytes: &data, length: data.count * MemoryLayout<PixelData>.size)
        ) else { return nil }

        if let cgim = CGImage(
            width: dimension,
            height: dimension,
            bitsPerComponent: bitsPerComponent,
            bitsPerPixel: bitsPerPixel,
            bytesPerRow: dimension * MemoryLayout<PixelData>.size,
            space: rgbColorSpace,
            bitmapInfo: bitmapInfo,
            provider: providerRef,
            decode: nil,
            shouldInterpolate: true,
            intent: .defaultIntent
        ) {
            return UIImage(cgImage: cgim)
        }
        return nil
    }
}
The problem is that the output looks like gibberish. I have followed this tutorial and this SO thread, but with no success. The result in the playground is:
(the output is there, just barely visible)
Any help appreciated!

There are two key issues.
First, the code calculates all the red values for every grayscale pixel, builds a four-byte PixelData for each (even though only the red channel is populated), and appends those to the pixelsData array. It then repeats that for the green values, and again for the blue values. That produces three times as much data as the image needs, and only the red-channel third is ever used.
Instead, we should calculate all four RGBA values at once and create a single PixelData per pixel.
Second, premultipliedFirst means ARGB, but your structure is RGBA, so you want premultipliedLast.
Thus:
func generateTintedImage(completion: @escaping (UIImage?) -> Void) {
    DispatchQueue.global(qos: .userInitiated).async {
        let image = self.tintedImage()
        DispatchQueue.main.async {
            completion(image)
        }
    }
}

private func tintedImage() -> UIImage? {
    let tintRed = tintColor.red
    let tintGreen = tintColor.green
    let tintBlue = tintColor.blue
    let tintAlpha = tintColor.alpha

    let data = pixels.map { pixel -> PixelData in
        let red = UInt8((Float(pixel) / 255) * tintRed)
        let green = UInt8((Float(pixel) / 255) * tintGreen)
        let blue = UInt8((Float(pixel) / 255) * tintBlue)
        let alpha = UInt8(tintAlpha)
        return PixelData(r: red, g: green, b: blue, a: alpha)
    }.withUnsafeBytes { Data($0) }

    let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedLast.rawValue)
    let bitsPerComponent = 8
    let bitsPerPixel = 32

    guard
        let providerRef = CGDataProvider(data: data as CFData),
        let cgImage = CGImage(width: width,
                              height: height,
                              bitsPerComponent: bitsPerComponent,
                              bitsPerPixel: bitsPerPixel,
                              bytesPerRow: width * MemoryLayout<PixelData>.stride,
                              space: rgbColorSpace,
                              bitmapInfo: bitmapInfo,
                              provider: providerRef,
                              decode: nil,
                              shouldInterpolate: true,
                              intent: .defaultIntent)
    else {
        return nil
    }

    return UIImage(cgImage: cgImage)
}
I’ve also renamed a few variables, used stride instead of size, and replaced dimension with width and height so the code can process non-square images.
I’d also advise against using a computed property for anything this computationally intense, so I made this an asynchronous method, which you might use as follows:
let map = Map(with: image)

map.generateTintedImage { image in
    self.tintedImageView.image = image
}
Anyway, the above yields the following, where the rightmost image is your tinted image:
Needless to say, to convert your matrix into your pixels array, you can just flatten the array of arrays:
let matrix: [[Pixel]] = [
    [ 0,  0, 125],
    [10, 50, 255],
    [90,  0, 255]
]

pixels = matrix.flatMap { $0 }
Here is a parallelized rendition which is also slightly more efficient with respect to the memory buffer:
private func tintedImage() -> UIImage? {
    let tintAlpha = tintColor.alpha
    let tintRed = tintColor.red / 255
    let tintGreen = tintColor.green / 255
    let tintBlue = tintColor.blue / 255

    let alpha = UInt8(tintAlpha)
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGImageAlphaInfo.premultipliedLast.rawValue
    let bitsPerComponent = 8
    let bytesPerRow = width * MemoryLayout<PixelData>.stride

    guard
        let context = CGContext(data: nil, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo),
        let data = context.data
    else {
        return nil
    }

    let buffer = data.bindMemory(to: PixelData.self, capacity: width * height)

    DispatchQueue.concurrentPerform(iterations: height) { row in
        let start = width * row
        let end = start + width
        for i in start ..< end {
            let pixel = pixels[i]
            let red = UInt8(Float(pixel) * tintRed)
            let green = UInt8(Float(pixel) * tintGreen)
            let blue = UInt8(Float(pixel) * tintBlue)
            buffer[i] = PixelData(r: red, g: green, b: blue, a: alpha)
        }
    }

    return context.makeImage()
        .flatMap { UIImage(cgImage: $0) }
}

Related

Manipulate pixels in NSImage (Swift)

When looking for a way to manipulate pixels in an NSImage, I found a promising approach at https://gist.github.com/larsaugustin/af414f3637885f56c837b88c3fea1e6b
However, when putting the image into a pixel array and converting the array back to an NSImage, the result is distorted.
Original image:
Image after putting into pixel array and converting back:
This is the code to reproduce the problem:
import Foundation
import AppKit

struct Pixel {
    var a: UInt8
    var r: UInt8
    var g: UInt8
    var b: UInt8
}

let fileManager: FileManager = FileManager.default
let fileURL: URL = fileManager.homeDirectoryForCurrentUser.appendingPathComponent("IMG_RGB_ORIG.png")
let imageData = try Data(contentsOf: fileURL)
let imageOrig = NSImage(data: imageData)
let pixels = imageToPixels(image: imageOrig!)
let imageConv = pixelsToImage(pixels: pixels,
                              width: Int(imageOrig!.size.width),
                              height: Int(imageOrig!.size.height))

func imageToPixels(image: NSImage) -> [Pixel] {
    var returnPixels = [Pixel]()
    let pixelData = (image.cgImage(forProposedRect: nil, context: nil, hints: nil)!).dataProvider!.data
    let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
    for y in 0..<Int(image.size.height) {
        for x in 0..<Int(image.size.width) {
            let pos = CGPoint(x: x, y: y)
            let pixelInfo: Int = ((Int(image.size.width) * Int(pos.y) * 4) + Int(pos.x) * 4)
            let r = data[pixelInfo]
            let g = data[pixelInfo + 1]
            let b = data[pixelInfo + 2]
            let a = data[pixelInfo + 3]
            returnPixels.append(Pixel(a: a, r: r, g: g, b: b))
        }
    }
    return returnPixels
}

func pixelsToImage(pixels: [Pixel], width: Int, height: Int) -> NSImage? {
    guard width > 0 && height > 0 else { return nil }
    guard pixels.count == width * height else { return nil }

    let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue)
    let bitsPerComponent = 8
    let bitsPerPixel = 32

    var data = pixels
    guard let providerRef = CGDataProvider(data: NSData(bytes: &data,
                                                        length: data.count * MemoryLayout<Pixel>.size))
    else { return nil }

    guard let cgim = CGImage(
        width: width,
        height: height,
        bitsPerComponent: bitsPerComponent,
        bitsPerPixel: bitsPerPixel,
        bytesPerRow: width * MemoryLayout<Pixel>.size,
        space: rgbColorSpace,
        bitmapInfo: bitmapInfo,
        provider: providerRef,
        decode: nil,
        shouldInterpolate: true,
        intent: .defaultIntent
    )
    else { return nil }

    return NSImage(cgImage: cgim, size: CGSize(width: width, height: height))
}
I am using an M1 Mac, and the result is the same with or without Rosetta.
The problem seems to occur already in the conversion into the pixel array. Any ideas on how to correct the code?
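For what it's worth, one likely source of the distortion is that NSImage.size is measured in points, not pixels, so on a Retina/HiDPI screen the loop bounds and bytes-per-row math above can disagree with the actual bitmap. A minimal sketch of reading the geometry from the CGImage itself instead (the RGBA component order here is an assumption; check the image's bitmapInfo):
// Sketch: take width/height/bytesPerRow from the CGImage rather than
// NSImage.size (points); bytesPerRow may also include row padding.
if let cgImage = imageOrig!.cgImage(forProposedRect: nil, context: nil, hints: nil),
   let pixelData = cgImage.dataProvider?.data,
   let data = CFDataGetBytePtr(pixelData) {
    let bytesPerPixel = cgImage.bitsPerPixel / 8
    for y in 0..<cgImage.height {
        for x in 0..<cgImage.width {
            let pixelInfo = (y * cgImage.bytesPerRow) + (x * bytesPerPixel)
            let r = data[pixelInfo]       // RGBA order assumed here
            let g = data[pixelInfo + 1]
            let b = data[pixelInfo + 2]
            let a = data[pixelInfo + 3]
            _ = (r, g, b, a)
        }
    }
}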
Meanwhile, I managed to change the pixels in an NSImage based on the approach described in
Objective-C: NSBitmapImageRep SetColor, which uses the setPixel() method on an NSBitmapImageRep.
The code below illustrates how it works (please do not force-unwrap optionals in your own code).
import Foundation
import AppKit

let fileManager: FileManager = FileManager.default
let fileURL: URL = fileManager.homeDirectoryForCurrentUser.appendingPathComponent("IMG_RGB_ORIG.png")
let imgData = try Data(contentsOf: fileURL)
let imgOrig = NSImage(data: imgData)

let rep = NSBitmapImageRep(bitmapDataPlanes: nil,
                           pixelsWide: Int(imgOrig!.size.width),
                           pixelsHigh: Int(imgOrig!.size.height),
                           bitsPerSample: 8,
                           samplesPerPixel: 4,
                           hasAlpha: true,
                           isPlanar: false,
                           colorSpaceName: .deviceRGB,
                           bytesPerRow: Int(imgOrig!.size.width) * 4,
                           bitsPerPixel: 32)

let ctx = NSGraphicsContext(bitmapImageRep: rep!)
NSGraphicsContext.saveGraphicsState()
NSGraphicsContext.current = ctx
imgOrig!.draw(at: NSZeroPoint, from: NSZeroRect, operation: NSCompositingOperation.copy, fraction: 1.0)
ctx?.flushGraphics()
NSGraphicsContext.restoreGraphicsState()

for y in 0..<Int(imgOrig!.size.height) {
    for x in 0..<Int(imgOrig!.size.width) {
        // you can read the color of pixels like this:
        let color = rep!.colorAt(x: x, y: y)
        let colorR = color!.redComponent
        let colorG = color!.greenComponent
        let colorB = color!.blueComponent
        let colorA = color!.alphaComponent
    }
}

var yellowPixel: [Int] = [255, 255, 0, 255]
var redPixel: [Int]    = [255, 0, 0, 255]
var greenPixel: [Int]  = [0, 255, 0, 255]
var bluePixel: [Int]   = [0, 0, 255, 255]

for y in 10..<Int(imgOrig!.size.height) {
    for x in 10..<Int(imgOrig!.size.width) {
        // you can change the color of pixels like this:
        rep!.setPixel(&yellowPixel, atX: x, y: y)
    }
}

let imgConv = NSImage(cgImage: rep!.cgImage!, size: NSSize(width: Int(imgOrig!.size.width), height: Int(imgOrig!.size.height)))

How can I change this black shade (gradient) to some colored shade?

I want to use the actual colors of the image, apply an alpha effect pixel by pixel, and create something like this, but in color. How can I do it?
I tried giving it a color, but it turned my image from white to pink.
In this code, I go pixel by pixel and change each pixel's alpha. If I set the RGB to black (i.e. 0), it creates the right gradient; however, when I use RGB values other than 0, it turns into white shades, which is not what I want.
func processPixels(in image: UIImage) -> UIImage? {
    let cgImage = convertCIImageToCGImage(inputImage: image)
    guard let inputCGImage = cgImage else {
        print("unable to get cgImage")
        return nil
    }
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let width = inputCGImage.width
    let height = inputCGImage.height
    let bytesPerPixel = 4
    let bitsPerComponent = 8
    let bytesPerRow = bytesPerPixel * width
    let bitmapInfo = RGBA32.bitmapInfo

    guard let context = CGContext(data: nil, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo) else {
        print("unable to create context")
        return nil
    }
    context.draw(inputCGImage, in: CGRect(x: 0, y: 0, width: width, height: height))

    guard let buffer = context.data else {
        print("unable to get context data")
        return nil
    }
    let pixelBuffer = buffer.bindMemory(to: RGBA32.self, capacity: width * height)

    var rowAlpha: UInt8 = 0
    for row in 0 ..< height {
        rowAlpha = UInt8(row)
        for column in 0 ..< width {
            let offset = row * width + column
            if pixelBuffer[offset].alphaComponent > 0 {
                pixelBuffer[offset] = RGBA32(red: pixelBuffer[offset].redComponent, green: pixelBuffer[offset].greenComponent, blue: pixelBuffer[offset].blueComponent, alpha: rowAlpha)
            }
        }
    }

    let outputCGImage = context.makeImage()!
    let outputImage = UIImage(cgImage: outputCGImage, scale: image.scale, orientation: image.imageOrientation)
    return outputImage
}

func convertCIImageToCGImage(inputImage: UIImage) -> CGImage? {
    guard let ciImage = inputImage.ciImage else {
        print("unable to get ciImage")
        return nil
    }
    let context = CIContext(options: nil)
    if let cgImage = context.createCGImage(ciImage, from: ciImage.extent) {
        return cgImage
    }
    return nil
}
Original Image
Gradient Image without color
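One thing worth checking: if RGBA32 here uses a premultiplied-alpha layout (premultipliedFirst/premultipliedLast), then no color byte may exceed the alpha byte. Writing a small alpha while leaving the RGB bytes untouched produces out-of-range premultiplied values, which can render exactly as the washed-out white shades described. A hedged sketch of the inner assignment, scaling the colors along with the alpha (this assumes the RGBA32 initializer and component accessors used in the code above):
// Sketch: with premultiplied alpha, scale each color component by the new alpha.
let scale = Float(rowAlpha) / 255
pixelBuffer[offset] = RGBA32(red: UInt8(Float(pixelBuffer[offset].redComponent) * scale),
                             green: UInt8(Float(pixelBuffer[offset].greenComponent) * scale),
                             blue: UInt8(Float(pixelBuffer[offset].blueComponent) * scale),
                             alpha: rowAlpha)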

Swift - Picking pixel colour of UIImage - memory crash

I want to pick the colour of a specific pixel of a UIImage in Swift 3, and this method is called ~10k times.
func pixelColour(_ pixelPosition: CGPoint) {
    if !CGRect(x: 0.0, y: 0.0, width: self.size.width, height: self.size.height).contains(pixelPosition) {
        return
    }

    let pointX = trunc(pixelPosition.x)
    let pointY = trunc(pixelPosition.y)
    let cgImage = self.cgImage
    let width = self.size.width
    let height = self.size.height
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let bytesPerPixel = 4
    let bytesPerRow = bytesPerPixel * 1
    let bitsPerComponent = 8
    let pixelData = UnsafeMutablePointer<CUnsignedChar>.allocate(capacity: 4)
    let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedLast.rawValue)

    let context = CGContext(data: pixelData, width: 1, height: 1, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue)
    context?.setBlendMode(CGBlendMode.copy)
    context?.translateBy(x: -pointX, y: pointY - CGFloat(height))

    // This line takes too much memory, how to release memory here?
    context?.draw(cgImage!, in: CGRect(x: 0.0, y: 0.0, width: CGFloat(width), height: CGFloat(height)))

    print("\(pixelData[0]) \(pixelData[1]) \(pixelData[2]) ")
    pixelData.deallocate(capacity: 4)
}
Unfortunately, it seems that memory is not released, because it crashes after checking ~500 pixels. How can I solve this problem?
You have not shown how pixelColour is called, but I presume that it is in some sort of loop. If so, surround your repeated call to pixelColour with an autoreleasepool{...} call to release the accumulated memory each time through your loop:
let p = // next CGPoint
autoreleasepool {
    pixelColour(p)
}
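For example, if the pixels are scanned in a grid (a sketch; the 100×100 bounds are illustrative, and pixelColour is assumed to be the method from the question):
// Sketch: drain autoreleased temporaries on every iteration of the scan.
for y in 0 ..< 100 {
    for x in 0 ..< 100 {
        autoreleasepool {
            pixelColour(CGPoint(x: x, y: y))
        }
    }
}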
Taken from How do I get the color of a pixel in a UIImage with Swift?
and converted to Swift 3
extension UIImage {
    func getPixelColor(pos: CGPoint) -> UIColor? {
        guard let provider = cgImage?.dataProvider else {
            return nil
        }
        let pixelData = provider.data
        let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
        // Assumes a 4-byte RGBA layout and that size (in points) matches the
        // pixel width; for @2x/@3x images, prefer cgImage.width and cgImage.bytesPerRow.
        let pixelInfo: Int = ((Int(self.size.width) * Int(pos.y)) + Int(pos.x)) * 4
        let r = CGFloat(data[pixelInfo]) / CGFloat(255.0)
        let g = CGFloat(data[pixelInfo + 1]) / CGFloat(255.0)
        let b = CGFloat(data[pixelInfo + 2]) / CGFloat(255.0)
        let a = CGFloat(data[pixelInfo + 3]) / CGFloat(255.0)
        return UIColor(red: r, green: g, blue: b, alpha: a)
    }
}
This should not take too much memory, since it reads the image's existing backing data through a pointer rather than redrawing the entire image for every lookup.
Hope it helps!
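Usage might look like this (the image name and coordinates are illustrative; coordinates are in the image's own coordinate space):
let image = UIImage(named: "pic2.png")
if let color = image?.getPixelColor(pos: CGPoint(x: 10, y: 10)) {
    print(color)
}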

UIImage to UIColor array of pixel colors

I'm sorry to ask this, but I can't figure out how to represent a UIImage as an array of UIColor values, one per pixel. I've tried my best with converting via UIImagePNGRepresentation/UIImageJPEGRepresentation, but couldn't get the desired result.
Here's a Swiftier version (Swift 3):
extension UIImage {
    var colors: [UIColor]? {
        var colors = [UIColor]()
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        guard let cgImage = cgImage else {
            return nil
        }
        let width = Int(size.width)
        let height = Int(size.height)
        var rawData = [UInt8](repeating: 0, count: width * height * 4)
        let bytesPerPixel = 4
        let bytesPerRow = bytesPerPixel * width
        let bitsPerComponent = 8
        let bitmapInfo = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Big.rawValue

        let context = CGContext(data: &rawData,
                                width: width,
                                height: height,
                                bitsPerComponent: bitsPerComponent,
                                bytesPerRow: bytesPerRow,
                                space: colorSpace,
                                bitmapInfo: bitmapInfo)
        let drawingRect = CGRect(origin: .zero, size: CGSize(width: width, height: height))
        context?.draw(cgImage, in: drawingRect)

        for y in 0..<height {
            for x in 0..<width {
                // walk rows (y), then columns (x) within each row
                let byteIndex = (bytesPerRow * y) + x * bytesPerPixel
                let red = CGFloat(rawData[byteIndex]) / 255.0
                let green = CGFloat(rawData[byteIndex + 1]) / 255.0
                let blue = CGFloat(rawData[byteIndex + 2]) / 255.0
                let alpha = CGFloat(rawData[byteIndex + 3]) / 255.0
                let color = UIColor(red: red, green: green, blue: blue, alpha: alpha)
                colors.append(color)
            }
        }
        return colors
    }
}
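Usage is then just (the image name is illustrative):
if let colors = UIImage(named: "sample.png")?.colors {
    print(colors.count)   // width × height UIColor values, row by row
}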
This is simply a Swift translation of Olie's answer to the same question in Objective-C. Make sure you give him an upvote as well.
extension UIImage {
    func colorArray() -> [UIColor] {
        let result = NSMutableArray()
        let img = self.CGImage
        let width = CGImageGetWidth(img)
        let height = CGImageGetHeight(img)
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        var rawData = [UInt8](count: width * height * 4, repeatedValue: 0)
        let bytesPerPixel = 4
        let bytesPerRow = bytesPerPixel * width
        let bitsPerComponent = 8
        let bitmapInfo = CGImageAlphaInfo.PremultipliedLast.rawValue | CGBitmapInfo.ByteOrder32Big.rawValue

        let context = CGBitmapContextCreate(&rawData, width, height, bitsPerComponent, bytesPerRow, colorSpace, bitmapInfo)
        CGContextDrawImage(context, CGRectMake(0, 0, CGFloat(width), CGFloat(height)), img)

        for y in 0..<height {
            for x in 0..<width {
                // walk rows (y), then columns (x) within each row
                let byteIndex = (bytesPerRow * y) + x * bytesPerPixel
                let red = CGFloat(rawData[byteIndex]) / 255.0
                let green = CGFloat(rawData[byteIndex + 1]) / 255.0
                let blue = CGFloat(rawData[byteIndex + 2]) / 255.0
                let alpha = CGFloat(rawData[byteIndex + 3]) / 255.0
                let color = UIColor(red: red, green: green, blue: blue, alpha: alpha)
                result.addObject(color)
            }
        }
        return (result as NSArray) as! [UIColor]
    }
}
Note that this runs rather slowly. It takes 35 seconds for the simulator to decode a 15MP image, and that's on a quad-core i7.
To get the pixels, try:
let image = UIImage(named: "pic2.png")

if let cgImage = image?.cgImage, let data = cgImage.dataProvider?.data, let bytes = CFDataGetBytePtr(data) {
    assert(cgImage.colorSpace?.model == .rgb)
    var stringArray = [String]()
    let bytesPerPixel = cgImage.bitsPerPixel / cgImage.bitsPerComponent
    for y in 0 ..< cgImage.height {
        for x in 0 ..< cgImage.width {
            let offset = (y * cgImage.bytesPerRow) + (x * bytesPerPixel)
            let components = (r: bytes[offset], g: bytes[offset + 1], b: bytes[offset + 2])
            stringArray.append("[x: \(x), y: \(y)] \(components)")
        }
    }
    print(stringArray)
}

Convert an image into a 2D array (or equivalent) in Apple Swift

I'm wondering how I can turn a UIImage into something usable and modifiable. Java code to handle what I need would look something like this:
BufferedImage img = ImageIO.read(file);
Raster raster = img.getData();
int w = raster.getWidth(), h = raster.getHeight();
int pixels[][] = new int[w][h];
for (int x = 0; x < w; x++) {
    for (int y = 0; y < h; y++) {
        pixels[x][y] = raster.getSample(x, y, 0);
    }
}
I need to modify the alpha values in an image by visiting each pixel in the image.
Untested, but I think this will either work or should be very close.
import UIKit
import CoreGraphics

var uiimage = UIImage(contentsOfFile: "/PATH/TO/image.png")
var image = uiimage.CGImage

let width = CGImageGetWidth(image)
let height = CGImageGetHeight(image)
let colorspace = CGColorSpaceCreateDeviceRGB()
let bytesPerRow = (4 * width)
let bitsPerComponent: UInt = 8
let pixels = UnsafePointer<UInt8>(malloc(width * height * 4))

var context = CGBitmapContextCreate(pixels, width, height, bitsPerComponent, bytesPerRow, colorspace, CGBitmapInfo())

CGContextDrawImage(context, CGRectMake(0, 0, CGFloat(width), CGFloat(height)), image)

for x in 0..<width {
    for y in 0..<height {
        // Here are your raw pixels
        let offset = 4 * ((Int(width) * Int(y)) + Int(x))
        let alpha = pixels[offset]
        let red = pixels[offset + 1]
        let green = pixels[offset + 2]
        let blue = pixels[offset + 3]
    }
}
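Since the goal was modifying alpha values, here is a minimal modern-Swift sketch of the full round trip: draw into an RGBA context, rewrite the pixels, and make a new image. The uniform fade is illustrative, and because the context is premultiplied, the color bytes are scaled along with the alpha (factor must be in 0...1):
import UIKit

// Sketch: visit every pixel, scale its alpha (and, since the context is
// premultiplied, its color bytes too), then rebuild a UIImage.
func fadingAlpha(of image: UIImage, by factor: Float) -> UIImage? {
    guard let cgImage = image.cgImage else { return nil }
    let width = cgImage.width
    let height = cgImage.height
    guard let context = CGContext(data: nil, width: width, height: height,
                                  bitsPerComponent: 8, bytesPerRow: 4 * width,
                                  space: CGColorSpaceCreateDeviceRGB(),
                                  bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)
    else { return nil }
    context.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))
    guard let buffer = context.data?.bindMemory(to: UInt8.self, capacity: width * height * 4)
    else { return nil }
    for i in stride(from: 0, to: width * height * 4, by: 4) {
        buffer[i]     = UInt8(Float(buffer[i]) * factor)       // R
        buffer[i + 1] = UInt8(Float(buffer[i + 1]) * factor)   // G
        buffer[i + 2] = UInt8(Float(buffer[i + 2]) * factor)   // B
        buffer[i + 3] = UInt8(Float(buffer[i + 3]) * factor)   // A
    }
    return context.makeImage().flatMap { UIImage(cgImage: $0) }
}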
If you really need a conversion to a 2D array, render the image into a byte array via CGContext and then split the array into parts. CGContext uses a 0...255 color range instead of 0...1; the byte array will be in RGBA format.
Sample code with conversion to 0...1:
import UIKit
import CoreGraphics

extension UIImage {
    func pixelData() -> [UInt8]? {
        let dataSize = size.width * size.height * 4
        var pixelData = [UInt8](repeating: 0, count: Int(dataSize))
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGContext(data: &pixelData,
                                width: Int(size.width),
                                height: Int(size.height),
                                bitsPerComponent: 8,
                                bytesPerRow: 4 * Int(size.width),
                                space: colorSpace,
                                bitmapInfo: CGImageAlphaInfo.noneSkipLast.rawValue)
        guard let cgImage = self.cgImage,
              let context = context else { return nil }
        context.draw(cgImage, in: CGRect(origin: .zero, size: size))
        return pixelData
    }

    func pixelMatrix() -> [[[Float]]]? {
        guard let pixels = pixelData() else {
            return nil
        }
        var data: [[[Float]]] = []
        let width = Int(size.width)
        let height = Int(size.height)
        for y in 0..<height {
            var row: [[Float]] = []
            for x in 0..<width {
                let offset = 4 * ((width * y) + x)
                let red = Float(pixels[offset]) / 255
                let green = Float(pixels[offset + 1]) / 255
                let blue = Float(pixels[offset + 2]) / 255
                let alpha = Float(pixels[offset + 3]) / 255
                let pixel = [red, green, blue, alpha]
                row.append(pixel)
            }
            data.append(row)
        }
        return data
    }
}
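Usage, assuming the methods live in a UIImage extension as above (the image name is illustrative):
if let matrix = UIImage(named: "sample.png")?.pixelMatrix() {
    // matrix[y][x] is [red, green, blue, alpha], each in 0...1
    print(matrix[0][0])
}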