Manipulate pixels in NSImage (Swift)

When looking for a way to manipulate pixels in an NSImage, I found a promising approach at https://gist.github.com/larsaugustin/af414f3637885f56c837b88c3fea1e6b
However, when putting the image into a pixel array and converting the array back to an NSImage, the result is distorted.
Original image:
Image after putting into pixel array and converting back:
This is the code to reproduce the problem:
import Foundation
import AppKit
struct Pixel {
    var a: UInt8
    var r: UInt8
    var g: UInt8
    var b: UInt8
}
let fileManager : FileManager = FileManager.default
let fileURL : URL = fileManager.homeDirectoryForCurrentUser.appendingPathComponent("IMG_RGB_ORIG.png")
let imageData = try Data(contentsOf: fileURL)
let imageOrig = NSImage(data: imageData)
let pixels = imageToPixels(image: imageOrig!)
let imageConv = pixelsToImage(pixels: pixels,
                              width: Int(imageOrig!.size.width),
                              height: Int(imageOrig!.size.height))
func imageToPixels(image: NSImage) -> [Pixel] {
    var returnPixels = [Pixel]()
    let pixelData = (image.cgImage(forProposedRect: nil, context: nil, hints: nil)!).dataProvider!.data
    let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
    for y in 0..<Int(image.size.height) {
        for x in 0..<Int(image.size.width) {
            let pos = CGPoint(x: x, y: y)
            let pixelInfo: Int = ((Int(image.size.width) * Int(pos.y) * 4) + Int(pos.x) * 4)
            let r = data[pixelInfo]
            let g = data[pixelInfo + 1]
            let b = data[pixelInfo + 2]
            let a = data[pixelInfo + 3]
            returnPixels.append(Pixel(a: a, r: r, g: g, b: b))
        }
    }
    return returnPixels
}
func pixelsToImage(pixels: [Pixel], width: Int, height: Int) -> NSImage? {
guard width > 0 && height > 0 else { return nil }
guard pixels.count == width * height else { return nil }
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue)
let bitsPerComponent = 8
let bitsPerPixel = 32
var data = pixels
guard let providerRef = CGDataProvider(data: NSData(bytes: &data,
length: data.count * MemoryLayout<Pixel>.size)
)
else { return nil }
guard let cgim = CGImage(
width: width,
height: height,
bitsPerComponent: bitsPerComponent,
bitsPerPixel: bitsPerPixel,
bytesPerRow: width * MemoryLayout<Pixel>.size,
space: rgbColorSpace,
bitmapInfo: bitmapInfo,
provider: providerRef,
decode: nil,
shouldInterpolate: true,
intent: .defaultIntent
)
else { return nil }
return NSImage(cgImage: cgim, size: CGSize(width: width, height: height))
}
I am using an M1 Mac, and the result is the same with or without Rosetta.
The problem already seems to occur in the conversion into the pixel array. Any ideas on how to correct the code?

Meanwhile, I managed to change the pixels of an NSImage based on the approach described in
Objective-C: NSBitmapImageRep SetColor, which uses the setPixel() method on an NSBitmapImageRep.
The code below illustrates how it works (please do not force-unwrap optionals in your own code).
import Foundation
import AppKit
let fileManager : FileManager = FileManager.default
let fileURL : URL = fileManager.homeDirectoryForCurrentUser.appendingPathComponent("IMG_RGB_ORIG.png")
let imgData = try Data(contentsOf: fileURL)
let imgOrig = NSImage(data: imgData)
let rep = NSBitmapImageRep(bitmapDataPlanes: nil,
                           pixelsWide: Int(imgOrig!.size.width),
                           pixelsHigh: Int(imgOrig!.size.height),
                           bitsPerSample: 8,
                           samplesPerPixel: 4,
                           hasAlpha: true,
                           isPlanar: false,
                           colorSpaceName: .deviceRGB,
                           bytesPerRow: Int(imgOrig!.size.width) * 4,
                           bitsPerPixel: 32)
let ctx = NSGraphicsContext(bitmapImageRep: rep!)
NSGraphicsContext.saveGraphicsState()
NSGraphicsContext.current = ctx
imgOrig!.draw(at: NSZeroPoint, from: NSZeroRect, operation: NSCompositingOperation.copy, fraction: 1.0)
ctx?.flushGraphics()
NSGraphicsContext.restoreGraphicsState()
for y in 0..<Int(imgOrig!.size.height) {
    for x in 0..<Int(imgOrig!.size.width) {
        // you can read the color of pixels like this:
        let color = rep!.colorAt(x: x, y: y)
        let colorR = color!.redComponent
        let colorG = color!.greenComponent
        let colorB = color!.blueComponent
        let colorA = color!.alphaComponent
    }
}
var yellowPixel: [Int] = [255, 255,   0, 255]
var redPixel:    [Int] = [255,   0,   0, 255]
var greenPixel:  [Int] = [  0, 255,   0, 255]
var bluePixel:   [Int] = [  0,   0, 255, 255]

for y in 10..<Int(imgOrig!.size.height) {
    for x in 10..<Int(imgOrig!.size.width) {
        // you can change the color of pixels like this:
        rep!.setPixel(&yellowPixel, atX: x, y: y)
    }
}
let imgConv = NSImage(cgImage: rep!.cgImage!, size: NSSize(width: Int(imgOrig!.size.width), height: Int(imgOrig!.size.height)))
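As a footnote on the original question: the distortion most likely comes from indexing the pixel buffer with image.size, which is measured in points, while the underlying CGImage is measured in pixels (twice the point size on a Retina display) and may pad each row beyond width * 4 bytes. A minimal sketch of the same loop driven by the CGImage itself (the function name is mine, and a 4-byte RGBA channel order is assumed):
func imageToPixelsViaCGImage(image: NSImage) -> [Pixel]? {
    guard let cgImage = image.cgImage(forProposedRect: nil, context: nil, hints: nil),
          let pixelData = cgImage.dataProvider?.data,
          let data = CFDataGetBytePtr(pixelData) else { return nil }

    let width = cgImage.width             // pixel dimensions, not point dimensions
    let height = cgImage.height
    let bytesPerRow = cgImage.bytesPerRow // rows may be padded, so don't assume width * 4
    let bytesPerPixel = cgImage.bitsPerPixel / 8

    var result = [Pixel]()
    result.reserveCapacity(width * height)
    for y in 0..<height {
        for x in 0..<width {
            let offset = y * bytesPerRow + x * bytesPerPixel
            // Channel order depends on cgImage.bitmapInfo; plain RGBA is assumed here.
            result.append(Pixel(a: data[offset + 3],
                                r: data[offset],
                                g: data[offset + 1],
                                b: data[offset + 2]))
        }
    }
    return result
}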

Related

Incorrect saving of transparent UIImage to Photo Library as png with UIImageWriteToSavedPhotosAlbum

I have a function cropAlpha() that trims the extra space defined by the transparency.
func cropAlpha() -> UIImage {
    let cgImage = self.cgImage!
    let width = cgImage.width
    let height = cgImage.height
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let bytesPerPixel: Int = 4
    let bytesPerRow = bytesPerPixel * width
    let bitsPerComponent = 8
    let bitmapInfo: UInt32 = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Big.rawValue
    guard let context = CGContext(data: nil, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo),
          let ptr = context.data?.assumingMemoryBound(to: UInt8.self)
    else { return self }
    context.draw(self.cgImage!, in: CGRect(x: 0, y: 0, width: width, height: height))
    var minX = width
    var minY = height
    var maxX: Int = 0
    var maxY: Int = 0
    for x in 1 ..< width {
        for y in 1 ..< height {
            let i = bytesPerRow * Int(y) + bytesPerPixel * Int(x)
            let a = CGFloat(ptr[i + 3]) / 255.0
            if a == 1 {
                if (x < minX) { minX = x }
                if (x > maxX) { maxX = x }
                if (y < minY) { minY = y }
                if (y > maxY) { maxY = y }
            }
        }
    }
    let rect = CGRect(x: CGFloat(minX), y: CGFloat(minY), width: CGFloat(maxX - minX), height: CGFloat(maxY - minY))
    let croppedImage = self.cgImage!.cropping(to: rect)!
    let ret = UIImage(cgImage: croppedImage)
    return ret
}
The image returned by this function has transparent elements, and when I put it into the image view (presenterImageView.image = imagePNG) it works as it should. But when I try to save the UIImage to the Photo Library, the transparent background turns white.
let image = maskedImage?.cropAlpha()
let imagePNGData = image!.pngData()
let imagePNG = UIImage(data: imagePNGData!)
UIImageWriteToSavedPhotosAlbum(imagePNG!, nil, nil, nil)
If I don't use that function, I get the result I want, but the image has too much wasted space. I don't understand what could be the reason. Any ideas?
The problem is that UIImageWriteToSavedPhotosAlbum does not properly handle saving a UIImage with premultiplied alpha (or at least the result of saving such an image is not what you expect), and your cropping method uses the premultipliedLast format. You also can't simply change CGImageAlphaInfo to a non-premultiplied format, because that combination is not supported there (you will see the error CGBitmapContextCreate: unsupported parameter combination if you try). What you can do is convert the cropped image to a CIImage, unpremultiply the alpha, and convert back to a UIImage. Your saving code could then look like the code below (I recommend removing the force unwrapping if you plan to use it in a final app):
let image = maskedImage?.cropAlpha()
let ciImage = CIImage(image: image!)!.unpremultiplyingAlpha()
let uiImage = UIImage(ciImage: ciImage)
let imagePNGData = uiImage.pngData()
let imagePNG = UIImage(data: imagePNGData!)
UIImageWriteToSavedPhotosAlbum(imagePNG!, nil, nil, nil)
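A possible alternative, not taken from the answer above: writing the encoded PNG bytes through PHPhotoLibrary is commonly suggested as a way to keep the alpha channel, because the data is stored as-is rather than re-encoded. A rough sketch, assuming the app already has photo-library add permission (the helper name is mine):
import Photos

func savePNGPreservingAlpha(_ image: UIImage) {
    guard let pngData = image.pngData() else { return }
    PHPhotoLibrary.shared().performChanges({
        // Add the PNG data itself as the asset resource, so transparency survives.
        let request = PHAssetCreationRequest.forAsset()
        request.addResource(with: .photo, data: pngData, options: nil)
    }) { success, error in
        print("saved:", success, error ?? "none")
    }
}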

Find similarities as a percentage between two UIImage

I'm trying to build an app that finds the similarity between two UIImages as a percentage. The app captures the user's pencil stroke, converts it to a UIImage, and then compares it pixel by pixel with an image stored in the app.
I found code that finds the difference (NOT the similarity). It works if I have two images as JPEGs, but in my case it gives me an error when I tap the button:
"convertoand:]: unrecognized selector sent to instance 0x10900f2d0"
The button captures the pencil stroke and then compares the images with compareImages(image1: imageStroke, image2: imageOnApp), printing the result on a label:
@IBAction func btnFindSimilarity(_ sender: Any) {
    let imgRect = CGRect(x: 0, y: 0, width: 400, height: 100)
    let imageStroke = canvasView.drawing.image(from: imgRect, scale: 1.0)
    let compareResult = compareImages(image1: imageStroke, image2: imageOnApp)
    lblPrintScore.text = "\(String(describing: compareResult))"
}
The find-difference code:
func pixelValues(fromCGImage imageRef: CGImage?) -> [UInt8]? {
    var width = 0
    var height = 0
    var pixelValues: [UInt8]?
    if let imageRef = imageRef {
        width = imageRef.width
        height = imageRef.height
        let bitsPerComponent = imageRef.bitsPerComponent
        let bytesPerRow = imageRef.bytesPerRow
        let totalBytes = height * bytesPerRow
        let bitmapInfo = imageRef.bitmapInfo
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        var intensities = [UInt8](repeating: 0, count: totalBytes)
        let contextRef = CGContext(data: &intensities,
                                   width: width,
                                   height: height,
                                   bitsPerComponent: bitsPerComponent,
                                   bytesPerRow: bytesPerRow,
                                   space: colorSpace,
                                   bitmapInfo: bitmapInfo.rawValue)
        contextRef?.draw(imageRef, in: CGRect(x: 0.0, y: 0.0, width: CGFloat(width), height: CGFloat(height)))
        pixelValues = intensities
    }
    return pixelValues
}
func compareImages(image1: UIImage, image2: UIImage) -> Double? {
    guard let data1 = pixelValues(fromCGImage: image1.cgImage),
          let data2 = pixelValues(fromCGImage: image2.cgImage),
          data1.count == data2.count else {
        return nil
    }
    let width = Double(image1.size.width)
    let height = Double(image1.size.height)
    return zip(data1, data2)
        .enumerated()
        .reduce(0.0) {
            $1.offset % 4 == 3 ? $0 : $0 + abs(Double($1.element.0) - Double($1.element.1))
        } * 100 / (width * height * 3.0) / 255.0
}
I tried to change the code to find similarities instead, but I failed.
Although the following example is for SwiftUI, the code can be used elsewhere.
I found on SO how to extract the colors from a UIImage (see findColors, taken from How do I get the color of a pixel in a UIImage with Swift?),
and using that, I compare the two images for RGB similarity (see isRGBSimilar).
@main
struct TestApp: App {
    var body: some Scene {
        WindowGroup {
            ContentView()
        }
    }
}

struct ContentView: View {
    @State var simili = 0.0

    var body: some View {
        Text(String(simili))
            .onAppear {
                guard let img1 = UIImage(named: "cat1.png") else { return }
                guard let img2 = UIImage(named: "cat2.png") else { return }
                print("---> img1: \(img1.size) img2: \(img2.size)")
                if let percent = compareImages(image1: img1, image2: img2) {
                    print("----> percent: \(percent) \n")
                    simili = percent
                }
            }
    }
    // from: https://stackoverflow.com/questions/25146557/how-do-i-get-the-color-of-a-pixel-in-a-uiimage-with-swift
    func findColors(_ image: UIImage) -> [UIColor] {
        let pixelsWide = Int(image.size.width)
        let pixelsHigh = Int(image.size.height)
        guard let pixelData = image.cgImage?.dataProvider?.data else { return [] }
        let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
        var imageColors: [UIColor] = []
        for x in 0..<pixelsWide {
            for y in 0..<pixelsHigh {
                let point = CGPoint(x: x, y: y)
                let pixelInfo: Int = ((pixelsWide * Int(point.y)) + Int(point.x)) * 4
                let color = UIColor(red: CGFloat(data[pixelInfo]) / 255.0,
                                    green: CGFloat(data[pixelInfo + 1]) / 255.0,
                                    blue: CGFloat(data[pixelInfo + 2]) / 255.0,
                                    alpha: CGFloat(data[pixelInfo + 3]) / 255.0)
                imageColors.append(color)
            }
        }
        return imageColors
    }
    func compareImages(image1: UIImage, image2: UIImage) -> Double? {
        let data1 = findColors(image1)
        let data2 = findColors(image2)
        guard data1.count == data2.count else { return nil }
        var similarr = [Bool]()
        for i in data1.indices {
            similarr.append(isRGBSimilar(data1[i], data2[i], 0.1)) // <-- set epsilon
        }
        let simi = similarr.filter { $0 }
        let result = (Double(simi.count * 100) / (Double(image1.size.width) * Double(image1.size.height)))
        return result
    }
    // compare 2 colors to be within d of each other
    func isRGBSimilar(_ f: UIColor, _ t: UIColor, _ d: CGFloat) -> Bool {
        var r1: CGFloat = 0; var g1: CGFloat = 0; var b1: CGFloat = 0; var a1: CGFloat = 0
        f.getRed(&r1, green: &g1, blue: &b1, alpha: &a1)
        var r2: CGFloat = 0; var g2: CGFloat = 0; var b2: CGFloat = 0; var a2: CGFloat = 0
        t.getRed(&r2, green: &g2, blue: &b2, alpha: &a2)
        return abs(r1 - r2) <= d && abs(g1 - g2) <= d && abs(b1 - b2) <= d && abs(a1 - a2) <= d
    }
}

How to create NSImage without device screen scaling

I would like to generate a 64x64-pixel NSImage and then perform pixel-by-pixel processing on the data. However, when I generate the image in a fairly canonical way on my Retina iMac, the resulting image is 128x128. I tried adding a custom image rep, but that had no effect. Here is some playground code:
let side = 64
let size = NSSize(width: side, height: side)
let imRect = NSRect(origin: NSPoint(x: 0, y: 0), size: size)
let result = NSImage(size: size)
let rep2 = NSBitmapImageRep(bitmapDataPlanes: nil,
                            pixelsWide: side, pixelsHigh: side,
                            bitsPerSample: 8, samplesPerPixel: 4,
                            hasAlpha: true, isPlanar: false,
                            colorSpaceName: NSColorSpaceName.calibratedRGB,
                            bytesPerRow: side * 4, bitsPerPixel: 32)
result.addRepresentation(rep2!)
let backColor = NSColor.green
result.lockFocus()
backColor.setFill()
imRect.fill()
randomAttrGlyph.draw(at: NSPoint(x: 2, y: 2))
result.unlockFocus()
let cgImg = result.cgImage(forProposedRect: nil, context: nil, hints: nil)
result and cgImg are both 128x128.
Also, pulling the RGBA data produces junk; for example, a .white pixel produces RGBA data of (0, 60, 0, 60) instead of the expected (255, 255, 255, 255).
The code I'm using for the pixel data is:
let size = image.size
let cgImg = (image.cgImage(forProposedRect: nil, context: nil, hints: nil)!)
let cgWidth = cgImg.width
let cgHeight = cgImg.height
var returnPixels = [[Pixel]](repeating: [Pixel](repeating: black, count: Int(cgWidth)), count: Int(cgHeight)) // two d array for scaled grayscale
let pixelData = (image.cgImage(forProposedRect: nil, context: nil, hints: nil)!).dataProvider!.data
let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
for y in 0..<Int(cgHeight) {
    for x in 0..<Int(cgWidth) {
        let pos = CGPoint(x: x, y: y)
        let pixelInfo: Int = ((Int(image.size.width) * Int(pos.y) * 4) + Int(pos.x) * 4)
        let r = data[pixelInfo]
        let g = data[pixelInfo + 1]
        let b = data[pixelInfo + 2]
        let a = data[pixelInfo + 3]
        let sum = UInt16(r) + UInt16(g) + UInt16(b)
        returnPixels[y][x] = Pixel(a: a, r: r, g: g, b: b, grey: sum) // no alpha
    }
}
Any suggestions?
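One common workaround, sketched here as a hedged example rather than an answer quoted from the site: draw into the NSBitmapImageRep's own graphics context instead of calling lockFocus, which renders at the screen's backing scale. This mirrors the NSGraphicsContext approach used in the first answer on this page; the green fill is only illustrative.
let side = 64
let rep = NSBitmapImageRep(bitmapDataPlanes: nil,
                           pixelsWide: side, pixelsHigh: side,
                           bitsPerSample: 8, samplesPerPixel: 4,
                           hasAlpha: true, isPlanar: false,
                           colorSpaceName: .calibratedRGB,
                           bytesPerRow: side * 4, bitsPerPixel: 32)!

NSGraphicsContext.saveGraphicsState()
NSGraphicsContext.current = NSGraphicsContext(bitmapImageRep: rep)
NSColor.green.setFill()
NSRect(x: 0, y: 0, width: side, height: side).fill()
// ... draw glyphs or other content here ...
NSGraphicsContext.restoreGraphicsState()

// rep and rep.cgImage are exactly 64x64 pixels, independent of the screen's backing scale.
let image = NSImage(size: NSSize(width: side, height: side))
image.addRepresentation(rep)
let cgImg = rep.cgImage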

Creating CGImage/UIImage from grayscale matrix

I have a matrix of greyscale image pixels, for example:
[ [ 0,  0, 125],
  [10, 50, 255],
  [90,  0, 255] ]
My goal is to apply a tint to it (UIColor) and export a CGImage/UIImage from the structure that holds it.
public typealias Pixel = UInt8

extension UIColor {
    var red: Float   { return Float(CIColor(color: self).red * 255) }
    var green: Float { return Float(CIColor(color: self).green * 255) }
    var blue: Float  { return Float(CIColor(color: self).blue * 255) }
    var alpha: Float { return Float(CIColor(color: self).alpha * 255) }
}

public struct PixelData {
    let r: UInt8
    let g: UInt8
    let b: UInt8
    let a: UInt8
}

public struct Map {
    let pixelCount: UInt
    let pixels: [Pixel]    // all pixels of an image, linear
    let dimension: UInt    // square root of pixel count
    let tintColor: UIColor = UIColor(red: 9/255, green: 133/255, blue: 61/255, alpha: 1)

    public var image: UIImage? {
        var pixelsData = [PixelData]()
        pixelsData.reserveCapacity(Int(pixelCount) * 3)

        let alpha = UInt8(tintColor.alpha)
        let redValue = tintColor.red
        let greenValue = tintColor.green
        let blueValue = tintColor.blue

        let red: [PixelData] = pixels.map {
            let redInt: UInt8 = UInt8((Float($0) / 255.0) * redValue)
            return PixelData(r: redInt, g: 0, b: 0, a: alpha)
        }
        let green: [PixelData] = pixels.map {
            let greenInt: UInt8 = UInt8((Float($0) / 255.0) * greenValue)
            return PixelData(r: 0, g: greenInt, b: 0, a: alpha)
        }
        let blue: [PixelData] = pixels.map {
            let blueInt: UInt8 = UInt8((Float($0) / 255.0) * blueValue)
            return PixelData(r: 0, g: 0, b: blueInt, a: alpha)
        }
        pixelsData.append(contentsOf: red)
        pixelsData.append(contentsOf: green)
        pixelsData.append(contentsOf: blue)

        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue)
        let bitsPerComponent = 8
        let bitsPerPixel = 32
        let dimension: Int = Int(self.dimension)

        var data = pixelsData
        guard let providerRef = CGDataProvider(
            data: NSData(bytes: &data, length: data.count * MemoryLayout<PixelData>.size)
        ) else { return nil }

        if let cgim = CGImage(
            width: dimension,
            height: dimension,
            bitsPerComponent: bitsPerComponent,
            bitsPerPixel: bitsPerPixel,
            bytesPerRow: dimension * MemoryLayout<PixelData>.size,
            space: rgbColorSpace,
            bitmapInfo: bitmapInfo,
            provider: providerRef,
            decode: nil,
            shouldInterpolate: true,
            intent: .defaultIntent
        ) {
            return UIImage(cgImage: cgim)
        }
        return nil
    }
}
The problem is that the output looks like gibberish. I have used this tutorial and this SO thread, but with no success. The result in the playground is:
(the output is there, just barely visible)
Any help appreciated!
There are two key issues.
First, the code calculates all the red values for every grayscale pixel, creates a four-byte PixelData for each (even though only the red channel is populated), and adds those to the pixelsData array. It then repeats that for the green values, and again for the blue values. That produces three times as much data as the image needs, and only the red-channel data ends up being used.
Instead, we should calculate the RGBA values once, create one PixelData per pixel, and repeat this pixel by pixel.
Second, premultipliedFirst means ARGB, but your structure is using RGBA, so you want premultipliedLast.
Thus:
func generateTintedImage(completion: @escaping (UIImage?) -> Void) {
    DispatchQueue.global(qos: .userInitiated).async {
        let image = self.tintedImage()
        DispatchQueue.main.async {
            completion(image)
        }
    }
}

private func tintedImage() -> UIImage? {
    let tintRed = tintColor.red
    let tintGreen = tintColor.green
    let tintBlue = tintColor.blue
    let tintAlpha = tintColor.alpha

    let data = pixels.map { pixel -> PixelData in
        let red = UInt8((Float(pixel) / 255) * tintRed)
        let green = UInt8((Float(pixel) / 255) * tintGreen)
        let blue = UInt8((Float(pixel) / 255) * tintBlue)
        let alpha = UInt8(tintAlpha)
        return PixelData(r: red, g: green, b: blue, a: alpha)
    }.withUnsafeBytes { Data($0) }

    let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedLast.rawValue)
    let bitsPerComponent = 8
    let bitsPerPixel = 32

    guard
        let providerRef = CGDataProvider(data: data as CFData),
        let cgImage = CGImage(width: width,
                              height: height,
                              bitsPerComponent: bitsPerComponent,
                              bitsPerPixel: bitsPerPixel,
                              bytesPerRow: width * MemoryLayout<PixelData>.stride,
                              space: rgbColorSpace,
                              bitmapInfo: bitmapInfo,
                              provider: providerRef,
                              decode: nil,
                              shouldInterpolate: true,
                              intent: .defaultIntent)
    else {
        return nil
    }

    return UIImage(cgImage: cgImage)
}
I’ve also renamed a few variables, used stride instead of size, replaced dimension with width and height so I could process non-square images, etc.
I would also advise against using a computed property for anything this computationally intensive, so I made this an asynchronous method, which you might use as follows:
let map = Map(with: image)
map.generateTintedImage { image in
    self.tintedImageView.image = image
}
Anyway, the above yields the following, where the rightmost image is your tinted image:
Needless to say, to convert your matrix into your pixels array, you can just flatten the array of arrays:
let matrix: [[Pixel]] = [
    [ 0,  0, 125],
    [10, 50, 255],
    [90,  0, 255]
]

pixels = matrix.flatMap { $0 }
Here is a parallelized rendition which is also slightly more efficient with respect to the memory buffer:
private func tintedImage() -> UIImage? {
    let tintAlpha = tintColor.alpha
    let tintRed = tintColor.red / 255
    let tintGreen = tintColor.green / 255
    let tintBlue = tintColor.blue / 255
    let alpha = UInt8(tintAlpha)

    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGImageAlphaInfo.premultipliedLast.rawValue
    let bitsPerComponent = 8
    let bytesPerRow = width * MemoryLayout<PixelData>.stride

    guard
        let context = CGContext(data: nil, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo),
        let data = context.data
    else {
        return nil
    }

    let buffer = data.bindMemory(to: PixelData.self, capacity: width * height)

    DispatchQueue.concurrentPerform(iterations: height) { row in
        let start = width * row
        let end = start + width
        for i in start ..< end {
            let pixel = pixels[i]
            let red = UInt8(Float(pixel) * tintRed)
            let green = UInt8(Float(pixel) * tintGreen)
            let blue = UInt8(Float(pixel) * tintBlue)
            buffer[i] = PixelData(r: red, g: green, b: blue, a: alpha)
        }
    }

    return context.makeImage()
        .flatMap { UIImage(cgImage: $0) }
}

Convert an image into a 2D array (or equivalent) in Apple Swift

I'm wondering how I can turn a UIImage into something usable and modifiable. Java code to handle what I need would look something like this:
BufferedImage img = ImageIO.read(file);
Raster raster = img.getData();
int w = raster.getWidth(), h = raster.getHeight();
int pixels[][] = new int[w][h];
for (int x = 0; x < w; x++)
{
    for (int y = 0; y < h; y++)
    {
        pixels[x][y] = raster.getSample(x, y, 0);
    }
}
I need to modify the alpha values in an image by visiting each pixel in the image.
Untested, but I think this will either work or should be very close.
import UIKit
import CoreGraphics

// Load the image and grab its backing CGImage.
guard let uiimage = UIImage(contentsOfFile: "/PATH/TO/image.png"),
      let image = uiimage.cgImage else { fatalError("couldn't load image") }

let width = image.width
let height = image.height
let colorspace = CGColorSpaceCreateDeviceRGB()
let bytesPerRow = 4 * width
let bitsPerComponent = 8

// Draw the image into a buffer we own so the raw bytes are addressable.
var pixels = [UInt8](repeating: 0, count: width * height * 4)
let context = CGContext(data: &pixels,
                        width: width, height: height,
                        bitsPerComponent: bitsPerComponent,
                        bytesPerRow: bytesPerRow,
                        space: colorspace,
                        bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
context?.draw(image, in: CGRect(x: 0, y: 0, width: width, height: height))

for x in 0..<width {
    for y in 0..<height {
        // Here are your raw pixels (ARGB order with premultipliedFirst)
        let offset = 4 * ((width * y) + x)
        let alpha = pixels[offset]
        let red = pixels[offset + 1]
        let green = pixels[offset + 2]
        let blue = pixels[offset + 3]
    }
}
If you really need conversion to a 2D array, render the image into a byte array via CGContext and then split the array into parts. CGContext uses the 0...255 color range instead of 0...1, and the byte array will be in RGBA order.
Sample code with conversion to 0...1:
import UIKit
import CoreGraphics

extension UIImage {
    func pixelData() -> [UInt8]? {
        let dataSize = size.width * size.height * 4
        var pixelData = [UInt8](repeating: 0, count: Int(dataSize))
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        // premultipliedLast (RGBA) so the alpha byte read in pixelMatrix() is meaningful
        let context = CGContext(data: &pixelData,
                                width: Int(size.width),
                                height: Int(size.height),
                                bitsPerComponent: 8,
                                bytesPerRow: 4 * Int(size.width),
                                space: colorSpace,
                                bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)
        guard let cgImage = self.cgImage,
              let context = context else { return nil }
        context.draw(cgImage, in: CGRect(origin: .zero, size: size))
        return pixelData
    }

    func pixelMatrix() -> [[[Float]]]? {
        guard let pixels = pixelData() else {
            return nil
        }
        var data: [[[Float]]] = []
        let width = Int(size.width)
        let height = Int(size.height)
        for y in 0..<height {
            var row: [[Float]] = []
            for x in 0..<width {
                let offset = 4 * ((width * y) + x)
                let red = Float(pixels[offset]) / 255
                let green = Float(pixels[offset + 1]) / 255
                let blue = Float(pixels[offset + 2]) / 255
                let alpha = Float(pixels[offset + 3]) / 255
                let pixel = [red, green, blue, alpha]
                row.append(pixel)
            }
            data.append(row)
        }
        return data
    }
}
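A quick usage sketch for the extension above (the image name is only a placeholder):
// Read the normalized RGBA components of the pixel at column 10, row 5.
if let image = UIImage(named: "example.png"),
   let matrix = image.pixelMatrix() {
    let pixel = matrix[5][10]   // [red, green, blue, alpha], each in 0...1
    print("alpha at (x: 10, y: 5):", pixel[3])
}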