How to apply LUT (from .png) to an image? (Swift + Xcode)

I am trying to apply a LUT to an image when a button is pressed.
Because I am new to programming, I mostly copied code and tried to modify it for my own project. I get no error messages, but when I press the button there is no effect on the image.
When I change the filter name to something other than "CIColorCube", the app even crashes with the error message: "Unexpectedly found nil while unwrapping an Optional value".
This is the LUT image I am using (size: 512x512).
func colorCubeFilterFromLUT(imageName : String) -> CIFilter? {
let size = 512
let lutImage = UIImage(named: "LUT.png")!.cgImage
let lutWidth = lutImage!.width
let lutHeight = lutImage!.height
let rowCount = lutHeight / size
let columnCount = lutWidth / size
if ((lutWidth % size != 0) || (lutHeight % size != 0) || (rowCount * columnCount != size)) {
NSLog("Invalid colorLUT %#", "LUT.png");
return nil
}
let bitmap = getBytesFromImage(image: UIImage(named: "LUT.png"))!
let floatSize = MemoryLayout<Float>.size
let cubeData = UnsafeMutablePointer<Float>.allocate(capacity: size * size * size * 4 * floatSize)
var z = 0
var bitmapOffset = 0
for _ in 0 ..< rowCount {
for y in 0 ..< size {
let tmp = z
for _ in 0 ..< columnCount {
for x in 0 ..< size {
let alpha = Float(bitmap[bitmapOffset]) / 255.0
let red = Float(bitmap[bitmapOffset+1]) / 255.0
let green = Float(bitmap[bitmapOffset+2]) / 255.0
let blue = Float(bitmap[bitmapOffset+3]) / 255.0
let dataOffset = (z * size * size + y * size + x) * 4
cubeData[dataOffset + 3] = alpha
cubeData[dataOffset + 2] = red
cubeData[dataOffset + 1] = green
cubeData[dataOffset + 0] = blue
bitmapOffset += 4
}
z += 1
}
z = tmp
}
z += columnCount
}
let colorCubeData = NSData(bytesNoCopy: cubeData, length: size * size * size * 4 * floatSize, freeWhenDone: true)
// create CIColorCube Filter
let filter = CIFilter(name: "CIColorCube")
filter?.setValue(colorCubeData, forKey: "inputCubeData")
filter?.setValue(size, forKey: "inputCubeDimension")
return filter
}
func getBytesFromImage(image:UIImage?) -> [UInt8]?
{
var pixelValues: [UInt8]?
if let imageRef = image?.cgImage {
let width = Int(imageRef.width)
let height = Int(imageRef.height)
let bitsPerComponent = 8
let bytesPerRow = width * 4
let totalBytes = height * bytesPerRow
let bitmapInfo = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Little.rawValue
let colorSpace = CGColorSpaceCreateDeviceRGB()
var intensities = [UInt8](repeating: 0, count: totalBytes)
let contextRef = CGContext(data: &intensities, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo)
contextRef?.draw(imageRef, in: CGRect(x: 0.0, y: 0.0, width: CGFloat(width), height: CGFloat(height)))
pixelValues = intensities
}
return pixelValues!
}
@IBAction func filterRetro(_ sender: Any) {
let inputImage = CIImage(image: photo.image!)
let filteredImage = inputImage?.applyingFilter("CIColorCube")
let renderedImage = context.createCGImage(filteredImage!, from: (filteredImage?.extent)!)
photo.image = UIImage(cgImage: renderedImage!)
}

Please add this code in your IBAction func filterRetro
if let photoImage = photo.image,
   let inputImage = CIImage(image: photoImage) {
    let filteredImage = inputImage.applyingFilter("CIColorCube")
    let filteredExtent = filteredImage.extent
    /* hoping this will return a CGImage */
    if let renderedImage = context.createCGImage(filteredImage, from: filteredExtent) {
        photo.image = UIImage(cgImage: renderedImage)
    }
}
This takes care of the optional unwrapping inside the button-press action, but not of the rest of the code. Let me know whether it works.
[edit]
The inputImage.applyingFilter("name") method calls CIFilter.init(name:), for which the documentation says:
The name of the filter. You must make sure the name is spelled
correctly, otherwise your app will run but not produce any output
images. For that reason, you should check for the existence of the
filter after calling this method.
I would suggest creating the filter in a separate variable first and applying it to the image only if the filter exists:
let filter = CIFilter(name: "CIColorCube")
if filter != nil {
    let filteredImage = inputImage.applyingFilter("CIColorCube")
}
Try this.
thanks,
J
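For reference, a minimal sketch of how the button action could feed the image through the cube filter built by colorCubeFilterFromLUT(imageName:) above. This is only an illustration under assumptions, not the asker's final code: it presumes a shared CIContext property named context and the photo outlet from the question.
@IBAction func filterRetro(_ sender: Any) {
    guard
        let uiImage = photo.image,
        let inputImage = CIImage(image: uiImage),
        // Build the cube filter from the LUT image (function defined in the question).
        let cubeFilter = colorCubeFilterFromLUT(imageName: "LUT.png")
    else { return }

    // Feed the source image into the cube filter.
    cubeFilter.setValue(inputImage, forKey: kCIInputImageKey)

    guard
        let outputImage = cubeFilter.outputImage,
        let renderedImage = context.createCGImage(outputImage, from: outputImage.extent)
    else { return }

    photo.image = UIImage(cgImage: renderedImage)
}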

Related

Incorrect saving of transparent UIImage to Photo Library as png with UIImageWriteToSavedPhotosAlbum

I have a function cropAlpha() that trims away the extra transparent space around an image.
func cropAlpha() -> UIImage {
let cgImage = self.cgImage!
let width = cgImage.width
let height = cgImage.height
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bytesPerPixel:Int = 4
let bytesPerRow = bytesPerPixel * width
let bitsPerComponent = 8
let bitmapInfo: UInt32 = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Big.rawValue
guard let context = CGContext(data: nil, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo),
let ptr = context.data?.assumingMemoryBound(to: UInt8.self)
else { return self }
context.draw(self.cgImage!, in: CGRect(x: 0, y: 0, width: width, height: height))
var minX = width
var minY = height
var maxX: Int = 0
var maxY: Int = 0
for x in 1 ..< width {
for y in 1 ..< height {
let i = bytesPerRow * Int(y) + bytesPerPixel * Int(x)
let a = CGFloat(ptr[i + 3]) / 255.0
if a == 1 {
if (x < minX) { minX = x }
if (x > maxX) { maxX = x }
if (y < minY) { minY = y }
if (y > maxY) { maxY = y }
}
}
}
let rect = CGRect(x: CGFloat(minX),y: CGFloat(minY), width: CGFloat(maxX - minX), height: CGFloat(maxY-minY))
let croppedImage = self.cgImage!.cropping(to: rect)!
let ret = UIImage(cgImage: croppedImage)
return ret
}
The image returned by this function has transparent elements, and I put it in the image view with presenterImageView.image = imagePNG. It works as it should. But when I try to save the UIImage to the Photo Library, the transparent background turns white.
let image = maskedImage?.cropAlpha()
let imagePNGData = image!.pngData()
let imagePNG = UIImage(data: imagePNGData!)
UIImageWriteToSavedPhotosAlbum(imagePNG!, nil, nil, nil)
If I don't use that function, I get the result I want, but the image has too much wasted space. I don't understand what could be the reason. Any ideas?
The problem is that UIImageWriteToSavedPhotosAlbum does not properly handle saving a UIImage with premultiplied alpha (or at least the result of saving such an image is not what you expect), and your cropping method uses the premultipliedLast format.
You also can't simply change the CGImageAlphaInfo to a non-premultiplied format, because that combination is not supported there (you will see the error "CGBitmapContextCreate: unsupported parameter combination" if you try).
What you can do is convert the cropped image to a CIImage, unpremultiply the alpha, and convert back to a UIImage. Your saving code could then look like the code below (however, I recommend removing the force unwrapping from it if you plan to use it in a final app):
let image = maskedImage?.cropAlpha()
let ciImage = CIImage(image: image!)!.unpremultiplyingAlpha()
let uiImage = UIImage(ciImage: ciImage)
let imagePNGData = uiImage.pngData()
let imagePNG = UIImage(data: imagePNGData!)
UIImageWriteToSavedPhotosAlbum(imagePNG!, nil, nil, nil)
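As a hedged variant of the same idea without force unwrapping (assuming the same maskedImage and cropAlpha() from the question; other names are illustrative):
func saveCroppedImage() {
    guard
        let croppedImage = maskedImage?.cropAlpha(),
        // Unpremultiply the alpha before encoding to PNG.
        let ciImage = CIImage(image: croppedImage)?.unpremultiplyingAlpha()
    else { return }

    let uiImage = UIImage(ciImage: ciImage)

    guard
        let pngData = uiImage.pngData(),
        let pngImage = UIImage(data: pngData)
    else { return }

    UIImageWriteToSavedPhotosAlbum(pngImage, nil, nil, nil)
}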

Swift: Apply LUT (Lookup Table) to an image using CIColorCube slow performance

I'm applying a LUT (from a .png - Example LUT Image) to an image using CIColorCube. It works well. The only problem I'm facing is that when I create the button thumbnails, the app freezes for a few seconds.
The buttons look like this -> Buttons Example Image
This is my code:
@IBOutlet weak var filtersScrollView: UIScrollView!
var filters = ["-", "Filter1", "Filter2", "Filter3", "Filter4"]
override func viewDidLoad() {
createFilters()
}
func createFilters() {
var x: CGFloat = 10
let y: CGFloat = 0
let width: CGFloat = 60
let height: CGFloat = 83
let gap: CGFloat = 2
for i in 0..<filters.count {
let filterButton = UIButton(type: .custom)
filterButton.frame = CGRect(x: x, y: y, width: width, height: height)
filterButton.imageView?.contentMode = .scaleAspectFill
filterButton.setTitleColor(#colorLiteral(red: 1, green: 1, blue: 1, alpha: 0), for: .normal)
let text = UILabel()
text.frame = CGRect(x: 0, y: height - 21, width: filterButton.frame.width, height: 21)
text.textAlignment = .center
text.backgroundColor = #colorLiteral(red: 0.9372549057, green: 0.3490196168, blue: 0.1921568662, alpha: 1)
text.textColor = .white
text.font = .systemFont(ofSize: 8.5, weight: .medium)
filterButton.addSubview(text)
filtersScrollView.insertSubview(filterButton, at: 1)
x += width + gap
if i == 0 {
filterButton.setImage(originalImage, for: .normal)
text.text = "-"
text.backgroundColor = #colorLiteral(red: 0.1215686275, green: 0.1215686275, blue: 0.1215686275, alpha: 1)
}
else {
// THIS LINE MAKES THE APP STOP FOR A FEW SECONDS
let filteredImage = filterFromLUT(inputImage: originalCIImage, lut: "\(filters[i])")?.outputImage
filterButton.setImage(UIImage(ciImage: filteredImage!), for: .normal)
text.text = "\(filters[i])"
}
}
filtersScrollView.contentSize = CGSize(width: x, height: height)
}
func filterFromLUT(inputImage: CIImage, lut: String) -> CIFilter? {
let dimension = 64
let lutImage = UIImage(named: lut)!.cgImage
let width = lutImage!.width
let height = lutImage!.height
let rowNum = width / dimension
let columnNum = height / dimension
let bitmap = createBitmap(image: lutImage!)
let dataSize = dimension * dimension * dimension * MemoryLayout<Float>.size * 4
var array = Array<Float>(repeating: 0, count: dataSize)
var bitmapOffest: Int = 0
var z: Int = 0
for _ in stride(from: 0, to: rowNum, by: 1) {
for y in stride(from: 0, to: dimension, by: 1) {
let tmp = z
for _ in stride(from: 0, to: columnNum, by: 1) {
for x in stride(from: 0, to: dimension, by: 1) {
let dataOffset = (z * dimension * dimension + y * dimension + x) * 4
let position = bitmap!
.advanced(by: bitmapOffest)
array[dataOffset + 0] = Float(position
.advanced(by: 0)
.pointee) / 255
array[dataOffset + 1] = Float(position
.advanced(by: 1)
.pointee) / 255
array[dataOffset + 2] = Float(position
.advanced(by: 2)
.pointee) / 255
array[dataOffset + 3] = Float(position
.advanced(by: 3)
.pointee) / 255
bitmapOffest += 4
}
z += 1
}
z = tmp
}
z += columnNum
}
free(bitmap)
let data = Data.init(bytes: array, count: dataSize)
// Create CIColorCube filter
let filter = CIFilter.colorCube()
filter.inputImage = inputImage
filter.cubeData = data
filter.cubeDimension = Float(dimension)
return filter
}
func createBitmap(image: CGImage) -> UnsafeMutablePointer<UInt8>? {
let width = image.width
let height = image.height
let bitsPerComponent = 8
let bytesPerRow = width * 4
let bitmapSize = bytesPerRow * height
guard let data = malloc(bitmapSize) else {
return nil
}
let context = CGContext(
data: data,
width: width,
height: height,
bitsPerComponent: bitsPerComponent,
bytesPerRow: bytesPerRow,
space: CGColorSpaceCreateDeviceRGB(),
bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue,
releaseCallback: nil,
releaseInfo: nil)
context!.draw(image, in: CGRect(x: 0, y: 0, width: width, height: height))
return data.bindMemory(to: UInt8.self, capacity: bitmapSize)
}
I think that maybe the CGContext inside the createBitmap() function is causing this. Does anyone know how to solve this?
There are a few things you can do to improve performance:
Currently, you are processing the original input image (which I assume is pretty large) just to display the result in a 60 x 83 button. Consider scaling the image down first before putting it through the filters.
You can avoid blocking the UI by making the image processing code asynchronous. Just create the buttons in their proper size and DispatchQueue.global().async { ... } the image processing.
Don't use .setImage(UIImage(ciImage: filteredImage)). In my experience, creating a UIImage from a CIImage this way is very unpredictable. Rather, use a CIContext to render the filtered image into a CGImage and convert that into a UIImage afterward. Also try to re-use a single CIContext instead of re-creating it for each image. (A combined sketch of these first three points is shown after the vDSP example below.)
The code for converting the LUT image into a float data array can be sped up by using vDSP (see below).
Using vDSP for creating the LUT data:
let lutImage = UIImage(named: lut)!.cgImage!
let dimension = lutImage.height
// get data from image
let lutImageData = lutImage.dataProvider?.data
let lutImageDataPtr = CFDataGetBytePtr(lutImageData)!
// convert to float and divide by 255
let numElements = dimension * dimension * dimension * 4
let inDataFloat = UnsafeMutablePointer<Float>.allocate(capacity: numElements)
vDSP_vfltu8(lutImageDataPtr, 1, inDataFloat, 1, vDSP_Length(numElements))
var div: Float = 255.0
vDSP_vsdiv(inDataFloat, 1, &div, inDataFloat, 1, vDSP_Length(numElements))
// convert buffer pointer to data
let lutData = NSData(bytesNoCopy: inDataFloat, length: numElements * MemoryLayout<Float>.size, freeWhenDone: true)
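Putting the first three suggestions together, a rough sketch could look like the following. This is an assumption-laden outline, not a drop-in implementation: it treats filterFromLUT from the question as a free function and uses an illustrative makeThumbnail helper and a shared CIContext.
// One CIContext shared by all thumbnails instead of a new one per image.
let sharedContext = CIContext()

func makeThumbnail(from fullImage: CIImage, lut: String, completion: @escaping (UIImage?) -> Void) {
    DispatchQueue.global(qos: .userInitiated).async {
        // 1. Scale the source image down to roughly the button size first.
        let targetWidth: CGFloat = 60 * UIScreen.main.scale
        let scale = targetWidth / fullImage.extent.width
        let smallImage = fullImage.transformed(by: CGAffineTransform(scaleX: scale, y: scale))

        // 2. Apply the LUT filter to the small image (filterFromLUT from the question).
        guard let output = filterFromLUT(inputImage: smallImage, lut: lut)?.outputImage,
              // 3. Render through the shared CIContext into a CGImage.
              let cgImage = sharedContext.createCGImage(output, from: output.extent)
        else {
            DispatchQueue.main.async { completion(nil) }
            return
        }
        DispatchQueue.main.async { completion(UIImage(cgImage: cgImage)) }
    }
}
In createFilters() you would then call makeThumbnail(from: originalCIImage, lut: filters[i]) and assign the button image in the completion handler on the main queue.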

Why can't I read those MTLTexture pixels?

I am trying to get the pixels of a MTLTexture this way:
let pixelCount = compareTexture.width * compareTexture.height
let region = MTLRegionMake2D(0, 0, compareTexture.width, compareTexture.height)
var textureComponentsArray = Array<float4>(repeating: float4(0), count: pixelCount)
textureComponentsArray.withUnsafeMutableBytes {
compareTexture.getBytes($0.baseAddress!, bytesPerRow: (MemoryLayout<float4>.size * compareTexture.width), from: region, mipmapLevel: 0)
}
print(textureComponentsArray.first!)
Unfortunately most elements are either NaN or equal to the values I initialised my textureComponentsArray with. For instance, this code prints:
float4(nan, nan, nan, nan)
I am on macOS and my MTLTexture has those properties:
let textureDescriptor = MTLTextureDescriptor()
textureDescriptor.width = imageTexture.width
textureDescriptor.height = imageTexture.height
textureDescriptor.pixelFormat = imageTexture.pixelFormat
textureDescriptor.resourceOptions = .storageModeManaged
textureDescriptor.storageMode = .managed
textureDescriptor.usage = [.renderTarget, .shaderRead, .shaderWrite]
I do use the managed storage mode, so the data should be available to the CPU; I don't understand why it doesn't work.
Thank you.
EDIT:
I am trying to use:
blitCommandEncoder.synchronize(resource: compareTexture)
and I do wait for the command buffer to complete, but the issue is still there.
I needed an answer for this question for iOS. I'm leaving here the extension I wrote for my needs.
import Metal
extension MTLTexture {
func getPixels<T> (_ region: MTLRegion? = nil, mipmapLevel: Int = 0) -> UnsafeMutablePointer<T> {
let fromRegion = region ?? MTLRegionMake2D(0, 0, self.width, self.height)
let width = fromRegion.size.width
let height = fromRegion.size.height
let bytesPerRow = MemoryLayout<T>.stride * width
let data = UnsafeMutablePointer<T>.allocate(capacity: bytesPerRow * height)
self.getBytes(data, bytesPerRow: bytesPerRow, from: fromRegion, mipmapLevel: mipmapLevel)
return data
}
}
You can simply call the function. But remember that you are responsible for deallocating the memory used for the data object.
You can read the pixels in other formats too. For an MTLPixelFormat.rgba32Float texture I'm retrieving the data like this:
let pixels: UnsafeMutablePointer<Float32> = texture.getPixels()
defer {
pixels.deallocate()
}
let capacity = texture.width * texture.height * MemoryLayout<Float32>.stride
for i in stride(from: 0, to: capacity, by: 4) {
let l = pixels[i + 0]
let a = pixels[i + 1]
let b = pixels[i + 2]
let alpha = pixels[i + 3]
}
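For the macOS case in the question (a .managed texture), the synchronization sequence the question's EDIT refers to would look roughly like this; a minimal sketch assuming an existing commandQueue and the compareTexture from the question:
if let commandBuffer = commandQueue.makeCommandBuffer(),
   let blitEncoder = commandBuffer.makeBlitCommandEncoder() {
    // Copy the GPU's copy of the managed texture back into CPU-accessible memory.
    blitEncoder.synchronize(resource: compareTexture)
    blitEncoder.endEncoding()

    commandBuffer.commit()
    // Only call getBytes(...) after the blit has actually finished.
    commandBuffer.waitUntilCompleted()
}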

Accessing one class from another: Instance Member cannot be used on type

I'm getting into Swift by using the Xcode Playground, and I'm following an online course regarding simple image filtering.
So far, here's my code:
import UIKit
let image = UIImage(named: "sample")!
let rgbaImage = RGBAImage(image: image)
class ImageInfo {
let rgbaImage = RGBAImage(image: image)
func getAvgCols() -> [String:Int] {
var totalRed = 0
var totalGreen = 0
var totalBlue = 0
var avgCol = [String:Int]()
for y in 0..<rgbaImage!.height{
for x in 0..<rgbaImage!.width{
let index = y * rgbaImage!.width + x
let pixel = rgbaImage!.pixels[index]
totalRed += Int(pixel.red)
totalGreen += Int(pixel.green)
totalBlue += Int(pixel.blue)
}
}
let pixelCount = rgbaImage!.width * rgbaImage!.height
avgCol["Red"] = (totalRed / pixelCount)
avgCol["Green"] = (totalGreen / pixelCount)
avgCol["Blue"] = (totalBlue / pixelCount)
return avgCol
}
}
let imageInfo = ImageInfo()
var avgRed = imageInfo.getAvgCols()["Red"]!
var avgGreen = imageInfo.getAvgCols()["Green"]!
var avgBlue = imageInfo.getAvgCols()["Blue"]!
var modifier = 20
// IMAGE EDITING LOOP
for y in 0..<rgbaImage!.height{
for x in 0..<rgbaImage!.width{
let index = y * rgbaImage!.width + x
var pixel = rgbaImage!.pixels[index]
let redDelta = Int(pixel.red) - avgRed
let greenDelta = Int(pixel.green) - avgGreen
let blueDelta = Int(pixel.blue) - avgBlue
let redModifier = Double(modifier) * 0.49
let greenModifier = Double(modifier) * 0.25
let blueModifier = Double(modifier) * 0.07
pixel.red = UInt8(max(min(255, avgRed + Int(redModifier) * redDelta),0))
pixel.green = UInt8(max(min(255, avgGreen + Int(greenModifier) * greenDelta),0))
pixel.blue = UInt8(max(min(255, avgBlue + Int(blueModifier) * blueDelta),0))
rgbaImage!.pixels[index] = pixel
}
}
let newImage = rgbaImage!.toUIImage()!
So far, that works well. The last bit of code modifies the image on a per-pixel basis, and the end result is a poor-man's attempt at some sepia colour.
The problem, however, is when I stick the entire image editing loop in another class, like so:
class ImageEdit {
let imageInfo = ImageInfo()
var avgRed = imageInfo.getAvgCols()["Red"]!
var avgGreen = imageInfo.getAvgCols()["Green"]!
var avgBlue = imageInfo.getAvgCols()["Blue"]!
func applyFilter(filter: String, modifier: Int) -> UIImage{
for y in 0..<rgbaImage!.height{
for x in 0..<rgbaImage!.width{
let index = y * rgbaImage!.width + x
var pixel = rgbaImage!.pixels[index]
let redDelta = Int(pixel.red) - avgRed
let greenDelta = Int(pixel.green) - avgGreen
let blueDelta = Int(pixel.blue) - avgBlue
let redModifier = Double(modifier) * 0.49
let greenModifier = Double(modifier) * 0.25
let blueModifier = Double(modifier) * 0.07
pixel.red = UInt8(max(min(255, avgRed + Int(redModifier) * redDelta),0))
pixel.green = UInt8(max(min(255, avgGreen + Int(greenModifier) * greenDelta),0))
pixel.blue = UInt8(max(min(255, avgBlue + Int(blueModifier) * blueDelta),0))
rgbaImage!.pixels[index] = pixel
}
}
let newImage = rgbaImage!.toUIImage()!
return newImage
}
}
NOTE: Since I am using the playground, both classes are in the same file. For clarification, just take my previous code and replace everything below class ImageInfo with class ImageEdit.
I would need access to avgRed, avgGreen, and avgBlue, but all I'm getting is
instance member 'imageInfo' cannot be used on type 'ImageEdit'
and since I'm a noob at Swift, I'm not exactly sure how to fix it.
Did I miss anything?
EDIT: Here's RGBAImage. I did not write this code; it is freely available to anyone who takes the online course.
import UIKit
public struct Pixel {
public var value: UInt32
public var red: UInt8 {
get {
return UInt8(value & 0xFF)
}
set {
value = UInt32(newValue) | (value & 0xFFFFFF00)
}
}
public var green: UInt8 {
get {
return UInt8((value >> 8) & 0xFF)
}
set {
value = (UInt32(newValue) << 8) | (value & 0xFFFF00FF)
}
}
public var blue: UInt8 {
get {
return UInt8((value >> 16) & 0xFF)
}
set {
value = (UInt32(newValue) << 16) | (value & 0xFF00FFFF)
}
}
public var alpha: UInt8 {
get {
return UInt8((value >> 24) & 0xFF)
}
set {
value = (UInt32(newValue) << 24) | (value & 0x00FFFFFF)
}
}
}
public struct RGBAImage {
public var pixels: UnsafeMutableBufferPointer<Pixel>
public var width: Int
public var height: Int
public init?(image: UIImage) {
guard let cgImage = image.CGImage else { return nil }
// Redraw image for correct pixel format
let colorSpace = CGColorSpaceCreateDeviceRGB()
var bitmapInfo: UInt32 = CGBitmapInfo.ByteOrder32Big.rawValue
bitmapInfo |= CGImageAlphaInfo.PremultipliedLast.rawValue & CGBitmapInfo.AlphaInfoMask.rawValue
width = Int(image.size.width)
height = Int(image.size.height)
let bytesPerRow = width * 4
let imageData = UnsafeMutablePointer<Pixel>.alloc(width * height)
guard let imageContext = CGBitmapContextCreate(imageData, width, height, 8, bytesPerRow, colorSpace, bitmapInfo) else { return nil }
CGContextDrawImage(imageContext, CGRect(origin: CGPointZero, size: image.size), cgImage)
pixels = UnsafeMutableBufferPointer<Pixel>(start: imageData, count: width * height)
}
public func toUIImage() -> UIImage? {
let colorSpace = CGColorSpaceCreateDeviceRGB()
var bitmapInfo: UInt32 = CGBitmapInfo.ByteOrder32Big.rawValue
bitmapInfo |= CGImageAlphaInfo.PremultipliedLast.rawValue & CGBitmapInfo.AlphaInfoMask.rawValue
let bytesPerRow = width * 4
let imageContext = CGBitmapContextCreateWithData(pixels.baseAddress, width, height, 8, bytesPerRow, colorSpace, bitmapInfo, nil, nil)
guard let cgImage = CGBitmapContextCreateImage(imageContext) else {return nil}
let image = UIImage(CGImage: cgImage)
return image
}
}
The problem is that you are using the attribute imageInfo inside ImageEdit before the class has been initialised.
It is only when the initialiser runs that the attributes get their values. So avgRed can't get values from a function of imageInfo, because imageInfo has not been constructed yet.
Doing this inside a function (init() is basically a function) is just fine. Doing it in a default property value at the class level doesn't work.
This does not work:
class A {
let attribute : Int = 0
init() {
}
}
class B {
let attributeA = A()
let attributeB = attributeA.attribute // this is your problem
}
This does:
For all the attributes that rely on other attributes being constructed first, you use a late init. All the attributes that rely on external input, failable functions, and so on, you declare as optionals.
Late init: var / let attrName : Type
Optional: var / let attrName : Type? / Type!
class B {
let attributeA = A()
let attributeB : Int
var attributeC : Int?
init () {
attributeB = attributeA.attribute
attributeC = somethingOptional()
}
func somethingOptional() -> Int? {
return 0 // not really optional, but just illustration
}
}
So for you it would be something like this:
class ImageEdit {
let imageInfo = ImageInfo()
let dict : [String:Int]
init () {
dict = imageInfo.getAvgCols()
}
}
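Applied to the full example, a hedged sketch (keeping ImageInfo and the per-pixel loop from the question unchanged) could look like this:
class ImageEdit {
    let imageInfo = ImageInfo()
    // Declared first, assigned once the initialiser runs and imageInfo exists.
    let avgRed: Int
    let avgGreen: Int
    let avgBlue: Int

    init() {
        let averages = imageInfo.getAvgCols()
        avgRed = averages["Red"]!
        avgGreen = averages["Green"]!
        avgBlue = averages["Blue"]!
    }

    func applyFilter(filter: String, modifier: Int) -> UIImage {
        // ... same per-pixel loop as in the question ...
        return rgbaImage!.toUIImage()!
    }
}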

Convert an image into a 2D array (or equivalent) in Apple Swift

I'm wondering how I can turn a UIImage into something usable and modifiable. Java code to handle what I need would look something like this:
BufferedImage img= ImageIO.read(file);
Raster raster=img.getData();
int w=raster.getWidth(),h=raster.getHeight();
int pixels[][]=new int[w][h];
for (int x=0;x<w;x++)
{
for(int y=0;y<h;y++)
{
pixels[x][y]=raster.getSample(x,y,0);
}
}
I need to modify the alpha values in an image by visiting each pixel in the image.
Untested, but I think this will either work or should be very close.
import UIKit
import CoreGraphics

let uiimage = UIImage(contentsOfFile: "/PATH/TO/image.png")!
let image = uiimage.cgImage!

let width = image.width
let height = image.height
let colorspace = CGColorSpaceCreateDeviceRGB()
let bytesPerRow = 4 * width
let bitsPerComponent = 8

// ARGB layout (alpha first) so the offsets below read alpha, red, green, blue
let pixels = UnsafeMutablePointer<UInt8>.allocate(capacity: width * height * 4)
let context = CGContext(data: pixels, width: width, height: height,
                        bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow,
                        space: colorspace,
                        bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)!
context.draw(image, in: CGRect(x: 0, y: 0, width: CGFloat(width), height: CGFloat(height)))

for x in 0..<width {
    for y in 0..<height {
        // Here are your raw pixels
        let offset = 4 * ((width * y) + x)
        let alpha = pixels[offset]
        let red = pixels[offset + 1]
        let green = pixels[offset + 2]
        let blue = pixels[offset + 3]
    }
}
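Since the goal is to modify the alpha values, note that the context above uses premultiplied alpha, so colour components must not exceed the alpha value after editing. Once the bytes in pixels have been changed, a new image can be rebuilt from the same context; a hedged follow-up sketch using the names from the snippet above:
// Rebuild an image from the (possibly modified) pixel buffer.
if let modifiedCGImage = context.makeImage() {
    let modifiedImage = UIImage(cgImage: modifiedCGImage)
    // use modifiedImage ...
}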
If you really need a conversion to a 2D array, render the image into a byte array via CGContext and then split the array into parts. CGContext uses a 0...255 color range instead of 0...1, and the byte array will be in RGBA format.
Sample code, with conversion to 0...1 (as an extension on UIImage):
import UIKit
import CoreGraphics

extension UIImage {

func pixelData() -> [UInt8]? {
let dataSize = size.width * size.height * 4
var pixelData = [UInt8](repeating: 0, count: Int(dataSize))
let colorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: &pixelData,
width: Int(size.width),
height: Int(size.height),
bitsPerComponent: 8,
bytesPerRow: 4 * Int(size.width),
space: colorSpace,
bitmapInfo: CGImageAlphaInfo.noneSkipLast.rawValue)
guard let cgImage = self.cgImage,
let context = context else { return nil }
context.draw(cgImage, in: CGRect(origin: .zero, size: size))
return pixelData
}
func pixelMatrix() -> [[[Float]]]? {
guard let pixels = pixelData() else {
return nil
}
var data: [[[Float]]] = []
let width = Int(size.width)
let height = Int(size.height)
for y in 0..<height {
var row: [[Float]] = []
for x in 0..<width {
let offset = 4 * ((width * y) + x)
let red = Float(pixels[offset]) / 255
let green = Float(pixels[offset + 1]) / 255
let blue = Float(pixels[offset + 2]) / 255
let alpha = Float(pixels[offset + 3]) / 255
let pixel = [red, green, blue, alpha]
row.append(pixel)
}
data.append(row)
}
return data
}
}
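A hedged usage example (assuming an image named "sample" in the bundle):
if let matrix = UIImage(named: "sample")?.pixelMatrix() {
    // matrix[row][column] is [red, green, blue, alpha], each already scaled to 0...1
    let topLeftPixel = matrix[0][0]
    print("red:", topLeftPixel[0], "alpha:", topLeftPixel[3])
}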