Create solid color texture from scratch - Swift

I'm creating an engine and need a dummy single-pixel texture for objects that don't have one of the required materials. It should be a 4-channel texture, and an example invocation could look as follows:

let dummy = device.makeSolidTexture(device: device, color: simd_float4(0, 0, 1, 1))!

The code I'm using:
extension MTLDevice {
    func makeSolidTexture(device: MTLDevice,
                          color: simd_float4,
                          pixelFormat: MTLPixelFormat = .bgra8Unorm) -> MTLTexture? {
        let descriptor = MTLTextureDescriptor()
        descriptor.width = 1
        descriptor.height = 1
        descriptor.mipmapLevelCount = 1
        descriptor.storageMode = .managed
        descriptor.arrayLength = 1
        descriptor.sampleCount = 1
        descriptor.cpuCacheMode = .writeCombined
        descriptor.allowGPUOptimizedContents = false
        descriptor.pixelFormat = pixelFormat
        descriptor.textureType = .type2D
        descriptor.usage = .shaderRead
        guard let texture = device.makeTexture(descriptor: descriptor) else {
            return nil
        }
        let origin = MTLOrigin(x: 0, y: 0, z: 0)
        let size = MTLSize(width: texture.width, height: texture.height, depth: texture.depth)
        let region = MTLRegion(origin: origin, size: size)
        withUnsafePointer(to: color) { ptr in
            texture.replace(region: region, mipmapLevel: 0, withBytes: ptr, bytesPerRow: 4)
        }
        return texture
    }
}
The problem is that when I inspect the texture after capturing a frame, it is black. What am I missing? Why is it black?

It turned out that I made two mistakes there. The first is that textures must be aligned to 256 bytes, so I extended the size to 8 x 8, which, given the pixel format (4 bytes per pixel), meets that requirement. My second mistake was creating the texture from the float vector directly. The underlying storage of the texture holds each channel as a number in the range 0-255, so the float vector passed to the function must be scaled and then converted to simd_uchar4.
Here is how it should look:
extension MTLDevice {
    func makeSolid2DTexture(device: MTLDevice,
                            color: simd_float4,
                            pixelFormat: MTLPixelFormat = .bgra8Unorm) -> MTLTexture? {
        let descriptor = MTLTextureDescriptor()
        descriptor.width = 8
        descriptor.height = 8
        descriptor.mipmapLevelCount = 1
        descriptor.storageMode = .managed
        descriptor.arrayLength = 1
        descriptor.sampleCount = 1
        descriptor.cpuCacheMode = .writeCombined
        descriptor.allowGPUOptimizedContents = false
        descriptor.pixelFormat = pixelFormat
        descriptor.textureType = .type2D
        descriptor.usage = .shaderRead
        guard let texture = device.makeTexture(descriptor: descriptor) else {
            return nil
        }
        let origin = MTLOrigin(x: 0, y: 0, z: 0)
        let size = MTLSize(width: texture.width, height: texture.height, depth: texture.depth)
        let region = MTLRegion(origin: origin, size: size)
        let mappedColor = simd_uchar4(color * 255)
        Array<simd_uchar4>(repeating: mappedColor, count: 64).withUnsafeBytes { ptr in
            texture.replace(region: region, mipmapLevel: 0, withBytes: ptr.baseAddress!, bytesPerRow: 32)
        }
        return texture
    }
}
// bgra format in range [0, 1]
let foo = device.makeSolid2DTexture(device: device, color: simd_float4(1, 0, 0, 1))!
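A quick CPU-side sanity check (a sketch; since the managed texture was only written from the CPU via replace, a single-pixel readback is safe here):

var pixel = simd_uchar4()
foo.getBytes(&pixel, bytesPerRow: 4, from: MTLRegionMake2D(0, 0, 1, 1), mipmapLevel: 0)
print(pixel) // the bytes are stored exactly as passed in, so expect (255, 0, 0, 255)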

Related

How to create custom empty Texture correctly?

I need to create an MTLTexture with my custom data (which is currently filled with 0). To do that, I use this implementation:
private func createTexture(frame: UnsafeMutableRawPointer) -> MTLTexture? {
    let width = 2048
    let height = 2048
    let textureDescriptor = MTLTextureDescriptor.texture2DDescriptor(
        pixelFormat: MTLPixelFormat.rgba8Unorm,
        width: width,
        height: height,
        mipmapped: false)
    textureDescriptor.usage = [.shaderWrite, .shaderRead]
    guard let texture: MTLTexture = device?.makeTexture(descriptor: textureDescriptor) else {
        logger?.log(severity: .error, msg: "create texture FAILED.")
        return nil
    }
    let region = MTLRegion(origin: MTLOrigin(x: 0, y: 0, z: 0),
                           size: MTLSize(width: texture.width, height: texture.height, depth: 4))
    //MARK: >>> JUST FOR TEST
    let count = width * height * 4
    let stride = MemoryLayout<CChar>.stride
    let alignment = MemoryLayout<CChar>.alignment
    let byteCount = stride * count
    let p = UnsafeMutableRawPointer.allocate(byteCount: byteCount, alignment: alignment)
    let data = p.initializeMemory(as: CChar.self, repeating: 0, count: count)
    //MARK: <<<
    texture.replace(region: region, mipmapLevel: 0, withBytes: data, bytesPerRow: width * 4)
    return texture
}
So here I created a descriptor with 4 channels, then created a region with depth: 4, then created an UnsafeMutableRawPointer filled with stride * count bytes of data, but I get an error on this line:

texture.replace(region: region, mipmapLevel: 0, withBytes: data, bytesPerRow: width * 4)
_validateReplaceRegion:155: failed assertion `(origin.z + size.depth)(4) must be <= depth(1).'
What am I doing wrong?
The depth property in the following line is incorrect:

let region = MTLRegion(origin: MTLOrigin(x: 0, y: 0, z: 0),
                       size: MTLSize(width: texture.width, height: texture.height, depth: 4))

The depth property describes the number of elements in the z dimension. For a 2D texture it should be 1.
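Applied to the questioner's code, the corrected line reads:

let region = MTLRegion(origin: MTLOrigin(x: 0, y: 0, z: 0),
                       size: MTLSize(width: texture.width, height: texture.height, depth: 1))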

Swift: Apply LUT (Lookup Table) to an image using CIColorCube slow performance

I'm applying a LUT (from a .png, see the example LUT image) to an image using CIColorCube. It works well. The only problem I'm facing is that when I create the button thumbnails, the app freezes for a few seconds.
The buttons look like this (see the buttons example image).
This is my code:
@IBOutlet weak var filtersScrollView: UIScrollView!

var filters = ["-", "Filter1", "Filter2", "Filter3", "Filter4"]

override func viewDidLoad() {
    super.viewDidLoad()
    createFilters()
}

func createFilters() {
    var x: CGFloat = 10
    let y: CGFloat = 0
    let width: CGFloat = 60
    let height: CGFloat = 83
    let gap: CGFloat = 2
    for i in 0..<filters.count {
        let filterButton = UIButton(type: .custom)
        filterButton.frame = CGRect(x: x, y: y, width: width, height: height)
        filterButton.imageView?.contentMode = .scaleAspectFill
        filterButton.setTitleColor(#colorLiteral(red: 1, green: 1, blue: 1, alpha: 0), for: .normal)
        let text = UILabel()
        text.frame = CGRect(x: 0, y: height - 21, width: filterButton.frame.width, height: 21)
        text.textAlignment = .center
        text.backgroundColor = #colorLiteral(red: 0.9372549057, green: 0.3490196168, blue: 0.1921568662, alpha: 1)
        text.textColor = .white
        text.font = .systemFont(ofSize: 8.5, weight: .medium)
        filterButton.addSubview(text)
        filtersScrollView.insertSubview(filterButton, at: 1)
        x += width + gap
        if i == 0 {
            filterButton.setImage(originalImage, for: .normal)
            text.text = "-"
            text.backgroundColor = #colorLiteral(red: 0.1215686275, green: 0.1215686275, blue: 0.1215686275, alpha: 1)
        } else {
            // THIS LINE MAKES THE APP STOP FOR A FEW SECONDS
            let filteredImage = filterFromLUT(inputImage: originalCIImage, lut: "\(filters[i])")?.outputImage
            filterButton.setImage(UIImage(ciImage: filteredImage!), for: .normal)
            text.text = "\(filters[i])"
        }
    }
    filtersScrollView.contentSize = CGSize(width: x, height: height)
}
func filterFromLUT(inputImage: CIImage, lut: String) -> CIFilter? {
    let dimension = 64
    let lutImage = UIImage(named: lut)!.cgImage
    let width = lutImage!.width
    let height = lutImage!.height
    let rowNum = width / dimension
    let columnNum = height / dimension
    let bitmap = createBitmap(image: lutImage!)
    let dataSize = dimension * dimension * dimension * MemoryLayout<Float>.size * 4
    var array = Array<Float>(repeating: 0, count: dataSize)
    var bitmapOffest: Int = 0
    var z: Int = 0
    for _ in stride(from: 0, to: rowNum, by: 1) {
        for y in stride(from: 0, to: dimension, by: 1) {
            let tmp = z
            for _ in stride(from: 0, to: columnNum, by: 1) {
                for x in stride(from: 0, to: dimension, by: 1) {
                    let dataOffset = (z * dimension * dimension + y * dimension + x) * 4
                    let position = bitmap!.advanced(by: bitmapOffest)
                    array[dataOffset + 0] = Float(position.advanced(by: 0).pointee) / 255
                    array[dataOffset + 1] = Float(position.advanced(by: 1).pointee) / 255
                    array[dataOffset + 2] = Float(position.advanced(by: 2).pointee) / 255
                    array[dataOffset + 3] = Float(position.advanced(by: 3).pointee) / 255
                    bitmapOffest += 4
                }
                z += 1
            }
            z = tmp
        }
        z += columnNum
    }
    free(bitmap)
    let data = Data(bytes: array, count: dataSize)
    // Create the CIColorCube filter
    let filter = CIFilter.colorCube()
    filter.inputImage = inputImage
    filter.cubeData = data
    filter.cubeDimension = Float(dimension)
    return filter
}
func createBitmap(image: CGImage) -> UnsafeMutablePointer<UInt8>? {
    let width = image.width
    let height = image.height
    let bitsPerComponent = 8
    let bytesPerRow = width * 4
    let bitmapSize = bytesPerRow * height
    guard let data = malloc(bitmapSize) else {
        return nil
    }
    let context = CGContext(
        data: data,
        width: width,
        height: height,
        bitsPerComponent: bitsPerComponent,
        bytesPerRow: bytesPerRow,
        space: CGColorSpaceCreateDeviceRGB(),
        bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue,
        releaseCallback: nil,
        releaseInfo: nil)
    context!.draw(image, in: CGRect(x: 0, y: 0, width: width, height: height))
    return data.bindMemory(to: UInt8.self, capacity: bitmapSize)
}
I think that maybe the CGContext inside the createBitmap() function is causing this. Does anyone know how to solve this?
There are a few things you can do to improve performance:
Currently, you are processing the original input image (which I assume is pretty large) just to display the result in a 60 x 83 button. Consider scaling the image down first before putting it through the filters.
You can avoid blocking the UI by making the image processing code asynchronous. Just create the buttons in their proper size and DispatchQueue.global().async { ... } the image processing.
Don't use setImage(UIImage(ciImage: filteredImage)). In my experience, creating a UIImage from a CIImage this way is very unpredictable. Rather, use a CIContext to render the filtered image into a CGImage and convert that into a UIImage afterward. Also try to re-use a single CIContext instead of re-creating it for each image (see the sketch after this list).
The code for converting the LUT image into a float data array can be sped up by using vDSP (see below).
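Here is a minimal sketch combining the second and third points; renderThumbnail and sharedContext are illustrative names, not part of the original code:

let sharedContext = CIContext() // create once and re-use for every thumbnail

func renderThumbnail(from filteredImage: CIImage, completion: @escaping (UIImage?) -> Void) {
    DispatchQueue.global(qos: .userInitiated).async {
        // render through the shared CIContext instead of using UIImage(ciImage:)
        let cgImage = sharedContext.createCGImage(filteredImage, from: filteredImage.extent)
        DispatchQueue.main.async {
            completion(cgImage.map { UIImage(cgImage: $0) })
        }
    }
}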
Using vDSP for creating the LUT data (note: this reads the PNG's raw RGBA bytes straight into the cube, so it assumes the LUT image's byte layout already matches CIColorCube's cube order):

import Accelerate

let lutImage = UIImage(named: lut)!.cgImage!
let dimension = lutImage.height
// get the raw bytes from the image
let lutImageData = lutImage.dataProvider?.data
let lutImageDataPtr = CFDataGetBytePtr(lutImageData)!
// convert to float and divide by 255
let numElements = dimension * dimension * dimension * 4
let inDataFloat = UnsafeMutablePointer<Float>.allocate(capacity: numElements)
vDSP_vfltu8(lutImageDataPtr, 1, inDataFloat, 1, vDSP_Length(numElements))
var div: Float = 255.0
vDSP_vsdiv(inDataFloat, 1, &div, inDataFloat, 1, vDSP_Length(numElements))
// convert the buffer pointer to data
let lutData = NSData(bytesNoCopy: inDataFloat, length: numElements * MemoryLayout<Float>.size, freeWhenDone: true)
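The resulting lutData can then be plugged into the filter the same way as in the original filterFromLUT (a sketch, bridging the NSData to Data; dimension here is the value computed above):

let filter = CIFilter.colorCube()
filter.inputImage = inputImage
filter.cubeData = lutData as Data
filter.cubeDimension = Float(dimension)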

Creating CGImage/UIImage from grayscale matrix

I have a matrix of grayscale image pixels, for example:

[ [  0,   0, 125],
  [ 10,  50, 255],
  [ 90,   0, 255] ]
My goal is to apply a tint to it (UIColor) and export a CGImage/UIImage from the structure that holds it.
public typealias Pixel = UInt8

extension UIColor {
    var red: Float { return Float(CIColor(color: self).red * 255) }
    var green: Float { return Float(CIColor(color: self).green * 255) }
    var blue: Float { return Float(CIColor(color: self).blue * 255) }
    var alpha: Float { return Float(CIColor(color: self).alpha * 255) }
}

public struct PixelData {
    let r: UInt8
    let g: UInt8
    let b: UInt8
    let a: UInt8
}

public struct Map {
    let pixelCount: UInt
    let pixels: [Pixel] // all pixels of an image, linear
    let dimension: UInt // square root of pixel count
    let tintColor: UIColor = UIColor(red: 9/255, green: 133/255, blue: 61/255, alpha: 1)

    public var image: UIImage? {
        var pixelsData = [PixelData]()
        pixelsData.reserveCapacity(Int(pixelCount) * 3)
        let alpha = UInt8(tintColor.alpha)
        let redValue = tintColor.red
        let greenValue = tintColor.green
        let blueValue = tintColor.blue
        let red: [PixelData] = pixels.map {
            let redInt: UInt8 = UInt8((Float($0) / 255.0) * redValue)
            return PixelData(r: redInt, g: 0, b: 0, a: alpha)
        }
        let green: [PixelData] = pixels.map {
            let greenInt: UInt8 = UInt8((Float($0) / 255.0) * greenValue)
            return PixelData(r: 0, g: greenInt, b: 0, a: alpha)
        }
        let blue: [PixelData] = pixels.map {
            let blueInt: UInt8 = UInt8((Float($0) / 255.0) * blueValue)
            return PixelData(r: 0, g: 0, b: blueInt, a: alpha)
        }
        pixelsData.append(contentsOf: red)
        pixelsData.append(contentsOf: green)
        pixelsData.append(contentsOf: blue)
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue)
        let bitsPerComponent = 8
        let bitsPerPixel = 32
        let dimension: Int = Int(self.dimension)
        var data = pixelsData
        guard let providerRef = CGDataProvider(
            data: NSData(bytes: &data, length: data.count * MemoryLayout<PixelData>.size)
        ) else { return nil }
        if let cgim = CGImage(
            width: dimension,
            height: dimension,
            bitsPerComponent: bitsPerComponent,
            bitsPerPixel: bitsPerPixel,
            bytesPerRow: dimension * MemoryLayout<PixelData>.size,
            space: rgbColorSpace,
            bitmapInfo: bitmapInfo,
            provider: providerRef,
            decode: nil,
            shouldInterpolate: true,
            intent: .defaultIntent
        ) {
            return UIImage(cgImage: cgim)
        }
        return nil
    }
}
The problem is that the output looks like gibberish. I have used this tutorial and this SO thread, but with no success. The result in the playground shows the output is there, just barely visible.
Any help appreciated!
There are two key issues.
First, the code calculates all the red values for every grayscale pixel, creating a four-byte PixelData for each (even though only the red channel is populated) and appending them to the pixelsData array. It then repeats that for the green values, and again for the blue values. That produces three times as much data as the image needs, and only the red third is ever used.
Instead, we should calculate the RGBA values once, creating one PixelData per pixel.
Second, premultipliedFirst means ARGB, but your structure is using RGBA, so you want premultipliedLast.
Thus:
func generateTintedImage(completion: @escaping (UIImage?) -> Void) {
    DispatchQueue.global(qos: .userInitiated).async {
        let image = self.tintedImage()
        DispatchQueue.main.async {
            completion(image)
        }
    }
}

private func tintedImage() -> UIImage? {
    let tintRed = tintColor.red
    let tintGreen = tintColor.green
    let tintBlue = tintColor.blue
    let tintAlpha = tintColor.alpha
    let data = pixels.map { pixel -> PixelData in
        let red = UInt8((Float(pixel) / 255) * tintRed)
        let green = UInt8((Float(pixel) / 255) * tintGreen)
        let blue = UInt8((Float(pixel) / 255) * tintBlue)
        let alpha = UInt8(tintAlpha)
        return PixelData(r: red, g: green, b: blue, a: alpha)
    }.withUnsafeBytes { Data($0) }
    let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedLast.rawValue)
    let bitsPerComponent = 8
    let bitsPerPixel = 32
    guard
        let providerRef = CGDataProvider(data: data as CFData),
        let cgImage = CGImage(width: width,
                              height: height,
                              bitsPerComponent: bitsPerComponent,
                              bitsPerPixel: bitsPerPixel,
                              bytesPerRow: width * MemoryLayout<PixelData>.stride,
                              space: rgbColorSpace,
                              bitmapInfo: bitmapInfo,
                              provider: providerRef,
                              decode: nil,
                              shouldInterpolate: true,
                              intent: .defaultIntent)
    else {
        return nil
    }
    return UIImage(cgImage: cgImage)
}
I’ve also renamed a few variables, used stride instead of size, and replaced dimension with width and height so I could process non-square images, etc.
I would also advise against using a computed property for anything this computationally intensive, so I made it an asynchronous method, which you might use as follows:

let map = Map(with: image)
map.generateTintedImage { image in
    self.tintedImageView.image = image
}

Anyway, the above yields the tinted image you were after (the rightmost image in my test screenshot).
Needless to say, to convert your matrix into your pixels array, you can just flatten the array of arrays:
let matrix: [[Pixel]] = [
    [ 0,  0, 125],
    [10, 50, 255],
    [90,  0, 255]
]

pixels = matrix.flatMap { $0 }
Here is a parallelized rendition which is also slightly more efficient with respect to the memory buffer:
private func tintedImage() -> UIImage? {
    let tintAlpha = tintColor.alpha
    let tintRed = tintColor.red / 255
    let tintGreen = tintColor.green / 255
    let tintBlue = tintColor.blue / 255
    let alpha = UInt8(tintAlpha)
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGImageAlphaInfo.premultipliedLast.rawValue
    let bitsPerComponent = 8
    let bytesPerRow = width * MemoryLayout<PixelData>.stride
    guard
        let context = CGContext(data: nil,
                                width: width,
                                height: height,
                                bitsPerComponent: bitsPerComponent,
                                bytesPerRow: bytesPerRow,
                                space: colorSpace,
                                bitmapInfo: bitmapInfo),
        let data = context.data
    else {
        return nil
    }
    let buffer = data.bindMemory(to: PixelData.self, capacity: width * height)
    DispatchQueue.concurrentPerform(iterations: height) { row in
        let start = width * row
        let end = start + width
        for i in start ..< end {
            let pixel = pixels[i]
            let red = UInt8(Float(pixel) * tintRed)
            let green = UInt8(Float(pixel) * tintGreen)
            let blue = UInt8(Float(pixel) * tintBlue)
            buffer[i] = PixelData(r: red, g: green, b: blue, a: alpha)
        }
    }
    return context.makeImage()
        .flatMap { UIImage(cgImage: $0) }
}
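As a design note, DispatchQueue.concurrentPerform runs one iteration per row across the available cores; because each row writes to a disjoint slice of the context's buffer, no locking is needed, and rendering straight into the CGContext's backing memory avoids the intermediate Data copy of the previous version.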

Why can't I read those MTLTexture pixels?

I am trying to get the pixels of a MTLTexture this way:

let pixelCount = compareTexture.width * compareTexture.height
let region = MTLRegionMake2D(0, 0, compareTexture.width, compareTexture.height)
var textureComponentsArray = Array<float4>(repeating: float4(0), count: pixelCount)
textureComponentsArray.withUnsafeMutableBytes {
    compareTexture.getBytes($0.baseAddress!,
                            bytesPerRow: MemoryLayout<float4>.size * compareTexture.width,
                            from: region,
                            mipmapLevel: 0)
}
print(textureComponentsArray.first!)

Unfortunately, most elements are either NaN or equal to the values I initialized textureComponentsArray with. For instance, this code prints:
float4(nan, nan, nan, nan)
I am on macOS and my MTLTexture has those properties:
let textureDescriptor = MTLTextureDescriptor()
textureDescriptor.width = imageTexture.width
textureDescriptor.height = imageTexture.height
textureDescriptor.pixelFormat = imageTexture.pixelFormat
textureDescriptor.resourceOptions = .storageModeManaged
textureDescriptor.storageMode = .managed
textureDescriptor.usage = [.renderTarget, .shaderRead, .shaderWrite]
I use the managed storage mode, so the data should be available to the CPU; I don't understand why it doesn't work.
Thank you.
EDIT:
I have tried using:

blitCommandEncoder.synchronize(resource: compareTexture)

and I do wait for the command buffer to complete, but the issue persists.
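For reference, a minimal sketch of the full synchronize-then-read pattern (commandQueue stands in for my existing MTLCommandQueue):

let commandBuffer = commandQueue.makeCommandBuffer()!
let blitEncoder = commandBuffer.makeBlitCommandEncoder()!
blitEncoder.synchronize(resource: compareTexture) // copy GPU-side changes back to CPU-visible memory
blitEncoder.endEncoding()
commandBuffer.commit()
commandBuffer.waitUntilCompleted()
// only after this completes is compareTexture.getBytes(...) expected to see fresh data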
I needed an answer to this question for iOS. I'm leaving here the extension I wrote for my needs.

import Metal

extension MTLTexture {
    // T must span an entire pixel (e.g. SIMD4<Float32> for .rgba32Float);
    // otherwise bytesPerRow would be smaller than the texture's actual row size.
    func getPixels<T>(_ region: MTLRegion? = nil, mipmapLevel: Int = 0) -> UnsafeMutablePointer<T> {
        let fromRegion = region ?? MTLRegionMake2D(0, 0, self.width, self.height)
        let width = fromRegion.size.width
        let height = fromRegion.size.height
        let bytesPerRow = MemoryLayout<T>.stride * width
        let data = UnsafeMutablePointer<T>.allocate(capacity: width * height)
        self.getBytes(data, bytesPerRow: bytesPerRow, from: fromRegion, mipmapLevel: mipmapLevel)
        return data
    }
}
You can simply call the function, but remember that you are responsible for deallocating the memory backing the returned pointer.
You can read pixels in other formats too. For a MTLPixelFormat.rgba32Float texture I'm retrieving the data with a four-float pixel type:

let pixels: UnsafeMutablePointer<SIMD4<Float32>> = texture.getPixels()
defer {
    pixels.deallocate()
}
for i in 0 ..< texture.width * texture.height {
    let red = pixels[i].x
    let green = pixels[i].y
    let blue = pixels[i].z
    let alpha = pixels[i].w
}
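The same helper works for a .bgra8Unorm texture with a four-byte pixel type (a sketch; note the component order):

let pixels: UnsafeMutablePointer<SIMD4<UInt8>> = texture.getPixels()
defer { pixels.deallocate() }
let first = pixels[0] // components are in b, g, r, a order for .bgra8Unorm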

Convert an image into a 2D array (or equivalent) in Apple Swift

I'm wondering how I can turn a UIImage into something usable and modifiable. Java code to handle what I need would look something like this:
BufferedImage img = ImageIO.read(file);
Raster raster = img.getData();
int w = raster.getWidth(), h = raster.getHeight();
int pixels[][] = new int[w][h];
for (int x = 0; x < w; x++) {
    for (int y = 0; y < h; y++) {
        pixels[x][y] = raster.getSample(x, y, 0);
    }
}
I need to modify the alpha values in an image by visiting each pixel in the image.
Untested, but I think this will either work or should be very close.
import UIKit
import CoreGraphics

let uiimage = UIImage(contentsOfFile: "/PATH/TO/image.png")!
let image = uiimage.cgImage!
let width = image.width
let height = image.height
let colorspace = CGColorSpaceCreateDeviceRGB()
let bytesPerRow = 4 * width
let bitsPerComponent = 8
let pixels = UnsafeMutablePointer<UInt8>.allocate(capacity: width * height * 4)
let context = CGContext(data: pixels,
                        width: width,
                        height: height,
                        bitsPerComponent: bitsPerComponent,
                        bytesPerRow: bytesPerRow,
                        space: colorspace,
                        bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)!
context.draw(image, in: CGRect(x: 0, y: 0, width: width, height: height))
for x in 0 ..< width {
    for y in 0 ..< height {
        // Here are your raw pixels, in RGBA order with premultipliedLast
        let offset = 4 * ((width * y) + x)
        let red = pixels[offset]
        let green = pixels[offset + 1]
        let blue = pixels[offset + 2]
        let alpha = pixels[offset + 3]
    }
}
pixels.deallocate()
If you really need a conversion to a 2D array, render the image into a byte array via CGContext and then split the array into parts. CGContext uses a 0...255 color range instead of 0...1. The byte array will be in RGBA format.
Sample code with conversion to 0...1 (these methods belong in an extension on UIImage, since they use self.size and self.cgImage):
import UIKit
import CoreGraphics

func pixelData() -> [UInt8]? {
    let dataSize = size.width * size.height * 4
    var pixelData = [UInt8](repeating: 0, count: Int(dataSize))
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let context = CGContext(data: &pixelData,
                            width: Int(size.width),
                            height: Int(size.height),
                            bitsPerComponent: 8,
                            bytesPerRow: 4 * Int(size.width),
                            space: colorSpace,
                            bitmapInfo: CGImageAlphaInfo.noneSkipLast.rawValue)
    guard let cgImage = self.cgImage,
          let context = context else { return nil }
    context.draw(cgImage, in: CGRect(origin: .zero, size: size))
    return pixelData
}
func pixelMatrix() -> [[[Float]]]? {
    guard let pixels = pixelData() else {
        return nil
    }
    var data: [[[Float]]] = []
    let width = Int(size.width)
    let height = Int(size.height)
    for y in 0..<height {
        var row: [[Float]] = []
        for x in 0..<width {
            let offset = 4 * ((width * y) + x)
            let red = Float(pixels[offset]) / 255
            let green = Float(pixels[offset + 1]) / 255
            let blue = Float(pixels[offset + 2]) / 255
            let alpha = Float(pixels[offset + 3]) / 255
            let pixel = [red, green, blue, alpha]
            row.append(pixel)
        }
        data.append(row)
    }
    return data
}
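A minimal usage sketch, wrapping the two methods in a UIImage extension ("photo" is a placeholder asset name):

extension UIImage {
    // pixelData() and pixelMatrix() from above go here
}

if let image = UIImage(named: "photo"),
   let matrix = image.pixelMatrix() {
    // matrix[y][x] is [red, green, blue, alpha], each in 0...1
    print(matrix[0][0])
}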