I want to pick the colour of a specific pixel of a UIImage in Swift 3, and this method is called ~10k times.
func pixelColour(_ pixelPosition: CGPoint) {
    if !CGRect(x: 0.0, y: 0.0, width: self.size.width, height: self.size.height).contains(pixelPosition) {
        return
    }
    let pointX = trunc(pixelPosition.x)
    let pointY = trunc(pixelPosition.y)
    let cgImage = self.cgImage
    let width = self.size.width
    let height = self.size.height
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let bytesPerPixel = 4
    let bytesPerRow = bytesPerPixel * 1
    let bitsPerComponent = 8
    let pixelData = UnsafeMutablePointer<CUnsignedChar>.allocate(capacity: 4)
    let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedLast.rawValue)
    let context = CGContext(data: pixelData, width: 1, height: 1, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue)
    context?.setBlendMode(.copy)
    context?.translateBy(x: -pointX, y: pointY - height)
    // This line takes too much memory, how to release memory here?
    context?.draw(cgImage!, in: CGRect(x: 0.0, y: 0.0, width: width, height: height))
    print("\(pixelData[0]) \(pixelData[1]) \(pixelData[2])")
    pixelData.deallocate(capacity: 4)
}
Unfortunately it seems that the memory is not released, because the app crashes after checking ~500 pixels. How can I solve this problem?
You have not shown how pixelColour is called, but I presume that it is in some sort of loop. If so, surround your repeated call to pixelColour with an autoreleasepool{...} call to release the accumulated memory each time through your loop:
let p = // next CGPoint
autoreleasepool {
    pixelColour(p)
}
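A fuller sketch of such a loop, assuming pixelColour lives in a UIImage extension and using a hypothetical image sampled on a 100×100 grid:
// Sketch: sample a grid of points, draining autoreleased
// temporaries on every pass. `image` is hypothetical.
for x in 0..<100 {
    for y in 0..<100 {
        autoreleasepool {
            image.pixelColour(CGPoint(x: x, y: y))
        }
    }
}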
Taken from How do I get the color of a pixel in a UIImage with Swift? and converted to Swift 3:
extension UIImage {
    func getPixelColor(pos: CGPoint) -> UIColor? {
        guard let provider = cgImage?.dataProvider else {
            return nil
        }
        let pixelData = provider.data
        let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
        let pixelInfo: Int = ((Int(self.size.width) * Int(pos.y)) + Int(pos.x)) * 4
        let r = CGFloat(data[pixelInfo]) / CGFloat(255.0)
        let g = CGFloat(data[pixelInfo + 1]) / CGFloat(255.0)
        let b = CGFloat(data[pixelInfo + 2]) / CGFloat(255.0)
        let a = CGFloat(data[pixelInfo + 3]) / CGFloat(255.0)
        return UIColor(red: r, green: g, blue: b, alpha: a)
    }
}
This should not take too much memory, since it reads through a pointer into the image's existing backing data instead of redrawing the image. Note that the index math assumes a tightly packed RGBA layout (bytesPerRow == width * 4); if the image's bytesPerRow differs, use cgImage.bytesPerRow in the offset calculation.
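Typical usage (a sketch; the asset name is hypothetical):
// Sketch: read one pixel's colour from a bundled image.
let image = UIImage(named: "photo")
let colour = image?.getPixelColor(pos: CGPoint(x: 10, y: 10))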
Hope it helps.
I have a function cropAlpha() that trims away the fully transparent margins of an image.
func cropAlpha() -> UIImage {
    let cgImage = self.cgImage!
    let width = cgImage.width
    let height = cgImage.height
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let bytesPerPixel: Int = 4
    let bytesPerRow = bytesPerPixel * width
    let bitsPerComponent = 8
    let bitmapInfo: UInt32 = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Big.rawValue
    guard let context = CGContext(data: nil, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo),
          let ptr = context.data?.assumingMemoryBound(to: UInt8.self)
    else { return self }
    context.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))
    // Find the bounding box of all fully opaque pixels.
    var minX = width
    var minY = height
    var maxX: Int = 0
    var maxY: Int = 0
    for x in 0 ..< width {
        for y in 0 ..< height {
            let i = bytesPerRow * y + bytesPerPixel * x
            let a = CGFloat(ptr[i + 3]) / 255.0
            if a == 1 {
                if x < minX { minX = x }
                if x > maxX { maxX = x }
                if y < minY { minY = y }
                if y > maxY { maxY = y }
            }
        }
    }
    let rect = CGRect(x: CGFloat(minX), y: CGFloat(minY), width: CGFloat(maxX - minX), height: CGFloat(maxY - minY))
    let croppedImage = cgImage.cropping(to: rect)!
    return UIImage(cgImage: croppedImage)
}
The image returned by this function has transparent areas, and I put it into an image view: presenterImageView.image = imagePNG. It works as it should. But when I try to save the UIImage to the photo gallery, the transparent background turns white.
let image = maskedImage?.cropAlpha()
let imagePNGData = image!.pngData()
let imagePNG = UIImage(data: imagePNGData!)
UIImageWriteToSavedPhotosAlbum(imagePNG!, nil, nil, nil)
If I don't use that function, I get the result I want, but the image has too much wasted space. I don't understand what could be the reason. Any ideas?
The problem is that UIImageWriteToSavedPhotosAlbum does not properly handle saving a UIImage with premultiplied alpha (or at least the result of saving such an image is not what you expect), and your cropping method uses the premultipliedLast format. You also can't simply change CGImageAlphaInfo to a non-premultiplied format, because CGContext does not support that combination (you will see the error CGBitmapContextCreate: unsupported parameter combination if you try). What you can do instead is convert the cropped image to a CIImage, unpremultiply the alpha, and convert back to a UIImage. Your saving code could then look like the snippet below (though I recommend removing the force unwrapping if you plan to use this in a shipping app):
let image = maskedImage?.cropAlpha()
let ciImage = CIImage(image: image!)!.unpremultiplyingAlpha()
let uiImage = UIImage(ciImage: ciImage)
let imagePNGData = uiImage.pngData()
let imagePNG = UIImage(data: imagePNGData!)
UIImageWriteToSavedPhotosAlbum(imagePNG!, nil, nil, nil)
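A version with the force unwraps replaced by guards might look like this (a sketch, wrapped in a hypothetical save function):
// Sketch: same pipeline, but bailing out instead of force-unwrapping.
func saveCroppedImage(_ maskedImage: UIImage?) {
    guard
        let cropped = maskedImage?.cropAlpha(),
        let ciImage = CIImage(image: cropped)?.unpremultiplyingAlpha(),
        let pngData = UIImage(ciImage: ciImage).pngData(),
        let png = UIImage(data: pngData)
    else { return }
    UIImageWriteToSavedPhotosAlbum(png, nil, nil, nil)
}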
My app began crashing with EXC_BAD_ACCESS after updating to Xcode 11.4.
It only crashes when built with the release build configuration, not the debug configuration.
It breaks at: let img = ctx.makeImage()!
func image() -> CGImage {
    if let image = generatedImage {
        return image
    }
    let w = Int(size.width * scale)
    let h = Int(size.height * scale)
    let bitsPerComponent: Int = MemoryLayout<UInt8>.size * 8
    let bytesPerPixel: Int = bitsPerComponent * 4 / 8
    let bytesPerRow = w * bytesPerPixel
    var data = [ARGB]()
    for y in 0..<h {
        for x in 0..<w {
            let c = pixelDataForGradient(at: CGPoint(x: x, y: y),
                                         size: CGSize(width: w, height: h),
                                         colors: colors,
                                         locations: locations,
                                         startPoint: startPoint,
                                         endPoint: endPoint)
            data.append(c)
        }
    }
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue)
    let ctx = CGContext(data: &data, width: w, height: h, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue)!
    ctx.interpolationQuality = .none
    ctx.setShouldAntialias(false)
    let img = ctx.makeImage()!
    generatedImage = img
    return img
}
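A likely culprit, offered as an assumption: &data passes a pointer to the array's storage that Swift only guarantees to be valid for the duration of the CGContext initializer call itself, yet the context keeps reading from that memory until makeImage(). Optimized release builds are free to move or free the array's buffer in the meantime, which would explain the release-only EXC_BAD_ACCESS. A sketch of a safer pattern pins the buffer for the context's whole lifetime:
// Sketch: create the context and the image while the array's buffer
// is explicitly kept alive. `ARGB`, `w`, `h`, etc. come from the code above.
let img: CGImage = data.withUnsafeMutableBytes { buffer in
    let ctx = CGContext(data: buffer.baseAddress,
                        width: w,
                        height: h,
                        bitsPerComponent: bitsPerComponent,
                        bytesPerRow: bytesPerRow,
                        space: colorSpace,
                        bitmapInfo: bitmapInfo.rawValue)!
    ctx.interpolationQuality = .none
    ctx.setShouldAntialias(false)
    return ctx.makeImage()!
}
generatedImage = img
return img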
I have a matrix of greyscale image pixels, for example:
[ [0, 0, 125],
[10, 50, 255],
[90, 0, 255] ]
My goal is to apply a tint to it (UIColor) and export a CGImage/UIImage from the structure that holds it.
public typealias Pixel = UInt8

extension UIColor {
    var red: Float { return Float(CIColor(color: self).red * 255) }
    var green: Float { return Float(CIColor(color: self).green * 255) }
    var blue: Float { return Float(CIColor(color: self).blue * 255) }
    var alpha: Float { return Float(CIColor(color: self).alpha * 255) }
}

public struct PixelData {
    let r: UInt8
    let g: UInt8
    let b: UInt8
    let a: UInt8
}

public struct Map {
    let pixelCount: UInt
    let pixels: [Pixel]   // all pixels of an image, linear
    let dimension: UInt   // square root of pixel count
    let tintColor: UIColor = UIColor(red: 9/255, green: 133/255, blue: 61/255, alpha: 1)

    public var image: UIImage? {
        var pixelsData = [PixelData]()
        pixelsData.reserveCapacity(Int(pixelCount) * 3)

        let alpha = UInt8(tintColor.alpha)
        let redValue = tintColor.red
        let greenValue = tintColor.green
        let blueValue = tintColor.blue

        let red: [PixelData] = pixels.map {
            let redInt: UInt8 = UInt8((Float($0) / 255.0) * redValue)
            return PixelData(r: redInt, g: 0, b: 0, a: alpha)
        }
        let green: [PixelData] = pixels.map {
            let greenInt: UInt8 = UInt8((Float($0) / 255.0) * greenValue)
            return PixelData(r: 0, g: greenInt, b: 0, a: alpha)
        }
        let blue: [PixelData] = pixels.map {
            let blueInt: UInt8 = UInt8((Float($0) / 255.0) * blueValue)
            return PixelData(r: 0, g: 0, b: blueInt, a: alpha)
        }
        pixelsData.append(contentsOf: red)
        pixelsData.append(contentsOf: green)
        pixelsData.append(contentsOf: blue)

        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue)
        let bitsPerComponent = 8
        let bitsPerPixel = 32
        let dimension: Int = Int(self.dimension)

        var data = pixelsData
        guard let providerRef = CGDataProvider(
            data: NSData(bytes: &data, length: data.count * MemoryLayout<PixelData>.size)
        ) else { return nil }

        if let cgim = CGImage(
            width: dimension,
            height: dimension,
            bitsPerComponent: bitsPerComponent,
            bitsPerPixel: bitsPerPixel,
            bytesPerRow: dimension * MemoryLayout<PixelData>.size,
            space: rgbColorSpace,
            bitmapInfo: bitmapInfo,
            provider: providerRef,
            decode: nil,
            shouldInterpolate: true,
            intent: .defaultIntent
        ) {
            return UIImage(cgImage: cgim)
        }
        return nil
    }
}
The problem is that the output looks like gibberish. I have used this tutorial and this SO thread, but with no success. The result in the playground is:
(screenshot omitted; the output is there, just barely visible)
Any help appreciated!
There are two key issues.
The code calculates all the red values for every grayscale pixel, creates a four-byte PixelData for each (even though only the red channel is populated), and appends those to the pixelsData array. It then repeats that for the green values, and again for the blue values. That produces three times as much data as the image needs, and only the first third of it (the red-only pixels) is ever used.
Instead, we should calculate the RGBA values once per pixel and create a single PixelData for each.
The premultipliedFirst means ARGB. But your structure is using RGBA, so you want premultipliedLast.
Thus:
func generateTintedImage(completion: @escaping (UIImage?) -> Void) {
    DispatchQueue.global(qos: .userInitiated).async {
        let image = self.tintedImage()
        DispatchQueue.main.async {
            completion(image)
        }
    }
}

private func tintedImage() -> UIImage? {
    let tintRed = tintColor.red
    let tintGreen = tintColor.green
    let tintBlue = tintColor.blue
    let tintAlpha = tintColor.alpha

    let data = pixels.map { pixel -> PixelData in
        let red = UInt8((Float(pixel) / 255) * tintRed)
        let green = UInt8((Float(pixel) / 255) * tintGreen)
        let blue = UInt8((Float(pixel) / 255) * tintBlue)
        let alpha = UInt8(tintAlpha)
        return PixelData(r: red, g: green, b: blue, a: alpha)
    }.withUnsafeBytes { Data($0) }

    let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedLast.rawValue)
    let bitsPerComponent = 8
    let bitsPerPixel = 32

    guard
        let providerRef = CGDataProvider(data: data as CFData),
        let cgImage = CGImage(width: width,
                              height: height,
                              bitsPerComponent: bitsPerComponent,
                              bitsPerPixel: bitsPerPixel,
                              bytesPerRow: width * MemoryLayout<PixelData>.stride,
                              space: rgbColorSpace,
                              bitmapInfo: bitmapInfo,
                              provider: providerRef,
                              decode: nil,
                              shouldInterpolate: true,
                              intent: .defaultIntent)
    else {
        return nil
    }

    return UIImage(cgImage: cgImage)
}
I’ve also renamed a few variables, used stride instead of size, and replaced dimension with width and height so I could process non-square images, etc.
I would also advise against using a computed property for anything this computationally intensive, so I made it an asynchronous method, which you might use as follows:
let map = Map(with: image)
map.generateTintedImage { image in
    self.tintedImageView.image = image
}
Anyway, the above yields the expected result (screenshot omitted), where the rightmost image is the tinted image.
Needless to say, to convert your matrix into your pixels array, you can just flatten the array of arrays:
let matrix: [[Pixel]] = [
    [0, 0, 125],
    [10, 50, 255],
    [90, 0, 255]
]

pixels = matrix.flatMap { $0 }
Here is a parallelized rendition which is also slightly more efficient with respect to the memory buffer:
private func tintedImage() -> UIImage? {
    let tintAlpha = tintColor.alpha
    let tintRed = tintColor.red / 255
    let tintGreen = tintColor.green / 255
    let tintBlue = tintColor.blue / 255
    let alpha = UInt8(tintAlpha)

    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGImageAlphaInfo.premultipliedLast.rawValue
    let bitsPerComponent = 8
    let bytesPerRow = width * MemoryLayout<PixelData>.stride

    guard
        let context = CGContext(data: nil, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo),
        let data = context.data
    else {
        return nil
    }

    let buffer = data.bindMemory(to: PixelData.self, capacity: width * height)

    DispatchQueue.concurrentPerform(iterations: height) { row in
        let start = width * row
        let end = start + width
        for i in start ..< end {
            let pixel = pixels[i]
            let red = UInt8(Float(pixel) * tintRed)
            let green = UInt8(Float(pixel) * tintGreen)
            let blue = UInt8(Float(pixel) * tintBlue)
            buffer[i] = PixelData(r: red, g: green, b: blue, a: alpha)
        }
    }

    return context.makeImage()
        .flatMap { UIImage(cgImage: $0) }
}
I'm trying to get bitmap data from a CGImage and read it into an array of pixels. However, my imageContext is always nil, so imageContext?.draw() never does anything.
let capturedimage = UIImage(named: "random.jpg")
let image = capturedimage?.cgImage
let width = Int((image?.width)!)
let height = Int((image?.height)!)

let bytesPerPixel = 4
let bytesPerRow = bytesPerPixel * width
let bitsPerComponent = 8

let pixelArr: [[UInt32]] = Array(repeating: Array(repeating: 0, count: width), count: height)
let pointer: UnsafeMutableRawPointer = UnsafeMutableRawPointer(mutating: pixelArr)

let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo: UInt32 = CGBitmapInfo.byteOrder32Big.rawValue

let imageContext = CGContext(data: pointer, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo)
imageContext?.draw(image!, in: CGRect(x: 0, y: 0, width: width, height: height))
I've checked my code multiple times, but I don't know where I went wrong.
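Two things stand out, and the corrected version below addresses both. First, a nested [[UInt32]] is not a contiguous pixel buffer: the outer array stores references to the inner arrays, so the raw pointer you create does not point at pixel memory. Second, byteOrder32Big by itself is an unsupported bitmapInfo for an RGB context; CGContext needs an alpha setting such as premultipliedLast, so the initializer returns nil.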
let image = UIImage(named: "2.png")

guard let cgImage = image?.cgImage else {
    fatalError()
}

let width = cgImage.width
let height = cgImage.height

let bytesPerPixel = 4
let bytesPerRow = bytesPerPixel * width
let bitsPerComponent = 8

// One flat, contiguous buffer of 32-bit pixels, zeroed before use.
let pixelsCount = width * height
let bitmapData: UnsafeMutablePointer<UInt32> = .allocate(capacity: pixelsCount)
defer {
    bitmapData.deallocate()
}
bitmapData.initialize(repeating: 0, count: pixelsCount)

let pixelAtPosition: ((Int, Int) -> UInt32) = { x, y in
    return bitmapData[y * width + x]
}

// premultipliedLast supplies the alpha info the RGB color space requires;
// byteOrder32Big alone is an unsupported combination.
guard let context = CGContext(data: bitmapData, width: width, height: height,
                              bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow,
                              space: CGColorSpaceCreateDeviceRGB(), bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue) else {
    fatalError()
}

// Draw the image into the context so the buffer is filled with pixel data.
let rect = CGRect(x: 0, y: 0, width: width, height: height)
context.draw(cgImage, in: rect)

// On little-endian iOS, the four RGBA bytes read back as one
// UInt32 with red in the low byte, hence the masks below.
let target = pixelAtPosition(200, 200)

print(String(format: "pixel 0x%08X", target))
print(String(format: "red 0x%2X", target & 0x000000FF))
print(String(format: "green 0x%2X", (target & 0x0000FF00) >> 8))
print(String(format: "blue 0x%2X", (target & 0x00FF0000) >> 16))
print(String(format: "alpha 0x%2X", (target & 0xFF000000) >> 24))
I'm sorry to ask this, but I can't figure out how to represent a UIImage as an array of UIColor values, one per pixel. I've tried converting the output of UIImagePNGRepresentation/UIImageJPEGRepresentation, but couldn't get the desired result.
Here's a Swiftier version (Swift 3):
extension UIImage {
    var colors: [UIColor]? {
        guard let cgImage = cgImage else {
            return nil
        }
        let width = Int(size.width)
        let height = Int(size.height)

        var colors = [UIColor]()
        colors.reserveCapacity(width * height)

        let colorSpace = CGColorSpaceCreateDeviceRGB()
        var rawData = [UInt8](repeating: 0, count: width * height * 4)
        let bytesPerPixel = 4
        let bytesPerRow = bytesPerPixel * width
        let bitsPerComponent = 8
        let bitmapInfo = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Big.rawValue

        // Draw inside withUnsafeMutableBytes so the buffer pointer is
        // guaranteed to remain valid while the context uses it.
        rawData.withUnsafeMutableBytes { buffer in
            let context = CGContext(data: buffer.baseAddress,
                                    width: width,
                                    height: height,
                                    bitsPerComponent: bitsPerComponent,
                                    bytesPerRow: bytesPerRow,
                                    space: colorSpace,
                                    bitmapInfo: bitmapInfo)
            let drawingRect = CGRect(origin: .zero, size: CGSize(width: width, height: height))
            context?.draw(cgImage, in: drawingRect)
        }

        // Row-major walk: offset = row * bytesPerRow + column * bytesPerPixel.
        for y in 0..<height {
            for x in 0..<width {
                let byteIndex = (bytesPerRow * y) + x * bytesPerPixel
                let red = CGFloat(rawData[byteIndex]) / 255.0
                let green = CGFloat(rawData[byteIndex + 1]) / 255.0
                let blue = CGFloat(rawData[byteIndex + 2]) / 255.0
                let alpha = CGFloat(rawData[byteIndex + 3]) / 255.0
                colors.append(UIColor(red: red, green: green, blue: blue, alpha: alpha))
            }
        }
        return colors
    }
}
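Typical usage of this property (a sketch; the asset name is hypothetical):
// Sketch: grab the per-pixel colors of a bundled image.
if let colors = UIImage(named: "sample")?.colors {
    print("pixel count: \(colors.count), first pixel: \(colors[0])")
}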
This is simply a Swift translation of Olie's answer to the same question in Obj-C. Make sure you give him an upvote as well.
extension UIImage {
    func colorArray() -> [UIColor] {
        let result = NSMutableArray()

        let img = self.CGImage
        let width = CGImageGetWidth(img)
        let height = CGImageGetHeight(img)
        let colorSpace = CGColorSpaceCreateDeviceRGB()

        var rawData = [UInt8](count: width * height * 4, repeatedValue: 0)
        let bytesPerPixel = 4
        let bytesPerRow = bytesPerPixel * width
        let bitsPerComponent = 8
        let bitmapInfo = CGImageAlphaInfo.PremultipliedLast.rawValue | CGBitmapInfo.ByteOrder32Big.rawValue

        let context = CGBitmapContextCreate(&rawData, width, height, bitsPerComponent, bytesPerRow, colorSpace, bitmapInfo)
        CGContextDrawImage(context, CGRectMake(0, 0, CGFloat(width), CGFloat(height)), img)

        for y in 0..<height {
            for x in 0..<width {
                let byteIndex = (bytesPerRow * y) + x * bytesPerPixel
                let red = CGFloat(rawData[byteIndex]) / 255.0
                let green = CGFloat(rawData[byteIndex + 1]) / 255.0
                let blue = CGFloat(rawData[byteIndex + 2]) / 255.0
                let alpha = CGFloat(rawData[byteIndex + 3]) / 255.0
                let color = UIColor(red: red, green: green, blue: blue, alpha: alpha)
                result.addObject(color)
            }
        }

        return (result as NSArray) as! [UIColor]
    }
}
Note that this runs rather slowly. It takes 35 seconds for the simulator to decode a 15MP image, and that's on a quad-core i7.
To get the pixels, try:
let image = UIImage(named: "pic2.png")

if let cgImage = image?.cgImage, let data = cgImage.dataProvider?.data, let bytes = CFDataGetBytePtr(data) {
    assert(cgImage.colorSpace?.model == .rgb)

    var stringArray = [String]()
    let bytesPerPixel = cgImage.bitsPerPixel / cgImage.bitsPerComponent
    for y in 0 ..< cgImage.height {
        for x in 0 ..< cgImage.width {
            let offset = (y * cgImage.bytesPerRow) + (x * bytesPerPixel)
            let components = (r: bytes[offset], g: bytes[offset + 1], b: bytes[offset + 2])
            stringArray.append("[x: \(x), y: \(y)] \(components)")
        }
    }
    print(stringArray)
}
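One caveat worth adding as an assumption on top of this answer: the dataProvider bytes come back in whatever layout the image was decoded with, which is not always tightly packed RGBA. Inside the if-let above you can inspect the layout before trusting the component order:
// Sketch: inspect the decoded layout before interpreting bytes[offset...] as RGB.
print("alpha info:", cgImage.alphaInfo.rawValue)
print("bitmap info:", cgImage.bitmapInfo.rawValue)
print("bits per pixel:", cgImage.bitsPerPixel)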