vImageBoxConvolve_ARGB8888 backgroundColor parameter - swift

I use the vImageBoxConvolve_ARGB8888 function in order to blur a UIImage. The code is shown below:
public func blur(_ size: Int) -> UIImage! {
    let boxSize = size - (size % 2) + 1
    let image = self.cgImage
    let inProvider = image?.dataProvider
    let height = vImagePixelCount((image?.height)!)
    let width = vImagePixelCount((image?.width)!)
    let rowBytes = image?.bytesPerRow
    var inBitmapData = inProvider?.data
    let inData = UnsafeMutableRawPointer(mutating: CFDataGetBytePtr(inBitmapData))
    var inBuffer = vImage_Buffer(data: inData, height: height, width: width, rowBytes: rowBytes!)
    let outData = malloc((image?.bytesPerRow)! * (image?.height)!)
    var outBuffer = vImage_Buffer(data: outData, height: height, width: width, rowBytes: rowBytes!)
    var error = vImageBoxConvolve_ARGB8888(&inBuffer, &outBuffer, nil, 0, 0, UInt32(boxSize), UInt32(boxSize), nil, vImage_Flags(kvImageEdgeExtend))
    error = vImageBoxConvolve_ARGB8888(&outBuffer, &inBuffer, nil, 0, 0, UInt32(boxSize), UInt32(boxSize), nil, vImage_Flags(kvImageEdgeExtend))
    error = vImageBoxConvolve_ARGB8888(&inBuffer, &outBuffer, nil, 0, 0, UInt32(boxSize), UInt32(boxSize), nil, vImage_Flags(kvImageEdgeExtend))
    inBitmapData = nil
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let context = CGContext(data: outBuffer.data, width: Int(outBuffer.width), height: Int(outBuffer.height), bitsPerComponent: 8, bytesPerRow: outBuffer.rowBytes, space: colorSpace, bitmapInfo: (image?.bitmapInfo.rawValue)!, releaseCallback: { (ptr1, ptr2) in
    }, releaseInfo: outData)!
    var imageRef = context.makeImage()
    let bluredImage = UIImage(cgImage: imageRef!)
    imageRef = nil
    free(outData)
    context.flush()
    context.synchronize()
    return bluredImage
}
The vImageBoxConvolve_ARGB8888 function accepts a background color parameter (the 8th parameter), which is of type UnsafePointer. Here the parameter is nil, but I want to set it to red. I have no idea how to do that. If someone could give any tips, I would appreciate it. Thanks in advance.

You can do it like so:
let redColor: [UInt8] = [255, 255, 0, 0] // A, R, G, B for the ARGB8888 format
var error = vImageBoxConvolve_ARGB8888(&inBuffer,
                                       &outBuffer,
                                       nil,
                                       0,
                                       0,
                                       UInt32(boxSize),
                                       UInt32(boxSize),
                                       redColor,
                                       vImage_Flags(kvImageBackgroundColorFill))
The background color has to be four bytes for the ARGB8888 variant, and the Swift array is bridged to the UnsafePointer<UInt8> parameter automatically at the call site.
Bear in mind that the flags kvImageBackgroundColorFill and kvImageEdgeExtend are mutually exclusive, so you can't pass kvImageBackgroundColorFill + kvImageEdgeExtend in the flags argument.
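If you keep the question's three convolution passes, the same color and flag go into each call. A minimal sketch under that assumption, reusing redColor, inBuffer, outBuffer, and boxSize from above:
let flags = vImage_Flags(kvImageBackgroundColorFill)
var error = vImageBoxConvolve_ARGB8888(&inBuffer, &outBuffer, nil, 0, 0,
                                       UInt32(boxSize), UInt32(boxSize), redColor, flags)
error = vImageBoxConvolve_ARGB8888(&outBuffer, &inBuffer, nil, 0, 0,
                                   UInt32(boxSize), UInt32(boxSize), redColor, flags)
error = vImageBoxConvolve_ARGB8888(&inBuffer, &outBuffer, nil, 0, 0,
                                   UInt32(boxSize), UInt32(boxSize), redColor, flags)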

Related

how to properly extract the array of numbers from an image in swift?

I'm trying to extract the array of numbers from a UIImage in Swift, but in the end I get only a bunch of zeros, no useful information at all.
This is the code I wrote to try to accomplish this:
var photo = UIImage(named: "myphoto.jpg")!
var withAlpha = true
var bytesPerPixels: Int = withAlpha ? 4 : 3
var width: Int = Int(photo.size.width)
var height: Int = Int(photo.size.height)
var bitsPerComponent: Int = 8
var bytesPerRow = bytesPerPixels * width
var totalPixels = (bytesPerPixels * width) * height
var alignment = MemoryLayout<UInt32>.alignment
var data = UnsafeMutableRawPointer.allocate(byteCount: totalPixels, alignment: alignment )
var bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue).rawValue
var colorSpace = CGColorSpaceCreateDeviceRGB()
let ctx = CGContext(data: data, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo)
let bindedPointer: UnsafeMutablePointer<UInt32> = data.bindMemory(to: UInt32.self, capacity: totalPixels)
var pixels = UnsafeMutableBufferPointer.init(start: bindedPointer, count: totalPixels)
for p in pixels {
    print(p, Date())
}
At the end I tried to bind the UnsafeMutableRawPointer to extract the values, but with no success.
What could I be missing here?
Thank you all in advance.
A few observations:
You need to draw the image to the context.
I’d also suggest that rather than creating a buffer you have to manage manually, you pass nil and let the OS create (and manage) that buffer for you.
Note that totalPixels should be just width * height.
Your code assumes the scale of the image is 1. That’s not always a valid assumption. I’d grab the cgImage and use its width and height.
Even if you have only three components, you still need to use 4 bytes per pixel.
Thus:
guard
    let photo = UIImage(named: "myphoto.jpg"),
    let cgImage = photo.cgImage
else { return }

let bytesPerPixels = 4
let width = cgImage.width
let height = cgImage.height
let bitsPerComponent: Int = 8
let bytesPerRow = bytesPerPixels * width
let totalPixels = width * height
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue).rawValue
let colorSpace = CGColorSpaceCreateDeviceRGB()

guard
    let ctx = CGContext(data: nil, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo),
    let data = ctx.data
else { return }

ctx.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))

let pointer = data.bindMemory(to: UInt32.self, capacity: totalPixels)
let pixels = UnsafeMutableBufferPointer(start: pointer, count: totalPixels)
for p in pixels {
    print(String(p, radix: 16), Date())
}
You need to draw the image into the context.
ctx?.draw(photo.cgImage!, in: CGRect(origin: .zero, size: photo.size))
Add that just after creating the CGContext.
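Placed in the question's own code, that portion would then read (same variable names as in the question):
let ctx = CGContext(data: data, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo)
ctx?.draw(photo.cgImage!, in: CGRect(origin: .zero, size: photo.size))
let bindedPointer: UnsafeMutablePointer<UInt32> = data.bindMemory(to: UInt32.self, capacity: totalPixels)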

Convert raw image data to jpeg in swift

I am trying to convert raw image data to JPEG in Swift. Unfortunately, the JPEG image created is skewed. Please find below the code used for the conversion.
let rawPtr = (rawImgData as NSData).bytes
let mutableRawPtr = UnsafeMutableRawPointer.init(mutating: rawPtr)
let context = CGContext.init(data: mutableRawPtr,
                             width: 750,
                             height: 1334,
                             bitsPerComponent: 32,
                             bytesPerRow: (8 * 750)/8,
                             space: CGColorSpaceCreateDeviceRGB(),
                             bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)
let imageRef = CGContext.makeImage(context!)
let imageRep = NSBitmapImageRep(cgImage: imageRef()!)
let finalData = imageRep.representation(using: .jpeg,
                                        properties: [NSBitmapImageRep.PropertyKey.compressionFactor: 0.5])
Here's the converted jpeg image
Any help or pointers would be greatly appreciated. Thanks in advance!
Finally figured out the answer
var width: size_t?
var height: size_t?
let bitsPerComponent: size_t = 8
var bytesPerRow: size_t?
var bitmapInfo: UInt32
if #available(OSX 10.12, *) {
    bitmapInfo = UInt32(CGImageAlphaInfo.premultipliedFirst.rawValue) | UInt32(CGImageByteOrderInfo.order32Little.rawValue)
} else {
    bitmapInfo = UInt32(CGImageAlphaInfo.premultipliedFirst.rawValue)
}
let colorSpace = CGColorSpaceCreateDeviceRGB()

do {
    let streamAttributesFuture = self.bitmapStream?.streamAttributes()
    streamAttributesFuture?.onQueue(.main, notifyOfCompletion: { (streamAttributesFut) in
    })
    try streamAttributesFuture?.await()
    let dict = streamAttributesFuture?.result
    width = (dict?.attributes["width"] as! Int)
    height = (dict?.attributes["height"] as! Int)
    bytesPerRow = (dict?.attributes["row_size"] as! Int)
} catch let frameError {
    print("frame error = \(frameError)")
    return
}

let rawPtr = (data as NSData).bytes
let mutableRawPtr = UnsafeMutableRawPointer.init(mutating: rawPtr)
let context = CGContext.init(data: mutableRawPtr, width: width!, height: height!, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow!, space: colorSpace, bitmapInfo: bitmapInfo)
if context == nil {
    return
}
let imageRef = context?.makeImage()

let scaledWidth = Int(Double(width!) / 1.5)
let scaledHeight = Int(Double(height!) / 1.5)
let resizeCtx = CGContext.init(data: nil, width: scaledWidth, height: scaledHeight, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow!, space: colorSpace, bitmapInfo: bitmapInfo)
resizeCtx?.draw(imageRef!, in: CGRect(x: 0, y: 0, width: scaledWidth, height: scaledHeight))
resizeCtx?.interpolationQuality = .low
let resizedImgRef = resizeCtx?.makeImage()

let imageRep = NSBitmapImageRep(cgImage: resizedImgRef!)
let finalData = imageRep.representation(using: .jpeg, properties: [NSBitmapImageRep.PropertyKey.compressionFactor: compression])
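The skew in the first attempt is the usual symptom of a bytesPerRow that does not match the real row stride of the data; bitsPerComponent should also be 8 (bits per channel), not 32. For a 750 x 1334 RGBA buffer the context parameters would be roughly as follows (a sketch, assuming 4 bytes per pixel and the same mutableRawPtr and color space as in the question):
let context = CGContext(data: mutableRawPtr,
                        width: 750,
                        height: 1334,
                        bitsPerComponent: 8,     // bits per channel
                        bytesPerRow: 4 * 750,    // bytes per pixel * width
                        space: CGColorSpaceCreateDeviceRGB(),
                        bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)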
Try this; change imageName to what you want, and save it in any directory:
func convertToJPEG(image: UIImage) {
    if let jpegImage = UIImageJPEGRepresentation(image, 1.0) {
        do {
            // Convert and write to the temporary directory
            let tempDirectoryURL = URL(fileURLWithPath: NSTemporaryDirectory(), isDirectory: true)
            let newFileName = imageName + ".JPG"   // imageName is assumed to be a String defined elsewhere
            let targetURL = tempDirectoryURL.appendingPathComponent(newFileName)
            try jpegImage.write(to: targetURL, options: [])
        } catch {
            print(error.localizedDescription)
            print("FAILED")
        }
    } else {
        print("FAILED")
    }
}
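Side note: UIImageJPEGRepresentation was deprecated in iOS 12 in favor of the jpegData(compressionQuality:) instance method on UIImage. A minimal sketch of the same idea with the newer API (here imageName is taken as a parameter rather than an outer constant):
func convertToJPEG(image: UIImage, imageName: String) {
    // Encode the UIImage as JPEG data at full quality
    guard let jpegData = image.jpegData(compressionQuality: 1.0) else {
        print("FAILED")
        return
    }
    do {
        let tempDirectoryURL = URL(fileURLWithPath: NSTemporaryDirectory(), isDirectory: true)
        let targetURL = tempDirectoryURL.appendingPathComponent(imageName + ".JPG")
        try jpegData.write(to: targetURL, options: [])
    } catch {
        print(error.localizedDescription)
        print("FAILED")
    }
}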

getting bitmap from cgimage

I'm trying to get bitmap data from a CGImage and read it into an array of pixels. However, my imageContext and imageContext?.draw() always return nil.
let capturedimage = UIImage(named: "random.jpg")
let image = capturedimage?.cgImage
let width = Int((image?.width)!)
let height = Int((image?.height)!)
let bytesPerPixel = 4;
let bytesPerRow = bytesPerPixel * width;
let bitsPerComponent = 8;
let pixelArr: [[UInt32]] = Array(repeating: Array(repeating: 0, count: width), count: height)
let pointer: UnsafeMutableRawPointer = UnsafeMutableRawPointer(mutating: pixelArr);
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo: UInt32 = CGBitmapInfo.byteOrder32Big.rawValue
let imageContext = CGContext(data: pointer, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo);
imageContext?.draw(image!, in: CGRect(x: 0, y: 0, width:width, height:height))
I've checked my code multiple times, but I don't know where I went wrong.
let image = UIImage(named: "2.png")
guard let cgImage = image?.cgImage else {
fatalError()
}
let width = cgImage.width
let height = cgImage.height
let bytesPerPixel = 4;
let bytesPerRow = bytesPerPixel * width;
let bitsPerComponent = 8;
let pixelsCount = width * height
let bitmapData: UnsafeMutablePointer<UInt32> = .allocate(capacity: pixelsCount)
defer {
bitmapData.deallocate()
}
bitmapData.initialize(repeating: 0, count: pixelsCount)
var pixelAtPosition: ((Int, Int) -> UInt32) = { x, y in
return bitmapData[y * width + x]
}
guard let context = CGContext(data: bitmapData, width: width, height: height,
bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow,
space: CGColorSpaceCreateDeviceRGB(), bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue) else {
fatalError()
}
//draw image to context
var rect = CGRect(x: 0, y: 0, width: width, height: height)
context.draw(cgImage, in: rect)
let traget = pixelAtPosition(200, 200)
print(String(format: "pixel 0x%08X", traget))
print(String(format: "red 0x%2X", (traget & 0x000000FF)))
print(String(format: "green 0x%2X", (traget & 0x0000FF00) >> 8))
print(String(format: "blue 0x%2X", (traget & 0x00FF0000) >> 16))
print(String(format: "alpha 0x%2X", (traget & 0xFF000000) >> 24))

Get Alpha Percentage of image Swift

I have a function to calculate the alpha percentage of an image, but I get a crash on iPhone 5; it works well on iPhone 6 and later.
private func alphaOnlyPersentage(img: UIImage) -> Float {
    let width = Int(img.size.width)
    let height = Int(img.size.height)
    let bitmapBytesPerRow = width
    let bitmapByteCount = bitmapBytesPerRow * height
    let pixelData = UnsafeMutablePointer<UInt8>.allocate(capacity: bitmapByteCount)
    let colorSpace = CGColorSpaceCreateDeviceGray()
    let context = CGContext(data: pixelData,
                            width: width,
                            height: height,
                            bitsPerComponent: 8,
                            bytesPerRow: bitmapBytesPerRow,
                            space: colorSpace,
                            bitmapInfo: CGBitmapInfo(rawValue: CGImageAlphaInfo.alphaOnly.rawValue).rawValue)!
    let rect = CGRect(x: 0, y: 0, width: width, height: height)
    context.clear(rect)
    context.draw(img.cgImage!, in: rect)
    var alphaOnlyPixels = 0
    for x in 0...Int(width) {
        for y in 0...Int(height) {
            if pixelData[y * width + x] == 0 {
                alphaOnlyPixels += 1
            }
        }
    }
    free(pixelData)
    return Float(alphaOnlyPixels) / Float(bitmapByteCount)
}
Please help me fix it! Thank you. Sorry, I am a newbie at iOS coding.
Replace ... with ..<, otherwise you are accessing one row and one column too many.
Note the crash is random depending on how the memory is allocated and whether you have access to the bytes at the given address, which is outside of the chunk allocated for you.
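For example, the corrected nested loops in the function above would look like this (same width, height, pixelData, and alphaOnlyPixels as before):
for x in 0 ..< width {
    for y in 0 ..< height {
        if pixelData[y * width + x] == 0 {
            alphaOnlyPixels += 1
        }
    }
}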
Or replace the iteration with something simpler:
for i in 0 ..< bitmapByteCount {
    if pixelData[i] == 0 {
        alphaOnlyPixels += 1
    }
}
You can also use Data to create your bytes, which will later simplify iteration:
var pixelData = Data(count: bitmapByteCount)
pixelData.withUnsafeMutableBytes { (bytes: UnsafeMutablePointer<UInt8>) in
    let context = CGContext(data: bytes,
                            width: width,
                            height: height,
                            bitsPerComponent: 8,
                            bytesPerRow: bitmapBytesPerRow,
                            space: colorSpace,
                            bitmapInfo: CGImageAlphaInfo.alphaOnly.rawValue)!
    let rect = CGRect(x: 0, y: 0, width: width, height: height)
    context.clear(rect)
    context.draw(img.cgImage!, in: rect)
}
let alphaOnlyPixels = pixelData.filter { $0 == 0 }.count
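In Swift 5 the closure-based withUnsafeMutableBytes passes an UnsafeMutableRawBufferPointer rather than a typed pointer, so the same idea would look roughly like this (a sketch, assuming the same width, height, bitmapBytesPerRow, bitmapByteCount, colorSpace, and img as above):
var pixelData = Data(count: bitmapByteCount)
pixelData.withUnsafeMutableBytes { (raw: UnsafeMutableRawBufferPointer) in
    let context = CGContext(data: raw.baseAddress,
                            width: width,
                            height: height,
                            bitsPerComponent: 8,
                            bytesPerRow: bitmapBytesPerRow,
                            space: colorSpace,
                            bitmapInfo: CGImageAlphaInfo.alphaOnly.rawValue)!
    let rect = CGRect(x: 0, y: 0, width: width, height: height)
    context.clear(rect)
    context.draw(img.cgImage!, in: rect)
}
let alphaPercentage = Float(pixelData.filter { $0 == 0 }.count) / Float(bitmapByteCount)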

Swift blur extension on UIImage crashes

I have written an extension for UIImage and get nil for the context. Any idea what could be the problem?
public func blur(size: Float) -> UIImage! {
    let boxSize = size - (size % 2) + 1
    let image = self.CGImage
    let inProvider = CGImageGetDataProvider(image)
    let height = vImagePixelCount(CGImageGetHeight(image))
    let width = vImagePixelCount(CGImageGetWidth(image))
    let rowBytes = CGImageGetBytesPerRow(image)
    let inBitmapData = CGDataProviderCopyData(inProvider)
    let inData = UnsafeMutablePointer<Void>(CFDataGetBytePtr(inBitmapData))
    var inBuffer = vImage_Buffer(data: inData, height: height, width: width, rowBytes: rowBytes)
    let outData = malloc(CGImageGetBytesPerRow(image) * CGImageGetHeight(image))
    var outBuffer = vImage_Buffer(data: outData, height: height, width: width, rowBytes: rowBytes)
    let _ = vImageBoxConvolve_ARGB8888(&inBuffer, &outBuffer, nil, 0, 0, UInt32(boxSize), UInt32(boxSize), nil, vImage_Flags(kvImageEdgeExtend))
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let context = CGBitmapContextCreate(outBuffer.data, Int(outBuffer.width), Int(outBuffer.height), 8, outBuffer.rowBytes, colorSpace, CGImageGetBitmapInfo(image).rawValue)
    let imageRef = CGBitmapContextCreateImage(context)!
    let bluredImage = UIImage(CGImage: imageRef)
    free(outData)
    return bluredImage
}
The problem was solved by doing the blur on the CGImage rather than on the UIImage directly:
extension UIImage {
    class func blurEffect(cgImage: CGImageRef) -> UIImage! {
        return UIImage(CGImage: cgImage)
    }
    func blurEffect(boxSize: CGFloat) -> UIImage! {
        return UIImage(CGImage: bluredCGImage(boxSize))
    }
    func bluredCGImage(boxSize: CGFloat) -> CGImageRef! {
        return CGImage!.blurEffect(boxSize)
    }
}
extension CGImage {
    func blurEffect(boxSize: CGFloat) -> CGImageRef! {
        let boxSize = boxSize - (boxSize % 2) + 1
        let inProvider = CGImageGetDataProvider(self)
        let height = vImagePixelCount(CGImageGetHeight(self))
        let width = vImagePixelCount(CGImageGetWidth(self))
        let rowBytes = CGImageGetBytesPerRow(self)
        let inBitmapData = CGDataProviderCopyData(inProvider)
        let inData = UnsafeMutablePointer<Void>(CFDataGetBytePtr(inBitmapData))
        var inBuffer = vImage_Buffer(data: inData, height: height, width: width, rowBytes: rowBytes)
        let outData = malloc(CGImageGetBytesPerRow(self) * CGImageGetHeight(self))
        var outBuffer = vImage_Buffer(data: outData, height: height, width: width, rowBytes: rowBytes)
        vImageBoxConvolve_ARGB8888(&inBuffer, &outBuffer, nil, 0, 0, UInt32(boxSize), UInt32(boxSize), nil, vImage_Flags(kvImageEdgeExtend))
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGBitmapContextCreate(outBuffer.data, Int(outBuffer.width), Int(outBuffer.height), 8, outBuffer.rowBytes, colorSpace, CGImageGetBitmapInfo(self).rawValue)
        let imageRef = CGBitmapContextCreateImage(context)
        free(outData)
        return imageRef
    }
}
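For completeness, calling the extension then looks like this (a sketch in the same Swift 2-era syntax, with the image name made up for the example):
let photo = UIImage(named: "photo.jpg")!
let blurred = photo.blurEffect(5)   // box size of 5; the extension rounds it to an odd kernel size
// use blurred in an image view, save it, etc.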