Accessing UIImage properties without loading the image into memory - iPhone

As you know, the iPhone guidelines discourage loading UIImages that are larger than 1024x1024.
The size of the images I have to load varies, and I would like to check the size of an image before loading it; however, using the .size property of UIImage requires the image to be loaded... which is exactly what I am trying to avoid.
Is there something wrong with my reasoning, or is there a solution to this?
Thank you all

As of iOS 4.0, the iOS SDK includes the CGImageSource... functions (in the ImageIO framework). It's a very flexible API for querying image metadata without loading the image into memory. Getting the pixel dimensions of an image should work like this (make sure to include ImageIO.framework in your target):
#import <ImageIO/ImageIO.h>

NSURL *imageFileURL = [NSURL fileURLWithPath:...];
CGImageSourceRef imageSource = CGImageSourceCreateWithURL((CFURLRef)imageFileURL, NULL);
if (imageSource == NULL) {
    // Error loading image
    ...
    return;
}

CGFloat width = 0.0f, height = 0.0f;
CFDictionaryRef imageProperties = CGImageSourceCopyPropertiesAtIndex(imageSource, 0, NULL);
CFRelease(imageSource);

if (imageProperties != NULL) {
    CFNumberRef widthNum = CFDictionaryGetValue(imageProperties, kCGImagePropertyPixelWidth);
    if (widthNum != NULL) {
        CFNumberGetValue(widthNum, kCFNumberCGFloatType, &width);
    }
    CFNumberRef heightNum = CFDictionaryGetValue(imageProperties, kCGImagePropertyPixelHeight);
    if (heightNum != NULL) {
        CFNumberGetValue(heightNum, kCFNumberCGFloatType, &height);
    }
    // Check orientation and flip size if required
    CFNumberRef orientationNum = CFDictionaryGetValue(imageProperties, kCGImagePropertyOrientation);
    if (orientationNum != NULL) {
        int orientation;
        CFNumberGetValue(orientationNum, kCFNumberIntType, &orientation);
        if (orientation > 4) {
            CGFloat temp = width;
            width = height;
            height = temp;
        }
    }
    CFRelease(imageProperties);
}

NSLog(@"Image dimensions: %.0f x %.0f px", width, height);
(adapted from "Programming with Quartz" by Gelphman and Laden, listing 9.5, page 228)

Swift 3 version of the answer:
import Foundation
import ImageIO

func sizeForImage(at url: URL) -> CGSize? {
    guard let imageSource = CGImageSourceCreateWithURL(url as CFURL, nil),
          let imageProperties = CGImageSourceCopyPropertiesAtIndex(imageSource, 0, nil) as? [AnyHashable: Any],
          let pixelWidth = imageProperties[kCGImagePropertyPixelWidth as String],
          let pixelHeight = imageProperties[kCGImagePropertyPixelHeight as String],
          let orientationNumber = imageProperties[kCGImagePropertyOrientation as String]
    else {
        return nil
    }
    var width: CGFloat = 0, height: CGFloat = 0, orientation: Int = 0
    CFNumberGetValue(pixelWidth as! CFNumber, .cgFloatType, &width)
    CFNumberGetValue(pixelHeight as! CFNumber, .cgFloatType, &height)
    CFNumberGetValue(orientationNumber as! CFNumber, .intType, &orientation)
    // Check orientation and flip size if required
    if orientation > 4 { swap(&width, &height) }
    return CGSize(width: width, height: height)
}
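For example, a caller could apply the 1024x1024 guideline from the question before deciding how to load the file. A minimal usage sketch (the file path and the imageView outlet are hypothetical):
let url = URL(fileURLWithPath: "/path/to/photo.jpg") // hypothetical path
if let size = sizeForImage(at: url), max(size.width, size.height) <= 1024 {
    imageView.image = UIImage(contentsOfFile: url.path) // small enough to load fully
} else {
    // Too large (or unreadable): downsample with ImageIO rather than loading directly.
}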

In Swift 5, with ImageIO:
import ImageIO

extension URL {
    var sizeOfImage: CGSize? {
        guard let imageSource = CGImageSourceCreateWithURL(self as CFURL, nil),
              let imageProperties = CGImageSourceCopyPropertiesAtIndex(imageSource, 0, nil) as? [AnyHashable: Any],
              let pixelWidth = imageProperties[kCGImagePropertyPixelWidth as String] as? CFNumber,
              let pixelHeight = imageProperties[kCGImagePropertyPixelHeight as String] as? CFNumber
        else {
            return nil
        }
        var width: CGFloat = 0, height: CGFloat = 0
        CFNumberGetValue(pixelWidth, .cgFloatType, &width)
        CFNumberGetValue(pixelHeight, .cgFloatType, &height)
        return CGSize(width: width, height: height)
    }
}
Note that imageProperties[kCGImagePropertyOrientation as String] may be nil. It was nil when I tested with a PNG image file.
As Apple's documentation says:
kCGImagePropertyOrientation
The numeric value for this key encodes the intended display orientation for the image according to the TIFF and Exif specifications.
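If you do want the rotation-corrected size, read the orientation defensively instead of requiring it in the guard. A minimal sketch (using the width/height variables from the snippets above; EXIF orientations 5-8 mean the image is rotated by 90 degrees):
if let rawOrientation = imageProperties[kCGImagePropertyOrientation as String] as? UInt32,
   rawOrientation > 4 {
    // The orientation key is optional (PNGs often omit it); swap only when present and rotated.
    swap(&width, &height)
}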

Incorrect saving of transparent UIImage to Photo Library as PNG with UIImageWriteToSavedPhotosAlbum

I have a function cropAlpha() that trims away the extra transparent space around an image.
func cropAlpha() -> UIImage {
    let cgImage = self.cgImage!
    let width = cgImage.width
    let height = cgImage.height
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let bytesPerPixel = 4
    let bytesPerRow = bytesPerPixel * width
    let bitsPerComponent = 8
    let bitmapInfo: UInt32 = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Big.rawValue
    guard let context = CGContext(data: nil, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo),
          let ptr = context.data?.assumingMemoryBound(to: UInt8.self)
    else { return self }
    context.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))
    var minX = width
    var minY = height
    var maxX = 0
    var maxY = 0
    // Scan every pixel and track the bounding box of fully opaque pixels.
    for x in 0 ..< width {
        for y in 0 ..< height {
            let i = bytesPerRow * y + bytesPerPixel * x
            let alpha = CGFloat(ptr[i + 3]) / 255.0
            if alpha == 1 {
                if x < minX { minX = x }
                if x > maxX { maxX = x }
                if y < minY { minY = y }
                if y > maxY { maxY = y }
            }
        }
    }
    let rect = CGRect(x: CGFloat(minX), y: CGFloat(minY), width: CGFloat(maxX - minX), height: CGFloat(maxY - minY))
    let croppedImage = cgImage.cropping(to: rect)!
    return UIImage(cgImage: croppedImage)
}
The image returned by this function has transparent elements, and I put it into the image view: presenterImageView.image = imagePNG. It works as it should. But when I try to save the UIImage to the Photo Library, the transparent background turns white.
let image = maskedImage?.cropAlpha()
let imagePNGData = image!.pngData()
let imagePNG = UIImage(data: imagePNGData!)
UIImageWriteToSavedPhotosAlbum(imagePNG!, nil, nil, nil)
If I don't use that function, I get the result I want, but the image has too much wasted space. I don't understand what the reason could be. Any ideas?
The problem is that UIImageWriteToSavedPhotosAlbum does not properly handle saving a UIImage with premultiplied alpha (or at least the result is not what you would expect), and your cropping method uses the premultipliedLast format. You also can't simply change CGImageAlphaInfo to a non-premultiplied format, because CGContext does not support that combination (you will see the error CGBitmapContextCreate: unsupported parameter combination if you try). What you can do instead is convert the cropped image to a CIImage, unpremultiply the alpha, and convert back to a UIImage. Your saving code could then look like this (although I recommend removing the force unwrapping if you plan to use it in a final app):
let image = maskedImage?.cropAlpha()
// Unpremultiply the alpha channel before encoding to PNG.
let ciImage = CIImage(image: image!)!.unpremultiplyingAlpha()
let uiImage = UIImage(ciImage: ciImage)
let imagePNGData = uiImage.pngData()
let imagePNG = UIImage(data: imagePNGData!)
UIImageWriteToSavedPhotosAlbum(imagePNG!, nil, nil, nil)
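An alternative sketch, not from the original answer: writing the PNG data directly through the Photos framework preserves the alpha channel without the extra UIImage round trip. This assumes photo-library add permission has already been granted:
import Photos

func savePNGToPhotoLibrary(_ pngData: Data) {
    PHPhotoLibrary.shared().performChanges({
        // Adding the raw PNG bytes as an asset resource avoids re-encoding through UIImage.
        let request = PHAssetCreationRequest.forAsset()
        request.addResource(with: .photo, data: pngData, options: nil)
    }) { success, error in
        if !success {
            print("Saving failed: \(String(describing: error))")
        }
    }
}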

Take snapshot from UIView with lower resolution

I'm taking snapshots of a PDFView (from PDFKit) for streaming (20 times per second), and I use this extension:
extension UIView {
    func asImageBackground(viewLayer: CALayer, viewBounds: CGRect) -> UIImage {
        let renderer = UIGraphicsImageRenderer(bounds: viewBounds)
        return renderer.image { rendererContext in
            viewLayer.render(in: rendererContext.cgContext)
        }
    }
}
But the output UIImage from this extension has a high resolution, which makes it difficult to stream. I can reduce it with this extension:
extension UIImage {
    func resize(_ maxSize: CGFloat) -> UIImage {
        // Adjust for device pixel density (the default renderer format multiplies by the screen scale).
        let maxSizePixels = maxSize / UIScreen.main.scale
        // Work out the aspect ratio.
        let aspectRatio = size.width / size.height
        var width: CGFloat
        var height: CGFloat
        if aspectRatio > 1 {
            // Landscape
            width = maxSizePixels
            height = maxSizePixels / aspectRatio
        } else {
            // Portrait
            height = maxSizePixels
            width = maxSizePixels * aspectRatio
        }
        // Create an image renderer of the correct size and render the image into it.
        let renderer = UIGraphicsImageRenderer(size: CGSize(width: width, height: height),
                                               format: UIGraphicsImageRendererFormat.default())
        return renderer.image { _ in
            self.draw(in: CGRect(x: 0, y: 0, width: width, height: height))
        }
    }
}
but it adds an additional workload, which makes the process even worse. Is there any better way?
Thanks
You can downsample it using ImageIO, which is the approach Apple recommends:
extension UIImage {
    func downsample(to resolution: CGSize) -> UIImage? {
        let imageSourceOptions = [kCGImageSourceShouldCache: false] as CFDictionary
        guard let data = self.jpegData(compressionQuality: 0.75),
              let imageSource = CGImageSourceCreateWithData(data as CFData, imageSourceOptions) else {
            return nil
        }
        let maxDimensionInPixels = Swift.max(resolution.width, resolution.height) * 3
        let downsampleOptions = [
            kCGImageSourceCreateThumbnailFromImageAlways: true,
            kCGImageSourceShouldCacheImmediately: true,
            kCGImageSourceCreateThumbnailWithTransform: true,
            kCGImageSourceThumbnailMaxPixelSize: maxDimensionInPixels
        ] as CFDictionary
        guard let downsampledImage = CGImageSourceCreateThumbnailAtIndex(imageSource, 0, downsampleOptions) else {
            return nil
        }
        return UIImage(cgImage: downsampledImage)
    }
}
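For a recurring capture like this, it may be cheaper to avoid the snapshot-then-resize double render altogether and draw the layer at the target size in one pass. A sketch, not from the original answer, assuming the view's bounds are non-zero:
extension UIView {
    // Render the layer directly at the target size, so no second resize pass is needed.
    func snapshot(scaledTo targetSize: CGSize) -> UIImage {
        let format = UIGraphicsImageRendererFormat.default()
        format.scale = 1 // 1 point == 1 pixel in the output image
        let renderer = UIGraphicsImageRenderer(size: targetSize, format: format)
        return renderer.image { ctx in
            // Scale the context so the whole view fits into the target size.
            ctx.cgContext.scaleBy(x: targetSize.width / bounds.width,
                                  y: targetSize.height / bounds.height)
            layer.render(in: ctx.cgContext)
        }
    }
}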

How to provide Apple Watch Complication Asset for 45mm?

When adding assets for the Graphic Circular complication, there is no option to add an asset for the 45mm version, so the image does not fill the available space.
Result: the image does not fill the space, as it is too small.
I have read that I should use PDF assets for the 40/42mm sizes, but my image is a raster image, so I can't create it as a PDF. I want to scale the image myself and add it as an asset, but there is no slot to drop it into.
What should I do?
The issue is that the size of the image in the asset catalog is smaller than it really should be according to the Apple Human Interface Guidelines, which is why the image does not fill the space. As there is no option to drop in a 45mm version, you need to calculate the size and resize the image yourself at runtime.
This article is the solution!
http://www.glimsoft.com/02/18/watchos-complications/?utm_campaign=iOS%2BDev%2BWeekly&utm_medium=web&utm_source=iOS%2BDev%2BWeekly%2BIssue%2B547
ComplicationController+Ext.swift
extension ComplicationController {

    enum ComplicationImageType {
        case graphicCircularImage
    }

    struct ComplicationImageSizeCollection {
        var size38mm: CGFloat = 0
        let size40mm: CGFloat
        let size41mm: CGFloat
        let size44mm: CGFloat
        let size45mm: CGFloat

        // The following sizes are taken directly from the HIG:
        // https://developer.apple.com/design/human-interface-guidelines/watchos/overview/complications/
        static let graphicCircularImageSizes = ComplicationImageSizeCollection(size40mm: 42, size41mm: 44.5, size44mm: 47, size45mm: 50)

        func sizeForCurrentWatchModel() -> CGFloat {
            let screenHeight = WKInterfaceDevice.current().screenBounds.size.height
            if screenHeight >= 242 {
                // It's the 45mm version.
                return self.size45mm
            } else if screenHeight >= 224 {
                // It's the 44mm version.
                return self.size44mm
            } else if screenHeight >= 215 {
                // It's the 41mm version.
                return self.size41mm
            } else if screenHeight >= 197 {
                return self.size40mm
            } else if screenHeight >= 170 {
                return self.size38mm
            }
            return self.size40mm // Fallback, just in case.
        }

        static func sizes(for type: ComplicationImageType) -> ComplicationImageSizeCollection {
            switch type {
            case .graphicCircularImage: return Self.graphicCircularImageSizes
            }
        }

        static func getImage(for type: ComplicationImageType) -> UIImage {
            let complicationImageSizes = ComplicationImageSizeCollection.sizes(for: type)
            let width = complicationImageSizes.sizeForCurrentWatchModel()
            let size = CGSize(width: width, height: width)
            var filename: String!
            switch type {
            case .graphicCircularImage: filename = "gedenken_graphic_circular_pdf"
            }
            return renderPDFToImage(named: filename, outputSize: size)
        }

        private static func renderPDFToImage(named filename: String, outputSize size: CGSize) -> UIImage {
            // Create a URL for the PDF file.
            let resourceName = filename.replacingOccurrences(of: ".pdf", with: "")
            let path = Bundle.main.path(forResource: resourceName, ofType: "pdf")!
            let url = URL(fileURLWithPath: path)
            guard let document = CGPDFDocument(url as CFURL),
                  let page = document.page(at: 1) else {
                fatalError("We couldn't find the document or the page")
            }
            let originalPageRect = page.getBoxRect(.mediaBox)
            // With the multiplier, we bring the PDF from its original size to the desired output size.
            let multiplier = size.width / originalPageRect.width
            UIGraphicsBeginImageContextWithOptions(size, false, 0)
            let context = UIGraphicsGetCurrentContext()!
            // Translate the context, then flip it vertically because the
            // Core Graphics coordinate system starts from the bottom.
            context.translateBy(x: 0, y: originalPageRect.size.height * multiplier)
            context.scaleBy(x: multiplier, y: -multiplier)
            // Draw the PDF page.
            context.drawPDFPage(page)
            let image = UIGraphicsGetImageFromCurrentImageContext()!
            UIGraphicsEndImageContext()
            return image
        }
    }
}
ComplicationController.swift
func createGraphicCircularTemplate() -> CLKComplicationTemplate {
    let template = CLKComplicationTemplateGraphicCircularImage()
    let imageLogoProvider = CLKFullColorImageProvider()
    imageLogoProvider.image = ComplicationImageSizeCollection.getImage(for: .graphicCircularImage)
    template.imageProvider = imageLogoProvider
    return template
}
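A hypothetical usage sketch showing where the template would be handed to ClockKit, e.g. from the data source's current timeline entry method (the family check and the date are assumptions, not part of the original answer):
func getCurrentTimelineEntry(for complication: CLKComplication,
                             withHandler handler: @escaping (CLKComplicationTimelineEntry?) -> Void) {
    guard complication.family == .graphicCircular else {
        handler(nil)
        return
    }
    // Wrap the correctly sized template in an entry for "now".
    let entry = CLKComplicationTimelineEntry(date: Date(),
                                             complicationTemplate: createGraphicCircularTemplate())
    handler(entry)
}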

Firebase ML Kit misaligned bounding box

I'm trying to use the new Detect and Track Objects feature of ML Kit on iOS; however, I seem to be running into a roadblock with the object detection bounding box.
Using a Lego figure as an example, the image orientation is converted to always be .up, as per the documentation, yet the bounding box seems to be rotated 90 degrees from the correct dimensions even though the image orientation is correct. Similar behaviour occurs with other objects too, with the box being offset.
let options = VisionObjectDetectorOptions()
options.detectorMode = .singleImage
options.shouldEnableMultipleObjects = false

let objectDetector = Vision.vision().objectDetector(options: options)
let image = VisionImage(image: self.originalImage)

objectDetector.process(image) { detectedObjects, error in
    guard error == nil else {
        print(error!)
        return
    }
    guard let detectedObjects = detectedObjects, !detectedObjects.isEmpty else {
        print("No objects detected")
        return
    }
    let primaryObject = detectedObjects.first
    print(primaryObject as Any)
    guard let objectFrame = primaryObject?.frame else { return }
    print(objectFrame)
    self.imageView.image = self.drawOccurrencesOnImage([objectFrame], self.originalImage)
}
And here is the function that draws the red box:
private func drawOccurrencesOnImage(_ occurrences: [CGRect], _ image: UIImage) -> UIImage? {
    let imageSize = image.size
    let scale: CGFloat = 0.0
    UIGraphicsBeginImageContextWithOptions(imageSize, false, scale)
    image.draw(at: CGPoint.zero)
    let ctx = UIGraphicsGetCurrentContext()
    ctx?.addRects(occurrences)
    ctx?.setStrokeColor(UIColor.red.cgColor)
    ctx?.setLineWidth(20)
    ctx?.strokePath()
    guard let drawnImage = UIGraphicsGetImageFromCurrentImageContext() else {
        return nil
    }
    UIGraphicsEndImageContext()
    return drawnImage
}
The image dimensions, according to image.size, are (3024.0, 4032.0), and the box frame is (1274.0, 569.0, 1299.0, 2023.0). Any insight into this behaviour would be much appreciated.
It turned out I wasn't scaling the image properly, which caused the misalignment.
This function ended up fixing my problems:
public func updateImageView(with image: UIImage) {
    let orientation = UIApplication.shared.statusBarOrientation
    var scaledImageWidth: CGFloat = 0.0
    var scaledImageHeight: CGFloat = 0.0
    switch orientation {
    case .portrait, .portraitUpsideDown, .unknown:
        scaledImageWidth = imageView.bounds.size.width
        scaledImageHeight = image.size.height * scaledImageWidth / image.size.width
    case .landscapeLeft, .landscapeRight:
        // Compute the height first; the width is derived from it.
        scaledImageHeight = imageView.bounds.size.height
        scaledImageWidth = image.size.width * scaledImageHeight / image.size.height
    }
    DispatchQueue.global(qos: .userInitiated).async {
        // Scale image while maintaining aspect ratio so it displays better in the UIImageView.
        let scaledImage = image.scaledImage(
            with: CGSize(width: scaledImageWidth, height: scaledImageHeight)
        ) ?? image
        DispatchQueue.main.async {
            self.imageView.image = scaledImage
            self.processImage(scaledImage)
        }
    }
}
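If you draw the box over a scaled image view instead of into the full-resolution image, the detected frame also has to be mapped from image pixel coordinates into view coordinates. A hypothetical helper, not from the original answer, assuming the image view uses .scaleAspectFit:
func convertToViewRect(_ frame: CGRect, imageSize: CGSize, viewBounds: CGRect) -> CGRect {
    // Aspect-fit scale factor and the letterboxing offsets.
    let scale = min(viewBounds.width / imageSize.width,
                    viewBounds.height / imageSize.height)
    let offsetX = (viewBounds.width - imageSize.width * scale) / 2
    let offsetY = (viewBounds.height - imageSize.height * scale) / 2
    return CGRect(x: frame.origin.x * scale + offsetX,
                  y: frame.origin.y * scale + offsetY,
                  width: frame.width * scale,
                  height: frame.height * scale)
}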

How to create a MTLTexture backed by a CVPixelBuffer

What's the correct way to generate an MTLTexture backed by a CVPixelBuffer?
I have the following code, but it seems to leak:
func PixelBufferToMTLTexture(pixelBuffer: CVPixelBuffer) -> MTLTexture {
    var texture: MTLTexture!
    let width = CVPixelBufferGetWidth(pixelBuffer)
    let height = CVPixelBufferGetHeight(pixelBuffer)
    let format: MTLPixelFormat = .BGRA8Unorm
    var textureRef: Unmanaged<CVMetalTextureRef>?
    let status = CVMetalTextureCacheCreateTextureFromImage(nil,
                                                           videoTextureCache!.takeUnretainedValue(),
                                                           pixelBuffer,
                                                           nil,
                                                           format,
                                                           width,
                                                           height,
                                                           0,
                                                           &textureRef)
    if status == kCVReturnSuccess {
        texture = CVMetalTextureGetTexture(textureRef!.takeUnretainedValue())
    }
    return texture
}
Ah, I was missing: textureRef?.release()
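For reference, in later Swift versions the Core Video call returns a managed CVMetalTexture, so the Unmanaged bookkeeping (and the manual release) disappears. A sketch under that assumption, with textureCache standing in for a CVMetalTextureCache created earlier via CVMetalTextureCacheCreate:
import CoreVideo
import Metal

func makeTexture(from pixelBuffer: CVPixelBuffer,
                 cache textureCache: CVMetalTextureCache) -> MTLTexture? {
    let width = CVPixelBufferGetWidth(pixelBuffer)
    let height = CVPixelBufferGetHeight(pixelBuffer)
    var cvTexture: CVMetalTexture?
    let status = CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
                                                           textureCache,
                                                           pixelBuffer,
                                                           nil,
                                                           .bgra8Unorm,
                                                           width,
                                                           height,
                                                           0,
                                                           &cvTexture)
    guard status == kCVReturnSuccess, let cvTexture = cvTexture else { return nil }
    // Keep cvTexture alive for as long as the returned MTLTexture is in use.
    return CVMetalTextureGetTexture(cvTexture)
}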