Take snapshot from UIView with lower resolution - swift

I'm taking a snapshot from a PDFView in PDFKit for streaming (20 times per second), and I use this extension
extension UIView {
    /// Renders `viewLayer` into a `UIImage`.
    ///
    /// - Parameters:
    ///   - viewLayer: The layer to render (typically the view's own `layer`).
    ///   - viewBounds: The bounds, in points, to capture.
    ///   - scale: Rendering scale in pixels per point. Pass a value below the
    ///     device scale (e.g. `1`) to produce a lower-resolution snapshot
    ///     directly, avoiding a separate resize pass. The default `nil`
    ///     keeps the previous behavior (renderer's default/device scale).
    /// - Returns: The rendered snapshot image.
    func asImageBackground(viewLayer: CALayer, viewBounds: CGRect, scale: CGFloat? = nil) -> UIImage {
        let format = UIGraphicsImageRendererFormat.default()
        if let scale = scale {
            format.scale = scale
        }
        let renderer = UIGraphicsImageRenderer(bounds: viewBounds, format: format)
        return renderer.image { rendererContext in
            viewLayer.render(in: rendererContext.cgContext)
        }
    }
}
But the output UIImage from this extension has a high resolution, which makes it difficult to stream. I can reduce it with this extension
extension UIImage {
    /// Returns a copy of the image whose longest side is `maxSize`
    /// (adjusted for the main screen's pixel density), preserving the
    /// original aspect ratio.
    func resize(_ maxSize: CGFloat) -> UIImage {
        // The renderer's default format multiplies point sizes by the
        // screen scale, so divide here to end up at `maxSize` overall.
        let maxSizePixels = maxSize / UIScreen.main.scale
        let aspectRatio = size.width / size.height
        // Landscape images are constrained by width, portrait by height.
        let targetSize: CGSize = aspectRatio > 1
            ? CGSize(width: maxSizePixels, height: maxSizePixels / aspectRatio)
            : CGSize(width: maxSizePixels * aspectRatio, height: maxSizePixels)
        // Redraw the image into a buffer of the computed size.
        let renderer = UIGraphicsImageRenderer(size: targetSize, format: .default())
        return renderer.image { _ in
            self.draw(in: CGRect(origin: .zero, size: targetSize))
        }
    }
}
but it adds additional workload, which makes the process even worse. Is there a better way?
Thanks

You can downsample it using ImageIO which is recommended by Apple:
extension UIImage {
    /// Downsamples the image with ImageIO (`CGImageSourceCreateThumbnailAtIndex`),
    /// which decodes directly at the target size instead of redrawing
    /// through a full-size bitmap context.
    ///
    /// - Parameters:
    ///   - resolution: Target size in points.
    ///   - scale: Pixels per point applied to `resolution` (default 3,
    ///     matching the previously hard-coded factor).
    /// - Returns: The downsampled image, or `nil` if encoding or decoding fails.
    func downsample(to resolution: CGSize, scale: CGFloat = 3) -> UIImage? {
        let imageSourceOptions = [kCGImageSourceShouldCache: false] as CFDictionary
        // Re-encode to JPEG so ImageIO has a data source to decode from.
        // Unwrap the optional Data first, then bridge to CFData — cleaner
        // than a conditional cast on an optional.
        guard let data = self.jpegData(compressionQuality: 0.75),
              let imageSource = CGImageSourceCreateWithData(data as CFData, imageSourceOptions) else {
            return nil
        }
        let maxDimensionInPixels = Swift.max(resolution.width, resolution.height) * scale
        let downsampleOptions = [
            kCGImageSourceCreateThumbnailFromImageAlways: true,
            kCGImageSourceShouldCacheImmediately: true,
            kCGImageSourceCreateThumbnailWithTransform: true,
            kCGImageSourceThumbnailMaxPixelSize: maxDimensionInPixels
        ] as CFDictionary
        guard let downsampledImage = CGImageSourceCreateThumbnailAtIndex(imageSource, 0, downsampleOptions) else {
            return nil
        }
        return UIImage(cgImage: downsampledImage)
    }
}

Related

Cocoa: Capture Screen and scale image on saving in Swift

Below code I am using to capture screen in macOS application,
// Capture the main display into a CGImage (macOS).
let img = CGDisplayCreateImage(CGMainDisplayID())
guard let destination = FileManager.default.urls(for: .downloadsDirectory,
in: .userDomainMask).first?.appendingPathComponent("shot.jpg", isDirectory: false)
else {
print("Unable to save captured image!")
return
}
// NOTE(review): kCGImagePropertyPixelWidth/Height are metadata keys and are
// not honored by CGImageDestination — passing them here does NOT resize the
// image (this is the bug the answer below explains).
let properties: CFDictionary = [
kCGImagePropertyPixelWidth: "900",
kCGImagePropertyPixelHeight: "380"
] as CFDictionary
if let dest = CGImageDestinationCreateWithURL(destination as CFURL, kUTTypeJPEG, 1, properties) {
// NOTE(review): `img!` force-unwraps; CGDisplayCreateImage can return nil.
CGImageDestinationAddImage(dest, img!, properties)
CGImageDestinationFinalize(dest)
}
else {
print("Unable to create captured image to the destination!")
}
I have to scale the image to a particular size while saving. So I used a CFDictionary with the width and height properties of the image. But it seems I am doing it wrong. Please help me find the correct solution. Thank you!
First, you can't resize using CGImageDestinationCreateWithURL or CGImageDestinationAddImage. If you look at the docs here and here you will notice that neither kCGImagePropertyPixelWidth nor kCGImagePropertyPixelHeight is supported.
You will need to resize manually. You can use this tool, or modify it, if you find it helpful. It supports fill (stretch) and fit (scale while keeping the original aspect ratio) content modes. If you specify .fit it will center the drawing in the resulting image. If you specify .fill it will fill the whole space stretching whichever dimension it needs to.
/// Namespace for CPU-side `CGImage` resizing.
enum ImageResizer {
    /// How the source image is mapped into the target rectangle.
    enum ContentMode {
        case fill   // stretch to cover the whole target
        case fit    // scale preserving aspect ratio, centered
    }
    enum Error: Swift.Error {
        case badOriginal
        case resizeFailed
    }
    /// Redraws `source` into a bitmap of `targetSize` using `mode`.
    /// - Throws: `Error.badOriginal` when a drawing context cannot be
    ///   created, `Error.resizeFailed` when the context yields no image.
    static func resize(_ source: CGImage, to targetSize: CGSize, mode: ContentMode) throws -> CGImage {
        // Mirror the source's pixel layout; fall back to sRGB when it has
        // no color space of its own.
        guard let context = CGContext(
            data: nil,
            width: Int(targetSize.width),
            height: Int(targetSize.height),
            bitsPerComponent: source.bitsPerComponent,
            bytesPerRow: 0,
            space: source.colorSpace ?? CGColorSpace(name: CGColorSpace.sRGB)!,
            bitmapInfo: source.bitmapInfo.rawValue
        ) else {
            throw Error.badOriginal
        }
        // .fill stretches to the full target; .fit scales the source to the
        // largest size that still fits inside it.
        let drawingSize: CGSize
        switch mode {
        case .fill:
            drawingSize = targetSize
        case .fit:
            drawingSize = CGSize(width: source.width, height: source.height)
                .scaledToFit(target: targetSize)
        }
        let drawRect = CGRect(origin: .zero, size: targetSize)
            .makeCenteredRect(withSize: drawingSize)
        context.interpolationQuality = .high
        context.draw(source, in: drawRect)
        guard let result = context.makeImage() else {
            throw Error.resizeFailed
        }
        return result
    }
}
ImageResizer depends on these CG extensions for scaling the source image and centering scaled image:
extension CGSize {
    /// The larger of width and height.
    var maxDimension: CGFloat { Swift.max(width, height) }

    /// The smaller of width and height.
    var minDimension: CGFloat { Swift.min(width, height) }

    /// Both dimensions multiplied by `scalar`.
    func scaled(by scalar: CGFloat) -> CGSize {
        CGSize(width: width * scalar, height: height * scalar)
    }

    /// Per-axis factors that would map `self` exactly onto `target`.
    func scaleFactors(to target: CGSize) -> CGSize {
        CGSize(width: target.width / width, height: target.height / height)
    }

    /// `self` scaled uniformly so it fits inside `target` without clipping
    /// (the smaller per-axis factor wins).
    func scaledToFit(target: CGSize) -> CGSize {
        scaled(by: scaleFactors(to: target).minDimension)
    }
}
extension CGRect {
    /// A rect of the given `size` sharing this rect's center point.
    func makeCenteredRect(withSize size: CGSize) -> CGRect {
        CGRect(
            origin: CGPoint(x: midX - size.width / 2.0,
                            y: midY - size.height / 2.0),
            size: size
        )
    }
}
Also, make sure you set up permissions if you're going to save to .downloadsDirectory.

How can I add a square image to a QRCode | Swift

Essentially I have the following QR Code function that successfully creates a QR code based on a given string - how can I add a square image to the center of this QR code that is static no matter what string the code represents?
The following is the function I use to generate:
/// Builds a QR code image for `string`.
/// - Returns: The QR image scaled 3x, or `nil` when the string cannot be
///   encoded or the generator filter is unavailable.
func generateQRCode(from string: String) -> UIImage? {
    // ISO Latin-1 is the encoding QR byte mode expects; it is a superset
    // of ASCII, so all previously supported inputs behave identically and
    // strings with accented characters no longer fail to encode.
    guard let data = string.data(using: .isoLatin1),
          let filter = CIFilter(name: "CIQRCodeGenerator") else {
        return nil
    }
    filter.setValue(data, forKey: "inputMessage")
    // Scale up the tiny generated code so it isn't blurry when displayed.
    let transform = CGAffineTransform(scaleX: 3, y: 3)
    guard let output = filter.outputImage?.transformed(by: transform) else {
        return nil
    }
    return UIImage(ciImage: output)
}
Sample code from one of my apps, only slightly commented.
The size calculations may not be required for your app.
/// Renders a QR code for `code` at `pointSize`, optionally drawing `logo`
/// centered over roughly the middle 25% of the image.
///
/// - Parameters:
///   - code: Payload string (encoded as ISO Latin-1, per the QR byte mode).
///   - pointSize: Output size in points; the pixel size is derived from
///     the main screen's scale.
///   - logo: Optional overlay image; pass `nil` for a plain code.
/// - Returns: The composed image, or `nil` if generation fails.
func generateImage(code: String, size pointSize: CGSize, logo: UIImage? = nil) -> UIImage? {
    let pixelScale = UIScreen.main.scale
    let pixelSize = CGSize(width: pointSize.width * pixelScale, height: pointSize.height * pixelScale)
    guard
        let codeData = code.data(using: .isoLatin1),
        let generator = CIFilter(name: "CIQRCodeGenerator")
    else {
        return nil
    }
    generator.setValue(codeData, forKey: "inputMessage")
    // "Q" = ~25% error correction, leaving headroom for the logo to cover
    // code modules without breaking scannability.
    generator.setValue("Q", forKey: "inputCorrectionLevel")
    guard let codeImage = generator.outputImage else {
        return nil
    }
    // Scale the (tiny) generated code up to the requested pixel size.
    let transform = CGAffineTransform(
        scaleX: pixelSize.width / codeImage.extent.width,
        y: pixelSize.height / codeImage.extent.height
    )
    let scaledCodeImage = UIImage(ciImage: codeImage.transformed(by: transform), scale: 0, orientation: .up)
    guard let logo = logo else {
        return scaledCodeImage
    }
    // Compose code + logo with UIGraphicsImageRenderer instead of the
    // deprecated UIGraphicsBeginImageContextWithOptions/
    // UIGraphicsGetImageFromCurrentImageContext pair; the renderer's
    // default format matches the old options (non-opaque, screen scale)
    // and removes the force unwrap on the final image.
    let renderer = UIGraphicsImageRenderer(size: pointSize)
    return renderer.image { _ in
        // Draw the QR code across the whole buffer.
        scaledCodeImage.draw(in: CGRect(origin: .zero, size: pointSize))
        // Logo covers at most 25% of each dimension, preserving its
        // own width/height ratio.
        let logoScaleFactor: CGFloat = 0.25
        let logoScale = min(
            pointSize.width * logoScaleFactor / logo.size.width,
            pointSize.height * logoScaleFactor / logo.size.height
        )
        let logoSize = CGSize(width: logoScale * logo.size.width, height: logoScale * logo.size.height)
        // Center the logo over the code.
        logo.draw(in: CGRect(
            x: (pointSize.width - logoSize.width) / 2,
            y: (pointSize.height - logoSize.height) / 2,
            width: logoSize.width,
            height: logoSize.height
        ))
    }
}

How to stroke on UIImage?

I would like to save the colored shapes along with strokes but the code below (getImage()) generates unstroked shapes:
// Renders all shapes into a 1024x1024 image. This is the question's
// broken version: the shapes come out unstroked.
func getImage() -> UIImage {
let renderer = UIGraphicsImageRenderer(size: CGSize(width: 1024, height: 1024))
let image = renderer.image { (context) in
for key in shapeItemKeys {
let currentShape = shapeItemsByKey[key]!
UIColor.black.setStroke()
context.stroke(renderer.format.bounds)
currentShape.color.setFill()
context.cgContext.addPath(currentShape.path.bezierPath.cgPath)
context.cgContext.fillPath()
// BUG: fillPath() consumes the current path, so by the time
// strokePath() runs there is no path left to stroke — the path
// must be re-added before each drawing operation.
context.cgContext.strokePath()
}
}
return image
}
/// A uniquely identified shape with an associated fill color.
struct ShapeItem: Identifiable {
    let id = UUID()
    // Fill color; stays white until a caller assigns one.
    var color: UIColor = UIColor.white
    var path: ScaledBezier

    init(path: ScaledBezier) { self.path = path }
}
/// A SwiftUI `Shape` that uniformly scales a `UIBezierPath`, authored in
/// `sourceWidth` x `sourceHeight` coordinates, up to the available rect.
struct ScaledBezier: Shape {
    let bezierPath: UIBezierPath
    let sourceWidth: CGFloat
    let sourceHeight: CGFloat

    func path(in rect: CGRect) -> Path {
        var path = Path(bezierPath.cgPath)
        // Largest uniform factor at which the source still fits inside
        // `rect` without clipping.
        let multiplier = min(rect.width / sourceWidth, rect.height / sourceHeight)
        path.closeSubpath()
        // Same factor on both axes so the shape is not distorted.
        return path.applying(CGAffineTransform(scaleX: multiplier, y: multiplier))
    }
}
Does anyone know how to stroke shapes in order to be visible on UIImage?
If you want both to stroke and to fill, then you need to add the path for each operation, as follows
/// Renders every shape into a 1024x1024 image, stroking the full bounds
/// in black and filling each shape with its own color.
func getImage() -> UIImage {
    let renderer = UIGraphicsImageRenderer(size: CGSize(width: 1024, height: 1024))
    return renderer.image { ctx in
        let cg = ctx.cgContext
        for key in shapeItemKeys {
            let shape = shapeItemsByKey[key]!
            UIColor.black.setStroke()
            ctx.stroke(renderer.format.bounds)
            shape.color.setFill()
            // Stroking and filling each consume the current path, so the
            // path is added separately before each operation.
            cg.addPath(shape.path.bezierPath.cgPath)
            cg.strokePath()
            cg.addPath(shape.path.bezierPath.cgPath)
            cg.fillPath()
        }
    }
}
Demo code:
/// Minimal SwiftUI harness demonstrating `getImage()` with a single
/// rounded-rect shape.
struct DemoView: View {
    let shapeItemKeys = [1]
    let shapeItemsByKey = [1: ShapeItem(path: ScaledBezier(bezierPath: UIBezierPath(roundedRect: CGRect(x: 10, y: 10, width: 100, height: 200), cornerRadius: 20), sourceWidth: 100, sourceHeight: 200))]

    var body: some View {
        VStack(spacing: 0) {
            Image(uiImage: getImage())
        }
    }

    /// Draws all demo shapes into a 300x300 image, adding the path once
    /// for the stroke and again for the fill (each operation clears it).
    func getImage() -> UIImage {
        let renderer = UIGraphicsImageRenderer(size: CGSize(width: 300, height: 300))
        return renderer.image { ctx in
            for key in shapeItemKeys {
                let shape = shapeItemsByKey[key]!
                UIColor.black.setStroke()
                ctx.stroke(renderer.format.bounds)
                shape.color.setFill()
                ctx.cgContext.addPath(shape.path.bezierPath.cgPath)
                ctx.cgContext.strokePath()
                ctx.cgContext.addPath(shape.path.bezierPath.cgPath)
                ctx.cgContext.fillPath()
            }
        }
    }
}

How to convert pixel dimension to CG Size in Swift?

I have large images uploaded by users in Swift and I need to resize them all to 100x100px to create thumbnails to store in my server. So far I have found that this resizes an image given a CGSize:
/// Redraws `image` at exactly `size` (in points), stretching if the
/// aspect ratios differ.
func resizedImage(image: UIImage, size: CGSize) -> UIImage? {
    UIGraphicsImageRenderer(size: size).image { _ in
        image.draw(in: CGRect(origin: .zero, size: size))
    }
}
Is there any way to create a CGSize knowing that my target size is strictly 100x100px?
Got this to work:
extension UIImage {
    /// Aspect-fits the image inside a box measured in *pixels*.
    ///
    /// The pixel dimensions are divided by `UIScreen.main.scale` because
    /// the renderer's default format multiplies point sizes by the screen
    /// scale, so the final bitmap ends up at the requested pixel size.
    /// - Parameter pixelSize: Target (width, height) in pixels.
    /// - Returns: The resized image.
    func resizedImage(pixelSize: (width: Int, height: Int)) -> UIImage? {
        // `let`: the size is never mutated after construction (was `var`).
        let size = CGSize(width: CGFloat(pixelSize.width) / UIScreen.main.scale, height: CGFloat(pixelSize.height) / UIScreen.main.scale)
        // AVMakeRect keeps the original aspect ratio, centering the
        // drawing inside the target bounds.
        let rect = AVMakeRect(aspectRatio: self.size, insideRect: CGRect(x: 0, y: 0, width: size.width, height: size.height))
        let renderer = UIGraphicsImageRenderer(size: size)
        return renderer.image { (context) in
            self.draw(in: rect)
        }
    }
}
You should initialize your render based on the user device scale and multiply its width and height instead of dividing it:
extension UIImage {
    /// Scales the image to fit inside `size` (points) while preserving
    /// its aspect ratio, centered on a transparent canvas rendered at the
    /// main screen's scale.
    func aspectFitScaled(to size: CGSize) -> UIImage {
        let format = imageRendererFormat
        format.opaque = false
        format.scale = UIScreen.main.scale
        // The longer source edge determines the uniform scale factor.
        let ratio: CGFloat
        if self.size.width > self.size.height {
            ratio = size.width / self.size.width
        } else {
            ratio = size.height / self.size.height
        }
        let drawSize = self.size.scaled(by: ratio)
        // Center the scaled drawing inside the target canvas.
        let origin = CGPoint(
            x: (size.width - drawSize.width) / 2,
            y: (size.height - drawSize.height) / 2
        )
        return UIGraphicsImageRenderer(size: size, format: format).image { _ in
            draw(in: CGRect(origin: origin, size: drawSize))
        }
    }
}
usage:
/// Demo: loads a remote image and shows it aspect-fit scaled to the
/// image view's 200x200 frame, then prints the resulting dimensions.
class ViewController: UIViewController {
    // imageView frame is 200 x 200
    // Fixed: the original read `#IBOutlet`, which is not valid Swift —
    // the attribute is spelled `@IBOutlet`.
    @IBOutlet weak var imageView: UIImageView!

    override func viewDidLoad() {
        super.viewDidLoad()
        // original image size is (719.0, 808.0)
        // NOTE(review): synchronous network load + force unwraps are
        // demo-only; never block the main thread like this in production.
        let image = UIImage(data: try! Data(contentsOf: URL(string: "https://i.stack.imgur.com/Xs4RX.jpg")!))!
        imageView.backgroundColor = .gray
        let ivImage = image.aspectFitScaled(to: imageView.frame.size)
        imageView.image = ivImage
        print("ivImage.size", ivImage.size) // (200.0, 200.0)
        print("ivImage.scale", ivImage.scale) // screen scale 3.0 iPhone 8 Plus
        // lets check the real image dimension
        let data = ivImage.jpegData(compressionQuality: 1)!
        let savedSize = UIImage(data: data)!.size
        print("savedSize", savedSize) // savedSize (600.0, 600.0)
    }
}

Resizing of NSImage not working

I am trying to resize a NSImage implementing a code that I got from this website https://gist.github.com/eiskalteschatten/dac3190fce5d38fdd3c944b45a4ca469, but it's not working.
Here is the code:
// NOTE(review): this is the asker's non-working version — it re-wraps the
// backing CGImage with a new nominal size but never computes an aspect
// ratio, which is why the result looked wrong. The corrected version
// appears below.
static func redimensionaNSImage(imagem: NSImage, tamanho: NSSize) -> NSImage {
var imagemRect: CGRect = CGRect(x: 0, y: 0, width: imagem.size.width, height: imagem.size.height)
let imagemRef = imagem.cgImage(forProposedRect: &imagemRect, context: nil, hints: nil)
// NOTE(review): force unwrap — cgImage(forProposedRect:) can return nil.
return NSImage(cgImage: imagemRef!, size: tamanho)
}
I forgot to calculate the ratio. Now it's working fine.
/// Resizes `imagem` so it fits within `tamanho`, preserving aspect ratio.
///
/// Landscape images are constrained by `tamanho.width`; portrait (and
/// square) images by `tamanho.height`.
/// - Returns: The resized image, or the original image unchanged if no
///   `CGImage` representation can be obtained.
static func redimensionaNSImage(imagem: NSImage, tamanho: NSSize) -> NSImage {
    // Work in CGFloat throughout; the original's CGFloat -> Float ->
    // CGFloat round-trips lost precision for no benefit.
    let imageWidth = imagem.size.width
    let imageHeight = imagem.size.height
    // Scale by whichever axis is the constraining one.
    let ratio: CGFloat
    if imageWidth > imageHeight {
        // Landscape: width is the limiting dimension.
        ratio = tamanho.width / imageWidth
    } else {
        // Portrait or square: height is the limiting dimension.
        ratio = tamanho.height / imageHeight
    }
    // Truncate to whole points, as the original did via Int().
    let newSize = NSSize(width: Int(imageWidth * ratio), height: Int(imageHeight * ratio))
    // Obtain a CGImage backing; fall back to returning the original
    // rather than force-unwrapping and crashing when none is available.
    var imageRect = CGRect(x: 0, y: 0, width: imageWidth, height: imageHeight)
    guard let imageRef = imagem.cgImage(forProposedRect: &imageRect, context: nil, hints: nil) else {
        return imagem
    }
    return NSImage(cgImage: imageRef, size: newSize)
}