I need to add some transparent padding around an NSImage. I referred to this SO post: Placing an NSImage on a transparent canvas in Swift.
It points to the GitHub gist https://gist.github.com/MaciejGad/11d8469b218817290ee77012edb46608, which mentions tweaking the resizeWhileMaintainingAspectRatioToSize method. I tried what the post suggested, but Swift seems to be missing an NSImage.copy method that takes an NSRect parameter. So I wrote the extension method below with my limited knowledge:
func addPadding() -> NSImage? {
    let newSize: NSSize
    let widthRatio = size.width / self.size.width
    let heightRatio = size.height / self.size.height
    if widthRatio > heightRatio {
        newSize = NSSize(width: floor(self.size.width * widthRatio), height: floor(self.size.height * widthRatio))
    } else {
        newSize = NSSize(width: floor(self.size.width * heightRatio), height: floor(self.size.height * heightRatio))
    }
    let img = NSImage(size: newSize)
    let frame = NSRect(x: 0, y: 0, width: newSize.width, height: newSize.height)
    // Set the drawing context and make sure to remove the focus before returning.
    img.lockFocus()
    defer { img.unlockFocus() }
    let representation = self.bestRepresentation(for: frame, context: nil, hints: nil)
    // Draw the new image
    if representation!.draw(in: frame) {
        return img
    }
    // Return nil in case something went wrong.
    return nil
}
But this does nothing to add padding to the image.
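As written, both ratios divide self.size by itself, so they are always 1 and newSize comes out equal to the original size, which is why nothing changes. For comparison, here is a minimal sketch of a method that actually enlarges the canvas and centres the image on it; the padding parameter and the method name are assumptions for illustration, not the gist's API:

import AppKit

extension NSImage {
    // Sketch only: draws the image centred on a larger transparent canvas.
    // The `padding` parameter is hypothetical; the original gist resizes
    // toward a target size instead.
    func padded(by padding: CGFloat) -> NSImage? {
        let newSize = NSSize(width: size.width + 2 * padding,
                             height: size.height + 2 * padding)
        let canvas = NSRect(origin: .zero, size: newSize)
        // Centre the original image inside the enlarged canvas.
        let target = NSRect(x: padding, y: padding,
                            width: size.width, height: size.height)
        guard let representation = bestRepresentation(for: canvas, context: nil, hints: nil) else {
            return nil
        }
        // The untouched border of the new canvas stays transparent.
        return NSImage(size: newSize, flipped: false) { _ in
            representation.draw(in: target)
        }
    }
}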
You can do it with this code:
extension NSImage {
    var pixelSize: NSSize {
        if let rep = self.representations.first {
            let size = NSSize(
                width: rep.pixelsWide,
                height: rep.pixelsHigh
            )
            return size
        }
        return NSSize(width: 0, height: 0)
    }

    func withPadding(percent: Int) -> NSImage {
        let originalSize = self.pixelSize
        // Shrink the drawing rect by the requested percentage; the canvas
        // itself keeps the original pixel size.
        let newHeight = originalSize.height - originalSize.height * CGFloat(percent) / 100
        let newWidth = originalSize.width - originalSize.width * CGFloat(percent) / 100
        let sizeWithPadding = NSSize(
            width: newWidth,
            height: newHeight
        )
        // Build an explicit RGBA bitmap at the original pixel size.
        guard let representation = NSBitmapImageRep(
            bitmapDataPlanes: nil,
            pixelsWide: Int(originalSize.width),
            pixelsHigh: Int(originalSize.height),
            bitsPerSample: 8,
            samplesPerPixel: 4,
            hasAlpha: true,
            isPlanar: false,
            colorSpaceName: .calibratedRGB,
            bytesPerRow: 0,
            bitsPerPixel: 0
        )
        else {
            return self
        }
        representation.size = originalSize
        NSGraphicsContext.saveGraphicsState()
        NSGraphicsContext.current = NSGraphicsContext(bitmapImageRep: representation)
        // Centre the shrunken image so the padding is even on all sides.
        let xPoint = (originalSize.width - newWidth) / 2
        let yPoint = (originalSize.height - newHeight) / 2
        self.draw(
            in: NSRect(
                x: xPoint,
                y: yPoint,
                width: sizeWithPadding.width,
                height: sizeWithPadding.height
            ),
            from: .zero,
            operation: .copy,
            fraction: 1.0
        )
        NSGraphicsContext.restoreGraphicsState()
        let newImage = NSImage(size: originalSize)
        newImage.addRepresentation(representation)
        return newImage
    }
}
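A possible call site, with a hypothetical file path:

let original = NSImage(contentsOfFile: "/tmp/input.png")! // path is made up
let padded = original.withPadding(percent: 20)
// The canvas keeps the original pixel size; the image itself is drawn 20%
// smaller and centred, leaving a transparent border on every side.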
Related
In Swift, I have large images uploaded by users, and I need to resize them all to 100x100px to create thumbnails to store on my server. So far I have found that this resizes an image given a CGSize:
func resizedImage(image: UIImage, size: CGSize) -> UIImage? {
    let renderer = UIGraphicsImageRenderer(size: size)
    return renderer.image { (context) in
        image.draw(in: CGRect(origin: .zero, size: size))
    }
}
Is there any way to create a CGSize knowing that my target size is strictly 100x100px?
Got this to work:
import AVFoundation
import UIKit

extension UIImage {
    func resizedImage(pixelSize: (width: Int, height: Int)) -> UIImage? {
        // Divide by the screen scale so the renderer's backing store comes
        // out at exactly the requested pixel dimensions.
        let size = CGSize(width: CGFloat(pixelSize.width) / UIScreen.main.scale, height: CGFloat(pixelSize.height) / UIScreen.main.scale)
        // AVMakeRect (from AVFoundation) aspect-fits the image in the canvas.
        let rect = AVMakeRect(aspectRatio: self.size, insideRect: CGRect(x: 0, y: 0, width: size.width, height: size.height))
        let renderer = UIGraphicsImageRenderer(size: size)
        return renderer.image { (context) in
            self.draw(in: rect)
        }
    }
}
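A quick sanity check, with a hypothetical source image avatar:

if let thumb = avatar.resizedImage(pixelSize: (width: 100, height: 100)) {
    // Points times scale gives the backing-store pixel count.
    print(thumb.size.width * thumb.scale,
          thumb.size.height * thumb.scale) // 100.0 100.0
}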
You should initialize your renderer based on the user's device scale and multiply its width and height instead of dividing:
extension UIImage {
    func aspectFitScaled(to size: CGSize) -> UIImage {
        let format = imageRendererFormat
        format.opaque = false
        format.scale = UIScreen.main.scale
        let isLandscape = self.size.width > self.size.height
        let ratio = isLandscape ? size.width / self.size.width : size.height / self.size.height
        let drawSize = CGSize(width: self.size.width * ratio, height: self.size.height * ratio)
        // Centre the scaled image on the target canvas.
        let x = (size.width - drawSize.width) / 2
        let y = (size.height - drawSize.height) / 2
        let origin = CGPoint(x: x, y: y)
        return UIGraphicsImageRenderer(size: size, format: format).image { _ in
            draw(in: CGRect(origin: origin, size: drawSize))
        }
    }
}
Usage:
class ViewController: UIViewController {
    // imageView frame is 200 x 200
    @IBOutlet weak var imageView: UIImageView!

    override func viewDidLoad() {
        super.viewDidLoad()
        // original image size is (719.0, 808.0)
        let image = UIImage(data: try! Data(contentsOf: URL(string: "https://i.stack.imgur.com/Xs4RX.jpg")!))!
        imageView.backgroundColor = .gray
        let ivImage = image.aspectFitScaled(to: imageView.frame.size)
        imageView.image = ivImage
        print("ivImage.size", ivImage.size) // (200.0, 200.0)
        print("ivImage.scale", ivImage.scale) // screen scale, 3.0 on an iPhone 8 Plus
        // Let's check the real image dimensions
        let data = ivImage.jpegData(compressionQuality: 1)!
        let savedSize = UIImage(data: data)!.size
        print("savedSize", savedSize) // savedSize (600.0, 600.0)
    }
}
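As an aside, the 600 x 600 saved size is just points times scale (200 x 3). Continuing the example above, the pixel dimensions can be read without the JPEG round trip:

let pixelSize = CGSize(width: ivImage.size.width * ivImage.scale,
                       height: ivImage.size.height * ivImage.scale)
print("pixelSize", pixelSize) // (600.0, 600.0) on a 3x screen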
I'm using the following code to resize an image, but I keep getting a white border at the bottom.
func resize(withSize targetSize: NSSize) -> NSImage? {
    let size = NSSize(width: floor(targetSize.width / (NSScreen.main?.backingScaleFactor)!), height: floor(targetSize.height / (NSScreen.main?.backingScaleFactor)!))
    let frame = NSRect(x: 0, y: 0, width: floor(size.width), height: floor(size.height))
    guard let representation = self.bestRepresentation(for: frame, context: nil, hints: nil) else {
        return nil
    }
    let image = NSImage(size: size, flipped: false, drawingHandler: { (_) -> Bool in
        return representation.draw(in: frame)
    })
    return image
}
I have tried using the floor function to round off the values, but I still get the same issue.
extension NSImage {
    func resize(withSize targetSize: NSSize) -> NSImage? {
        let ratioX = targetSize.width / size.width / (NSScreen.main?.backingScaleFactor ?? 1)
        let ratioY = targetSize.height / size.height / (NSScreen.main?.backingScaleFactor ?? 1)
        let ratio = ratioX < ratioY ? ratioX : ratioY
        let canvasSize = NSSize(width: size.width * ratio, height: size.height * ratio)
        let frame = NSRect(origin: .zero, size: canvasSize)
        guard let representation = bestRepresentation(for: frame, context: nil, hints: nil)
        else { return nil }
        return NSImage(size: canvasSize, flipped: false) { _ in representation.draw(in: frame) }
    }
}
let image = NSImage(contentsOf: URL(string: "http://i.stack.imgur.com/Xs4RX.jpg")!)!
let resized = image.resize(withSize: .init(width: 400, height: 300)) // w 267 h 300
If you would like to take the screen scale into account, just remove the division by (NSScreen.main?.backingScaleFactor ?? 1) when calculating ratioX and ratioY.
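That is, the first two lines of the method would simply become:

let ratioX = targetSize.width / size.width
let ratioY = targetSize.height / size.height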
I have created an image from a UIView using UIGraphicsGetCurrentContext. It works fine, but when I resize that image to a larger size it becomes blurred, with bad quality. Is it possible to keep the quality when resizing? I have tried many ways, but nothing works.
Code to create the image:
func image(with view: UIView) -> UIImage? {
    UIGraphicsBeginImageContextWithOptions(view.bounds.size, view.isOpaque, 0.0)
    defer { UIGraphicsEndImageContext() }
    if let context = UIGraphicsGetCurrentContext() {
        view.drawHierarchy(in: view.bounds, afterScreenUpdates: true)
        context.setAllowsAntialiasing(true)
        context.setShouldAntialias(true)
        let image = UIGraphicsGetImageFromCurrentImageContext()
        return image
    }
    return nil
}
The code I used to resize, an extension on UIImage:
func resizedImage(newSize: CGSize) -> UIImage {
    // Guard newSize is different
    guard self.size != newSize else { return self }
    let aspect_ratio = self.size.width / self.size.height
    let image_w = newSize.height * aspect_ratio
    let finalSize = CGSize(width: image_w, height: newSize.height)
    UIGraphicsBeginImageContextWithOptions(finalSize, false, 0.0)
    self.draw(in: CGRect(x: 0, y: 0, width: finalSize.width, height: newSize.height))
    let newImage: UIImage = UIGraphicsGetImageFromCurrentImageContext()!
    UIGraphicsEndImageContext()
    return newImage
}
And the example image: image
Thanks
Try this:
extension UIImage {
    func resizeImage(targetSize: CGSize) -> UIImage {
        let size = self.size
        let widthRatio = targetSize.width / size.width
        let heightRatio = targetSize.height / size.height
        // Aspect-fit: scale by the smaller ratio so the whole image fits.
        let newSize = widthRatio > heightRatio ? CGSize(width: size.width * heightRatio, height: size.height * heightRatio) : CGSize(width: size.width * widthRatio, height: size.height * widthRatio)
        let rect = CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height)
        UIGraphicsBeginImageContextWithOptions(newSize, false, 1.0)
        self.draw(in: rect)
        let newImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return newImage!
    }
}
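Usage might look like this, with a hypothetical source image photo:

let thumb = photo.resizeImage(targetSize: CGSize(width: 300, height: 300))
// The hard-coded scale of 1.0 makes points equal pixels, so the longer
// side of the aspect-fitted result is exactly 300 pixels.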
I am referring to ZImageCropper for my project to crop an image. However, it reduces the quality of the image during conversion.
I know one of the reasons the image quality is reduced is UIGraphicsBeginImageContextWithOptions(imageView.bounds.size, false, 1).
I have tried changing it to UIGraphicsBeginImageContextWithOptions(imageView.bounds.size, false, 0) and to UIGraphicsBeginImageContextWithOptions(imageView.bounds.size, false, UIScreen.main.scale).
I referenced my code from Resize and Crop 2 Images affected the original image quality. It does help, but when I check the quality of the image, it is still reduced. Is there any way to prevent the reduction in image quality?
In iOS 10 and above you can resize any UIImage without quality loss. Hope this helps:
func resize(targetSize: CGSize) -> UIImage {
    if #available(iOS 10.0, *) {
        return UIGraphicsImageRenderer(size: targetSize).image { _ in
            self.draw(in: CGRect(origin: .zero, size: targetSize))
        }
    } else {
        return resizeImage(maxSize: targetSize.width)
    }
}

func resizeImage(maxSize: CGFloat) -> UIImage {
    var newWidth = size.width
    var newHeight = size.height
    if size.width >= maxSize {
        newWidth = min(maxSize, size.width)
        newHeight = maxSize * size.height / size.width
    } else if size.height >= maxSize {
        newHeight = min(maxSize, size.height)
        newWidth = maxSize * size.width / size.height
    }
    UIGraphicsBeginImageContext(CGSize(width: newWidth, height: newHeight))
    draw(in: CGRect(x: 0, y: 0, width: newWidth, height: newHeight))
    let newImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return newImage!
}
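A hypothetical call site, assuming these methods live in a UIImage extension and croppedImage is the output of the cropper:

let enlarged = croppedImage.resize(targetSize: CGSize(width: 800, height: 800))
// On iOS 10+, UIGraphicsImageRenderer uses the device scale by default,
// so the 800 x 800 point canvas is backed by a screen-scale bitmap.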
I am trying to resize an NSImage using code that I got from this gist https://gist.github.com/eiskalteschatten/dac3190fce5d38fdd3c944b45a4ca469, but it's not working.
Here is the code:
static func redimensionaNSImage(imagem: NSImage, tamanho: NSSize) -> NSImage {
    var imagemRect: CGRect = CGRect(x: 0, y: 0, width: imagem.size.width, height: imagem.size.height)
    let imagemRef = imagem.cgImage(forProposedRect: &imagemRect, context: nil, hints: nil)
    return NSImage(cgImage: imagemRef!, size: tamanho)
}
I forgot to calculate the ratio. Now it's working fine.
static func redimensionaNSImage(imagem: NSImage, tamanho: NSSize) -> NSImage {
    var ratio: Float = 0.0
    let imageWidth = Float(imagem.size.width)
    let imageHeight = Float(imagem.size.height)
    let maxWidth = Float(tamanho.width)
    let maxHeight = Float(tamanho.height)

    // Get ratio (landscape or portrait)
    if imageWidth > imageHeight {
        // Landscape
        ratio = maxWidth / imageWidth
    } else {
        // Portrait
        ratio = maxHeight / imageHeight
    }

    // Calculate new size based on the ratio
    let newWidth = imageWidth * ratio
    let newHeight = imageHeight * ratio

    // Create a new NSSize object with the newly calculated size
    let newSize: NSSize = NSSize(width: Int(newWidth), height: Int(newHeight))

    // Cast the NSImage to a CGImage
    var imageRect: CGRect = CGRect(x: 0, y: 0, width: imagem.size.width, height: imagem.size.height)
    let imageRef = imagem.cgImage(forProposedRect: &imageRect, context: nil, hints: nil)

    // Create NSImage from the CGImage using the new size
    let imageWithNewSize = NSImage(cgImage: imageRef!, size: newSize)

    // Return the new image
    return imageWithNewSize
}
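Usage, assuming the method lives on a helper type (ImageHelper here is a made-up name) and the file path is hypothetical:

let original = NSImage(contentsOfFile: "/tmp/foto.png")! // hypothetical path
// An 800 x 600 source fit into 400 x 300 keeps its aspect ratio: 400 x 300 out.
let resized = ImageHelper.redimensionaNSImage(imagem: original,
                                              tamanho: NSSize(width: 400, height: 300))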