Rotating an NSImage in Swift

I'm trying to rotate an image 90 degrees on macOS using Swift.
I have the following code:
@IBAction func rotateImage(_ sender: Any) {
    let rotatedImage = image.imageRotatedByDegrees(degrees: CGFloat(90))
    self.imageView?.image = rotatedImage
}
And
public extension NSImage {
    public func imageRotatedByDegrees(degrees: CGFloat) -> NSImage {
        var imageBounds = NSZeroRect
        imageBounds.size = self.size
        let pathBounds = NSBezierPath(rect: imageBounds)
        var transform = NSAffineTransform()
        transform.rotate(byDegrees: degrees)
        pathBounds.transform(using: transform as AffineTransform)
        let rotatedBounds: NSRect = NSMakeRect(NSZeroPoint.x, NSZeroPoint.y, self.size.width, self.size.height)
        let rotatedImage = NSImage(size: rotatedBounds.size)

        // Center the image within the rotated bounds
        imageBounds.origin.x = NSMidX(rotatedBounds) - (NSWidth(imageBounds) / 2)
        imageBounds.origin.y = NSMidY(rotatedBounds) - (NSHeight(imageBounds) / 2)

        // Start a new transform
        transform = NSAffineTransform()
        // Move coordinate system to the center (since we want to rotate around the center)
        transform.translateX(by: +(NSWidth(rotatedBounds) / 2), yBy: +(NSHeight(rotatedBounds) / 2))
        transform.rotate(byDegrees: degrees)
        // Move the coordinate system back to normal
        transform.translateX(by: -(NSWidth(rotatedBounds) / 2), yBy: -(NSHeight(rotatedBounds) / 2))

        // Draw the original image, rotated, into the new image
        rotatedImage.lockFocus()
        transform.concat()
        self.draw(in: imageBounds, from: NSZeroRect, operation: NSCompositingOperation.copy, fraction: 1.0)
        rotatedImage.unlockFocus()

        return rotatedImage
    }
}
Initially the image looks like this:
Once I rotate the image it gets cropped to a square. So it looks like this:
This is undesired behaviour. I want it to have the same dimensions as before.

Related

Cropping visible part of UIImage in UIImageView for saliency

I'm doing attention-based saliency and need to pass an image to the request. When the contentMode is .scaleAspectFill, the result of the request is not correct, because I use the full image (not just the part visible on screen).
I'm trying to crop the UIImage, but this method doesn't crop correctly:
let newImage = cropImage(imageToCrop: imageView.image, toRect: imageView.frame)

func cropImage(imageToCrop: UIImage?, toRect rect: CGRect) -> UIImage? {
    guard let imageRef = imageToCrop?.cgImage?.cropping(to: rect) else {
        return nil
    }
    let cropped: UIImage = UIImage(cgImage: imageRef)
    return cropped
}
How can I make the saliency request use only the visible part of the image (which changes when the contentMode changes)?
If I understand your goal correctly...
Suppose we have this 640 x 360 image:
and we display it in a 240 x 240 image view, using .scaleAspectFill...
It looks like this (the red outline is the image view frame):
and, with .clipsToBounds = true:
we want to generate this new 360 x 360 image (that is, we want to keep the original image resolution... we don't want to end up with a 240 x 240 image):
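Working through the numbers: .scaleAspectFill scales the 640 x 360 image by max(240/640, 240/360) = 2/3 to fill the square view, so the visible region of the original is 240 / (2/3) = 360 points on each side; the crop is therefore 360 x 360, starting at x = (640 - 360) / 2 = 140 and y = 0.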
To crop the visible portion of the image, we need to calculate the scaled rect, including the offset:
func cropImage(imageToCrop: UIImage?, toRect rect: CGRect) -> UIImage? {
    guard let imageRef = imageToCrop?.cgImage?.cropping(to: rect) else {
        return nil
    }
    let cropped: UIImage = UIImage(cgImage: imageRef)
    return cropped
}
func myCrop(imgView: UIImageView) -> UIImage? {
    // get the image from the imageView
    guard let img = imgView.image else { return nil }
    // image view rect
    let vr: CGRect = imgView.bounds
    // image size -- we need to account for scale
    let imgSZ: CGSize = CGSize(width: img.size.width * img.scale, height: img.size.height * img.scale)
    let viewRatio: CGFloat = vr.width / vr.height
    let imgRatio: CGFloat = imgSZ.width / imgSZ.height
    var newRect: CGRect = .zero
    // calculate the rect that needs to be clipped from the full image
    if viewRatio > imgRatio {
        // the image view has a wider aspect ratio than the image
        // so the top and bottom of the image will be clipped
        let f: CGFloat = imgSZ.width / vr.width
        let h: CGFloat = vr.height * f
        newRect.origin.y = (imgSZ.height - h) * 0.5
        newRect.size.width = imgSZ.width
        newRect.size.height = h
    } else {
        // the image view has a narrower aspect ratio than the image
        // so the left and right of the image will be clipped
        let f: CGFloat = imgSZ.height / vr.height
        let w: CGFloat = vr.width * f
        newRect.origin.x = (imgSZ.width - w) * 0.5
        newRect.size.width = w
        newRect.size.height = imgSZ.height
    }
    return cropImage(imageToCrop: img, toRect: newRect)
}
and call it like this:
if let croppedImage = myCrop(imgView: theImageView) {
    // do something with the new image
}
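To then run the saliency request on just the visible part, one option (a rough sketch, assuming iOS 13+, the Vision framework, and the myCrop helper above) is to hand the cropped image's cgImage to Vision:
import UIKit
import Vision

func visibleSaliency(in imageView: UIImageView) {
    guard let cropped = myCrop(imgView: imageView),
          let cgImage = cropped.cgImage else { return }

    let request = VNGenerateAttentionBasedSaliencyImageRequest()
    let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])
    do {
        try handler.perform([request])
        if let observation = request.results?.first as? VNSaliencyImageObservation {
            // Bounding boxes are normalized to the *cropped* (visible) image
            let boxes = (observation.salientObjects ?? []).map { $0.boundingBox }
            print(boxes)
        }
    } catch {
        print("Saliency request failed: \(error)")
    }
}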

Drawing a Tiled Logo over NSImage at 45 degrees

I'm trying to draw a logo tiled over an image at 45 degrees. But I always get spacing on the left side.
var y_offset: CGFloat = logo.size.width * sin(45 * (CGFloat.pi / 180.0))
// the sin of the angle may return zero or a negative value,
// it won't work with this formula
if y_offset >= 0 {
    var x: CGFloat = 0
    while x < size.width {
        var y: CGFloat = 0
        while y < size.height {
            // move to this position
            context.saveGState()
            context.translateBy(x: x, y: y)
            // draw text rotated around its center
            context.rotate(by: ((CGFloat(-45) * CGFloat.pi) / 180))
            logo.draw(at: NSPoint(x: x, y: y), from: .zero, operation: .sourceOver, fraction: CGFloat(logotransparency))
            // reset
            context.restoreGState()
            y = y + CGFloat(y_offset)
        }
        x = x + logo.size.width
    }
}
This is the result I get.
As you can see, there is some spacing on the left side. I cannot figure out what I'm doing wrong. I have tried setting y to size.height and decrementing it by y_offset in the loop, but I get the same result.
Update:
var dirtyRect: NSRect = NSMakeRect(0, 0, size.width, size.height)
let deg45 = CGFloat.pi / 4
if let ciImage = logo.ciImage {
    let ciTiled = ciImage.tiled(at: deg45).cropped(to: dirtyRect)
    let color = NSColor(patternImage: NSImage.fromCIImage(ciTiled))
    color.setFill()
    context.fill(dirtyRect)
}
Updated answer
If you need more control over the appearance, you can manually draw the overlays. See the code below for a fixed version of your original code, with two options for spacing.
In production, you would of course want to avoid using ! and move the image loading out of the draw function (even though NSImage(named:) uses a cache).
override func draw(_ dirtyRect: NSRect) {
    let bgImage = NSImage(named: "landscape")!
    bgImage.draw(in: dirtyRect)

    let deg45 = CGFloat.pi / 4
    let logo = NSImage(named: "TextTile")!
    let context = NSGraphicsContext.current!.cgContext

    let h = logo.size.height // (sin(deg45) * logo.size.height) + (cos(deg45) * logo.size.height)
    let w = logo.size.width  // (sin(deg45) * logo.size.width ) + (cos(deg45) * logo.size.width )

    var x: CGFloat = -w
    while x < dirtyRect.width + w {
        var y: CGFloat = -h
        while y < dirtyRect.height + h {
            context.saveGState()
            context.translateBy(x: x, y: y)
            context.rotate(by: deg45)
            logo.draw(at: NSPoint(x: 0, y: 0),
                      from: .zero,
                      operation: .sourceOver,
                      fraction: 1)
            context.restoreGState()
            y = y + h
        }
        x = x + w
    }
    super.draw(dirtyRect)
}
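As a rough sketch of that production note (the property-based loading and the class name are illustrative, not part of the original answer), the force-unwraps can be avoided by loading both images once outside draw(_:):
import Cocoa

class TiledLogoView: NSView {
    // Loaded once rather than on every draw pass; missing assets simply skip drawing.
    private let bgImage = NSImage(named: "landscape")
    private let logo = NSImage(named: "TextTile")

    override func draw(_ dirtyRect: NSRect) {
        guard let bgImage = bgImage,
              let logo = logo,
              let context = NSGraphicsContext.current?.cgContext else { return }

        bgImage.draw(in: dirtyRect)
        // ... same tiling loop as above, using `logo` and `context` ...
        super.draw(dirtyRect)
    }
}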
Original answer
You can set a backgroundColor with a patternImage to get the effect of drawing image tiles in a rect.
To tilt the image by some angle, use Core Image's CIAffineTile filter with a rotation transform.
Here is some example code:
import Cocoa
import CoreImage

class ViewController: NSViewController {
    override func loadView() {
        let size = CGSize(width: 500, height: 500)
        let view = TiledView(frame: CGRect(origin: .zero, size: size))
        self.view = view
    }
}

class TiledView: NSView {
    override func draw(_ dirtyRect: NSRect) {
        let bgImage = NSImage(named: "landscape")!
        bgImage.draw(in: dirtyRect)

        let deg45 = CGFloat.pi / 4
        if let ciImage = NSImage(named: "TextTile")?.ciImage() {
            let ciTiled = ciImage.tiled(at: deg45).cropped(to: dirtyRect)
            let color = NSColor(patternImage: NSImage.fromCIImage(ciTiled))
            color.setFill()
            dirtyRect.fill()
        }
        super.draw(dirtyRect)
    }
}

extension NSImage {
    // source: https://rethunk.medium.com/convert-between-nsimage-and-ciimage-in-swift-d6c6180ef026
    func ciImage() -> CIImage? {
        guard let data = self.tiffRepresentation,
              let bitmap = NSBitmapImageRep(data: data) else {
            return nil
        }
        let ci = CIImage(bitmapImageRep: bitmap)
        return ci
    }

    static func fromCIImage(_ ciImage: CIImage) -> NSImage {
        let rep = NSCIImageRep(ciImage: ciImage)
        let nsImage = NSImage(size: rep.size)
        nsImage.addRepresentation(rep)
        return nsImage
    }
}

extension CIImage {
    func tiled(at angle: CGFloat) -> CIImage {
        // try different transforms here
        let transform = CGAffineTransform(rotationAngle: angle)
        return self.applyingFilter("CIAffineTile", parameters: [kCIInputTransformKey: transform])
    }
}
The result looks like this:

How do I render CALayer sublayers in the correct position

I am trying to get an image drawn of a CALayer containing a number of sublayers positioned at specific points, but at the moment it does not honour the zPosition of the sublayers when I use CALayer.render(in ctx:). It works fine on screen but when rendering to PDF it seems to render them in the order they were created.
These sublayers are positioned (x, y, angle) on the drawing layer.
One solution seems to be to override the render(in ctx:) method on the drawing layer, which seems to work, except that the sublayers are rendered in the incorrect position. They are all in the bottom-left corner (0, 0) and not rotated correctly.
override func render(in ctx: CGContext) {
    if let layers: [CALayer] = self.sublayers {
        let orderedLayers = layers.sorted(by: {
            $0.zPosition < $1.zPosition
        })
        for v in orderedLayers {
            v.render(in: ctx)
        }
    }
}
If I don't override this method, then they are positioned correctly but stacked in the wrong zPosition order - i.e. ones that should be at the bottom (zPosition = 0) are at the top.
What am I missing here? It seems I need to position the sublayers correctly somehow in the render(in ctx:) function?
How do I do this? These sublayers have already been positioned on screen, and all I am trying to do is generate an image of the drawing. This is done using the following function.
func createPdfData() -> Data? {
    DebugLog("")
    let scale: CGFloat = 1
    let mWidth = drawingLayer.frame.width * scale
    let mHeight = drawingLayer.frame.height * scale
    var cgRect = CGRect(x: 0, y: 0, width: mWidth, height: mHeight)
    let documentInfo = [kCGPDFContextCreator as String: "MakeSpace(www.xxxx.com)",
                        kCGPDFContextTitle as String: "Layout Image",
                        kCGPDFContextAuthor as String: GlobalVars.shared.appUser?.username ?? "",
                        kCGPDFContextSubject as String: self.level?.imageCode ?? "",
                        kCGPDFContextKeywords as String: "XXXX, Layout"]
    let data = NSMutableData()
    guard let pdfData = CGDataConsumer(data: data),
          let ctx = CGContext(consumer: pdfData, mediaBox: &cgRect, documentInfo as CFDictionary) else {
        return nil
    }
    ctx.beginPDFPage(nil)
    ctx.saveGState()
    ctx.scaleBy(x: scale, y: scale)
    self.drawingLayer.render(in: ctx)
    ctx.restoreGState()
    ctx.endPDFPage()
    ctx.closePDF()
    return data as Data
}
This is what I ended up doing - and it seems to work.
class ZOrderDrawingLayer: CALayer {
    override func render(in ctx: CGContext) {
        if let layers: [CALayer] = self.sublayers {
            let orderedLayers = layers.sorted(by: {
                $0.zPosition < $1.zPosition
            })
            for v in orderedLayers {
                ctx.saveGState()
                // Translate and rotate the context using the sublayer's
                // size, position and transform (angle)
                let w = v.bounds.width / 2
                let ww = w * w
                let h = v.bounds.height / 2
                let hh = h * h
                let c = sqrt(ww + hh)
                let theta = asin(h / c)
                let angle = atan2(v.transform.m12, v.transform.m11)
                let x = c * cos(theta + angle)
                let y = c * sin(theta + angle)
                ctx.translateBy(x: v.position.x - x, y: v.position.y - y)
                ctx.rotate(by: angle)
                v.render(in: ctx)
                ctx.restoreGState()
            }
        }
    }
}
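For completeness, a hypothetical usage sketch (the layer types, sizes and positions are made up for illustration): make drawingLayer an instance of the subclass, and the existing createPdfData() picks up the zPosition-aware rendering unchanged.
import QuartzCore

let drawingLayer = ZOrderDrawingLayer()
drawingLayer.frame = CGRect(x: 0, y: 0, width: 800, height: 600)

let bottomLayer = CAShapeLayer()
bottomLayer.zPosition = 0                 // rendered first (underneath)
bottomLayer.position = CGPoint(x: 200, y: 150)

let topLayer = CATextLayer()
topLayer.zPosition = 5                    // rendered last (on top)
topLayer.position = CGPoint(x: 200, y: 150)
topLayer.setAffineTransform(CGAffineTransform(rotationAngle: .pi / 4))

drawingLayer.addSublayer(bottomLayer)
drawingLayer.addSublayer(topLayer)
// drawingLayer.render(in:) -- called from createPdfData() -- now draws sublayers in zPosition order.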

Sample NSImage and retain quality (swift 3)

I wrote an NSImage extension to allow me to take random samples of an image. I would like those samples to retain the same quality as the original image. However, they appear to be aliased or slightly blurry. Here's an example - the original drawn on the right and a random sample on the left:
I'm playing around with this in SpriteKit at the moment. Here's how I create the original image:
let bg = NSImage(imageLiteralResourceName: "ref")
let tex = SKTexture(image: bg)
let sprite = SKSpriteNode(texture: tex)
sprite.position = CGPoint(x: size.width/2, y:size.height/2)
addChild(sprite)
And here's how I create the sample:
let sample = bg.sample(size: NSSize(width: 100, height: 100))
let sampletex = SKTexture(image:sample!)
let samplesprite = SKSpriteNode(texture:sampletex)
samplesprite.position = CGPoint(x: 60, y:size.height/2)
addChild(samplesprite)
Here's the NSImage extension (and randomNumber func) that creates the sample:
extension NSImage {
    /// Returns the height of the current image.
    var height: CGFloat {
        return self.size.height
    }
    /// Returns the width of the current image.
    var width: CGFloat {
        return self.size.width
    }

    func sample(size: NSSize) -> NSImage? {
        // Resize the current image, while preserving the aspect ratio.
        let source = self
        // Make sure that we are within a suitable range
        var checkedSize = size
        checkedSize.width = floor(min(checkedSize.width, source.size.width * 0.9))
        checkedSize.height = floor(min(checkedSize.height, source.size.height * 0.9))
        // Get random points for the crop.
        let x = randomNumber(range: 0...(Int(source.width) - Int(checkedSize.width)))
        let y = randomNumber(range: 0...(Int(source.height) - Int(checkedSize.height)))
        // Create the cropping frame.
        var frame = NSRect(x: x, y: y, width: Int(checkedSize.width), height: Int(checkedSize.height))
        // let ref = source.cgImage.cropping(to: frame)
        let ref = source.cgImage(forProposedRect: &frame, context: nil, hints: nil)
        let rep = NSBitmapImageRep(cgImage: ref!)
        // Create a new image with the new size
        let img = NSImage(size: checkedSize)
        // Set a graphics context
        img.lockFocus()
        defer { img.unlockFocus() }
        // Fill in the sample image
        if rep.draw(in: NSMakeRect(0, 0, checkedSize.width, checkedSize.height),
                    from: frame,
                    operation: NSCompositingOperation.copy,
                    fraction: 1.0,
                    respectFlipped: false,
                    hints: [NSImageHintInterpolation: NSImageInterpolation.high.rawValue]) {
            // Return the cropped image.
            return img
        }
        // Return nil in case anything fails.
        return nil
    }
}

func randomNumber(range: ClosedRange<Int> = 0...100) -> Int {
    let min = range.lowerBound
    let max = range.upperBound
    return Int(arc4random_uniform(UInt32(1 + max - min))) + min
}
I've tried this about 10 different ways and the results always seem to be a slightly blurry sample. I even checked for smudges on my screen. :)
How can I create a sample of an NSImage that retains the exact qualities of the section of the original source image?
Switching the interpolation mode to NSImageInterpolation.none was apparently sufficient in this case.
It's also important to handle the draw destination rect correctly. Since cgImage(forProposedRect:...) may change the proposed rect, you should use a destination rect that's based on it. You should basically use a copy of frame that's offset by (-x, -y) so it's relative to (0, 0) instead of (x, y).
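A minimal sketch of those two adjustments, dropped into the sample(size:) method above in place of the original rep.draw call (everything else stays the same):
// Inside sample(size:), after cgImage(forProposedRect:) has (possibly) adjusted `frame`:
// use a destination rect with the same size as `frame`, moved back to (0, 0),
// and switch the interpolation hint to .none so no resampling happens.
let dest = frame.offsetBy(dx: -frame.origin.x, dy: -frame.origin.y)
if rep.draw(in: dest,
            from: frame,
            operation: .copy,
            fraction: 1.0,
            respectFlipped: false,
            hints: [NSImageHintInterpolation: NSImageInterpolation.none.rawValue]) {
    return img
}
return nil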

Rotate NSImage in Swift, Cocoa, Mac OSX

Is there an easy way to rotate a NSImage in a Mac OSX app? Or just set the orientation from portrait to landscape using Swift?
I am playing around with CATransform3DMakeAffineTransform but I can't get it to work.
CATransform3DMakeAffineTransform(CGAffineTransformMakeRotation(CGFloat(M_PI) * 90/180))
It's the first time for me to work with transformations. So please be patient with me :) Maybe I'm working on a wrong approach...
Can anybody help me please?
Thanks!
public extension NSImage {
    public func imageRotatedByDegreess(degrees: CGFloat) -> NSImage {
        var imageBounds = NSZeroRect ; imageBounds.size = self.size
        let pathBounds = NSBezierPath(rect: imageBounds)
        var transform = NSAffineTransform()
        transform.rotateByDegrees(degrees)
        pathBounds.transformUsingAffineTransform(transform)
        let rotatedBounds: NSRect = NSMakeRect(NSZeroPoint.x, NSZeroPoint.y, pathBounds.bounds.size.width, pathBounds.bounds.size.height)
        let rotatedImage = NSImage(size: rotatedBounds.size)

        // Center the image within the rotated bounds
        imageBounds.origin.x = NSMidX(rotatedBounds) - (NSWidth(imageBounds) / 2)
        imageBounds.origin.y = NSMidY(rotatedBounds) - (NSHeight(imageBounds) / 2)

        // Start a new transform
        transform = NSAffineTransform()
        // Move coordinate system to the center (since we want to rotate around the center)
        transform.translateXBy(+(NSWidth(rotatedBounds) / 2), yBy: +(NSHeight(rotatedBounds) / 2))
        transform.rotateByDegrees(degrees)
        // Move the coordinate system back to normal
        transform.translateXBy(-(NSWidth(rotatedBounds) / 2), yBy: -(NSHeight(rotatedBounds) / 2))

        // Draw the original image, rotated, into the new image
        rotatedImage.lockFocus()
        transform.concat()
        self.drawInRect(imageBounds, fromRect: NSZeroRect, operation: NSCompositingOperation.CompositeCopy, fraction: 1.0)
        rotatedImage.unlockFocus()

        return rotatedImage
    }
}

var image = NSImage(named: "test.png")!.imageRotatedByDegreess(CGFloat(90)) // use only the values 90, 180, or 270
Updated for Swift 3:
public extension NSImage {
    public func imageRotatedByDegreess(degrees: CGFloat) -> NSImage {
        var imageBounds = NSZeroRect ; imageBounds.size = self.size
        let pathBounds = NSBezierPath(rect: imageBounds)
        var transform = NSAffineTransform()
        transform.rotate(byDegrees: degrees)
        pathBounds.transform(using: transform as AffineTransform)
        let rotatedBounds: NSRect = NSMakeRect(NSZeroPoint.x, NSZeroPoint.y, pathBounds.bounds.size.width, pathBounds.bounds.size.height)
        let rotatedImage = NSImage(size: rotatedBounds.size)

        // Center the image within the rotated bounds
        imageBounds.origin.x = NSMidX(rotatedBounds) - (NSWidth(imageBounds) / 2)
        imageBounds.origin.y = NSMidY(rotatedBounds) - (NSHeight(imageBounds) / 2)

        // Start a new transform
        transform = NSAffineTransform()
        // Move coordinate system to the center (since we want to rotate around the center)
        transform.translateX(by: +(NSWidth(rotatedBounds) / 2), yBy: +(NSHeight(rotatedBounds) / 2))
        transform.rotate(byDegrees: degrees)
        // Move the coordinate system back to normal
        transform.translateX(by: -(NSWidth(rotatedBounds) / 2), yBy: -(NSHeight(rotatedBounds) / 2))

        // Draw the original image, rotated, into the new image
        rotatedImage.lockFocus()
        transform.concat()
        self.draw(in: imageBounds, from: NSZeroRect, operation: NSCompositingOperation.copy, fraction: 1.0)
        rotatedImage.unlockFocus()

        return rotatedImage
    }
}

class SomeClass: NSViewController {
    var image = NSImage(named: "test.png")!.imageRotatedByDegreess(degrees: CGFloat(90)) // use only the values 90, 180, or 270
}
Thanks for this solution; however, it did not work perfectly for me.
As you may have noticed, pathBounds is not used anywhere. In my opinion it has to be used like so:
let rotatedBounds: NSRect = NSMakeRect(NSZeroPoint.x, NSZeroPoint.y, pathBounds.bounds.size.width, pathBounds.bounds.size.height)
Otherwise the image will be rotated but cropped to square bounds.
Letting IKImageView do the heavy lifting:
import Quartz

extension NSImage {
    func imageRotated(by degrees: CGFloat) -> NSImage {
        let imageRotator = IKImageView()
        var imageRect = CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height)
        let cgImage = self.cgImage(forProposedRect: &imageRect, context: nil, hints: nil)
        imageRotator.setImage(cgImage, imageProperties: [:])
        imageRotator.rotationAngle = CGFloat(-(degrees / 180) * CGFloat(M_PI))
        let rotatedCGImage = imageRotator.image().takeUnretainedValue()
        return NSImage(cgImage: rotatedCGImage, size: NSSize.zero)
    }
}
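Usage is the same as with the other extensions here, for example (assuming a bundled image named "test.png", as in the earlier answers):
let rotated = NSImage(named: "test.png")?.imageRotated(by: 90)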
Here's a simple Swift (4+) solution to drawing an image that is rotated around the center:
extension NSImage {
    /// Rotates the image by the specified degrees around the center.
    /// Note that if the angle is not a multiple of 90°, parts of the rotated image may be drawn outside the image bounds.
    func rotated(by angle: CGFloat) -> NSImage {
        let img = NSImage(size: self.size, flipped: false, drawingHandler: { (rect) -> Bool in
            let (width, height) = (rect.size.width, rect.size.height)
            let transform = NSAffineTransform()
            transform.translateX(by: width / 2, yBy: height / 2)
            transform.rotate(byDegrees: angle)
            transform.translateX(by: -width / 2, yBy: -height / 2)
            transform.concat()
            self.draw(in: rect)
            return true
        })
        img.isTemplate = self.isTemplate // preserve the underlying image's template setting
        return img
    }
}
This one also works for non-square images (Swift 5).
extension NSImage {
    func rotated(by degrees: CGFloat) -> NSImage {
        var imageBounds = NSRect(x: 0, y: 0, width: size.width, height: size.height)
        let rotatedSize = AffineTransform(rotationByDegrees: degrees).transform(size)
        let newSize = CGSize(width: abs(rotatedSize.width), height: abs(rotatedSize.height))
        let rotatedImage = NSImage(size: newSize)

        imageBounds.origin = CGPoint(x: newSize.width / 2 - imageBounds.width / 2,
                                     y: newSize.height / 2 - imageBounds.height / 2)

        let otherTransform = NSAffineTransform()
        otherTransform.translateX(by: newSize.width / 2, yBy: newSize.height / 2)
        otherTransform.rotate(byDegrees: degrees)
        otherTransform.translateX(by: -newSize.width / 2, yBy: -newSize.height / 2)

        rotatedImage.lockFocus()
        otherTransform.concat()
        draw(in: imageBounds, from: CGRect.zero, operation: NSCompositingOperation.copy, fraction: 1.0)
        rotatedImage.unlockFocus()

        return rotatedImage
    }
}
Building on #FrankByte.com's code, this version should extend correctly in both x and y on any image and any rotation.
extension NSImage {
    func rotated(by degrees: CGFloat) -> NSImage {
        let sinDegrees = abs(sin(degrees * CGFloat.pi / 180.0))
        let cosDegrees = abs(cos(degrees * CGFloat.pi / 180.0))
        let newSize = CGSize(width: size.height * sinDegrees + size.width * cosDegrees,
                             height: size.width * sinDegrees + size.height * cosDegrees)

        let imageBounds = NSRect(x: (newSize.width - size.width) / 2,
                                 y: (newSize.height - size.height) / 2,
                                 width: size.width, height: size.height)

        let otherTransform = NSAffineTransform()
        otherTransform.translateX(by: newSize.width / 2, yBy: newSize.height / 2)
        otherTransform.rotate(byDegrees: degrees)
        otherTransform.translateX(by: -newSize.width / 2, yBy: -newSize.height / 2)

        let rotatedImage = NSImage(size: newSize)
        rotatedImage.lockFocus()
        otherTransform.concat()
        draw(in: imageBounds, from: CGRect.zero, operation: NSCompositingOperation.copy, fraction: 1.0)
        rotatedImage.unlockFocus()

        return rotatedImage
    }
}
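For example, with a hypothetical 200 x 100 source image:
// newSize works out to roughly (100·sin 45° + 200·cos 45°) x (200·sin 45° + 100·cos 45°) ≈ 212 x 212,
// so the rotated image is fully contained and nothing gets clipped.
let rotated = NSImage(named: "test.png")?.rotated(by: 45)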