The code below tries to set pixels in an offscreen bitmap and then draw the bitmap to the screen. Unfortunately, it crashes.
import UIKit
class GameView: UIView {
required init?(coder aDecoder: NSCoder) {
super.init(coder: aDecoder)
}
override init(frame: CGRect) {
super.init(frame: frame)
}
func createBitmapContext(pixelsWide: Int, _ pixelsHigh: Int) -> CGContextRef? {
let bytesPerPixel = 4
let bytesPerRow = bytesPerPixel * pixelsWide
let bitsPerComponent = 8
let byteCount = (bytesPerRow * pixelsHigh)
let colorSpace = CGColorSpaceCreateWithName(kCGColorSpaceGenericRGB)
let pixels = UnsafeMutablePointer<CUnsignedChar>.alloc(byteCount)
if pixels == nil {
return nil
}
let bitmapInfo = CGImageAlphaInfo.PremultipliedFirst.rawValue | CGBitmapInfo.ByteOrder32Little.rawValue
let context = CGBitmapContextCreate(pixels, pixelsWide, pixelsHigh, bitsPerComponent, bytesPerRow, colorSpace, bitmapInfo)
return context
}
override func drawRect(rect: CGRect) {
let width = 200
let height = 300
let boundingBox = CGRectMake(0, 0, CGFloat(width), CGFloat(height))
let context = createBitmapContext(width, height)
let data = CGBitmapContextGetData(context)
var currentPixel: [UInt32] = unsafeBitCast(data, [UInt32].self)
var n = 0
for var j = 0; j < height; j++ {
for var i = 0; i < width; i++ {
currentPixel[n++] = 0
}
}
let image = CGBitmapContextCreateImage(context!)
CGContextDrawImage(context!, boundingBox, image)
}
}
There are a couple of changes required.
1. It crashes because of a memory overrun: unsafeBitCast(data, [UInt32].self) reinterprets the raw pixel pointer as a Swift Array, which it is not, so writing through it corrupts memory. Use an UnsafeMutablePointer to address the buffer instead.
2. You are creating an image from the newly created bitmap context and drawing it back into that same context, instead of into the current drawing context.
Use this modified drawRect function:
override func drawRect(rect: CGRect) {
let width = 200
let height = 300
let boundingBox = CGRectMake(0, 0, CGFloat(width), CGFloat(height))
let context = createBitmapContext(width, height)
let data = CGBitmapContextGetData(context)
let pixels = UnsafeMutablePointer<CUnsignedChar>(data)
var n = 0
for var j = 0; j < height; j++ {
for var i = 0; i < width; i++ {
pixels[n++] = 0 //B
pixels[n++] = 255 //G
pixels[n++] = 0 //R
pixels[n++] = 255 //A
}
}
let image = CGBitmapContextCreateImage(context!)
if let currentContext = UIGraphicsGetCurrentContext() {
// Debug only: dump the offscreen bitmap to disk to inspect its contents
UIImagePNGRepresentation(UIImage(CGImage: image!))?.writeToFile("/Users/admin/Desktop/aaaaa.png", atomically: true)
CGContextDrawImage(currentContext, boundingBox, image)
}
}
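For reference, here is a minimal sketch of the same fix in current Swift. The helper makeBitmapContext is a hypothetical modern port of createBitmapContext above (it lets Core Graphics allocate the buffer), and the pixel constant assumes the same premultiplied-first, 32-bit little-endian layout:
func makeBitmapContext(_ pixelsWide: Int, _ pixelsHigh: Int) -> CGContext? {
    let bitmapInfo = CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue
    return CGContext(data: nil, width: pixelsWide, height: pixelsHigh,
                     bitsPerComponent: 8, bytesPerRow: 4 * pixelsWide,
                     space: CGColorSpaceCreateDeviceRGB(), bitmapInfo: bitmapInfo)
}

override func draw(_ rect: CGRect) {
    let width = 200, height = 300
    guard let context = makeBitmapContext(width, height),
          let data = context.data else { return }
    // Bind the raw buffer to UInt32 pixels; do NOT unsafeBitCast it to a Swift Array.
    let pixels = data.bindMemory(to: UInt32.self, capacity: width * height)
    for n in 0..<(width * height) {
        pixels[n] = 0xFF00FF00   // bytes B,G,R,A in memory: opaque green
    }
    // Draw the offscreen image into the *current* context, not back into itself.
    if let image = context.makeImage(), let current = UIGraphicsGetCurrentContext() {
        current.draw(image, in: CGRect(x: 0, y: 0, width: width, height: height))
    }
}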
I'm trying to figure out how setColor works. I have the following code:
lazy var imageView:NSImageView = {
let imageView = NSImageView(frame: view.frame)
return imageView
}()
override func viewDidLoad() {
super.viewDidLoad()
createColorProjection()
view.wantsLayer = true
view.addSubview(imageView)
view.needsDisplay = true
}
func createColorProjection() {
var bitmap = NSBitmapImageRep(cgImage: cgImage!)
var x = 0
while x < bitmap.pixelsWide {
var y = 0
while y < bitmap.pixelsHigh {
//pixels[Point(x: x, y: y)] = (getColor(x: x, y: y, bitmap: bitmap))
bitmap.setColor(NSColor(cgColor: .black)!, atX: x, y: y)
y += 1
}
x += 1
}
let image = createImage(bitmap: bitmap)
imageView.image = image
imageView.needsDisplay = true
}
func createImage(bitmap:NSBitmapImageRep) -> NSImage {
let image = bitmap.cgImage
return NSImage(cgImage: image! , size: CGSize(width: image!.width, height: image!.height))
}
The intention of the code is to change a photo (a rainbow) to be entirely black (I'm just testing with black right now to make sure I understand how it works). However, when I run the program, the unchanged picture of the rainbow is shown, not a black photo.
I am getting these errors:
Unrecognized colorspace number -1 and Unknown number of components for colorspace model -1.
Thanks.
First, you're right: setColor has been broken since at least Catalina. Apple probably hasn't fixed it because it's slow and inefficient, and hardly anybody ever used it.
Second, the docs say NSBitmapImageRep(cgImage:) produces a read-only bitmap, so your code wouldn't have worked even if setColor did.
As Alexander says, making your own CIFilter is the best way to change a photo's pixels to different colors. Writing and implementing the OpenGL kernel isn't easy, but it's the best-performing approach.
If you were to add an extension to NSBitmapImageRep like this:
extension NSBitmapImageRep {
func setColorNew(_ color: NSColor, atX x: Int, y: Int) {
guard let data = bitmapData else { return }
// assumes 8-bit samples in RGB(A) order; index straight into the buffer
let ptr = data + bytesPerRow * y + samplesPerPixel * x
// the 255.1 factor compensates for UInt8.init truncating, so components
// that land just below a whole number still map to the expected value
ptr[0] = UInt8(color.redComponent * 255.1)
ptr[1] = UInt8(color.greenComponent * 255.1)
ptr[2] = UInt8(color.blueComponent * 255.1)
if samplesPerPixel > 3 {
ptr[3] = UInt8(color.alphaComponent * 255.1)
}
}
}
Then simply changing an image's pixels could be done like this:
func changePixels(image: NSImage, newColor: NSColor) -> NSImage {
guard let imgData = image.tiffRepresentation,
let bitmap = NSBitmapImageRep(data: imgData),
let color = newColor.usingColorSpace(.deviceRGB)
else { return image }
var y = 0
while y < bitmap.pixelsHigh {
var x = 0
while x < bitmap.pixelsWide {
bitmap.setColorNew(color, atX: x, y: y)
x += 1
}
y += 1
}
let newImage = NSImage(size: image.size)
newImage.addRepresentation(bitmap)
return newImage
}
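A hypothetical call site (the asset name "rainbow" and the imageView are placeholders):
// hypothetical usage: turn every pixel black and display the result
if let rainbow = NSImage(named: "rainbow") {
    imageView.image = changePixels(image: rainbow, newColor: .black)
}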
I'm trying to build an app that reports the similarity between two UIImages as a percentage. The app captures the user's pencil strokes and converts them to a UIImage, then compares it pixel by pixel with an image stored in the app.
I found code that computes the difference (NOT the similarity). It works when I have two JPEG images, but in my case it gives me an error when I tap the button:
"convertoand:]: unrecognized selector sent to instance 0x10900f2d0"
The button captures the pencil strokes, compares the two images with compareImages(image1: imageStroke, image2: imageOnApp), then prints the result on a label:
#IBAction func btnFindSimilarity(_ sender: Any) {
let imgRect = CGRect(x: 0, y: 0, width: 400, height: 100)
let imageStroke = canvasView.drawing.image(from: imgRect, scale: 1.0)
let compareResult = compareImages(image1: imageStroke, image2: imageOnApp)
lblPrintScore.text = "\(String(describing: compareResult))"
}
The difference-finding code:
func pixelValues(fromCGImage imageRef: CGImage?) -> [UInt8]?
{
var width = 0
var height = 0
var pixelValues: [UInt8]?
if let imageRef = imageRef {
width = imageRef.width
height = imageRef.height
let bitsPerComponent = imageRef.bitsPerComponent
let bytesPerRow = imageRef.bytesPerRow
let totalBytes = height * bytesPerRow
let bitmapInfo = imageRef.bitmapInfo
let colorSpace = CGColorSpaceCreateDeviceRGB()
var intensities = [UInt8](repeating: 0, count: totalBytes)
let contextRef = CGContext(data: &intensities,
width: width,
height: height,
bitsPerComponent: bitsPerComponent,
bytesPerRow: bytesPerRow,
space: colorSpace,
bitmapInfo: bitmapInfo.rawValue)
contextRef?.draw(imageRef, in: CGRect(x: 0.0, y: 0.0, width: CGFloat(width), height: CGFloat(height)))
pixelValues = intensities
}
return pixelValues
}
func compareImages(image1: UIImage, image2: UIImage) -> Double? {
guard let data1 = pixelValues(fromCGImage: image1.cgImage),
let data2 = pixelValues(fromCGImage: image2.cgImage),
data1.count == data2.count else {
return nil
}
let width = Double(image1.size.width)
let height = Double(image1.size.height)
// sum absolute channel differences, skipping every 4th (alpha) byte,
// then normalize to a percentage
let sum = zip(data1, data2)
    .enumerated()
    .reduce(0.0) {
        $1.offset % 4 == 3 ? $0 : $0 + abs(Double($1.element.0) - Double($1.element.1))
    }
return sum * 100 / (width * height * 3.0) / 255.0
}
I tried to change the code to find the similarity instead, but failed.
Although the following example is for SwiftUI, the code can be used elsewhere.
I found on SO how to extract the colors from a UIImage (see findColors, from How do I get the color of a pixel in a UIImage with Swift?),
and using that, I compare the 2 images for RGB similarity (see isRGBSimilar).
@main
struct TestApp: App {
var body: some Scene {
WindowGroup {
ContentView()
}
}
}
struct ContentView: View {
@State var simili = 0.0
var body: some View {
Text(String(simili))
.onAppear {
guard let img1 = UIImage(named: "cat1.png") else { return }
guard let img2 = UIImage(named: "cat2.png") else { return }
print("---> img1: \(img1.size) img2: \(img2.size)")
if let percent = compareImages(image1: img1, image2: img2) {
print("----> percent: \(percent) \n")
simili = percent
}
}
}
// from: https://stackoverflow.com/questions/25146557/how-do-i-get-the-color-of-a-pixel-in-a-uiimage-with-swift
func findColors(_ image: UIImage) -> [UIColor] {
let pixelsWide = Int(image.size.width)
let pixelsHigh = Int(image.size.height)
guard let pixelData = image.cgImage?.dataProvider?.data else { return [] }
let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
var imageColors: [UIColor] = []
for x in 0..<pixelsWide {
for y in 0..<pixelsHigh {
let point = CGPoint(x: x, y: y)
let pixelInfo: Int = ((pixelsWide * Int(point.y)) + Int(point.x)) * 4
let color = UIColor(red: CGFloat(data[pixelInfo]) / 255.0,
green: CGFloat(data[pixelInfo + 1]) / 255.0,
blue: CGFloat(data[pixelInfo + 2]) / 255.0,
alpha: CGFloat(data[pixelInfo + 3]) / 255.0)
imageColors.append(color)
}
}
return imageColors
}
func compareImages(image1: UIImage, image2: UIImage) -> Double? {
let data1 = findColors(image1)
let data2 = findColors(image2)
guard data1.count == data2.count else { return nil }
var similarr = [Bool]()
for i in data1.indices {
similarr.append(isRGBSimilar(data1[i], data2[i], 0.1)) // <-- set epsilon
}
let simi = similarr.filter{$0}
let result = (Double(simi.count * 100) / (Double(image1.size.width) * Double(image1.size.height)))
return result
}
// compare 2 colors to be within d of each other
func isRGBSimilar(_ f: UIColor, _ t: UIColor, _ d: CGFloat) -> Bool {
var r1: CGFloat = 0; var g1: CGFloat = 0; var b1: CGFloat = 0; var a1: CGFloat = 0
f.getRed(&r1, green: &g1, blue: &b1, alpha: &a1)
var r2: CGFloat = 0; var g2: CGFloat = 0; var b2: CGFloat = 0; var a2: CGFloat = 0
t.getRed(&r2, green: &g2, blue: &b2, alpha: &a2)
return abs(r1 - r2) <= d && abs(g1 - g2) <= d && abs(b1 - b2) <= d && abs(a1 - a2) <= d
}
I am trying to crop a selected portion of an NSImage that is fitted with the scaleProportionallyUpOrDown (aspect-fill) mode.
I am drawing a selection frame in the mouse-dragged event like this:
class CropImageView: NSImageView {
var startPoint: NSPoint!
var shapeLayer: CAShapeLayer!
var flagCheck = false
var finalPoint: NSPoint!
override init(frame frameRect: NSRect) {
super.init(frame: frameRect)
}
required init?(coder: NSCoder) {
super.init(coder: coder)
}
override func draw(_ dirtyRect: NSRect) {
super.draw(dirtyRect)
}
override var image: NSImage? {
set {
self.layer = CALayer()
self.layer?.contentsGravity = kCAGravityResizeAspectFill
self.layer?.contents = newValue
self.wantsLayer = true
super.image = newValue
}
get {
return super.image
}
}
override func mouseDown(with event: NSEvent) {
self.startPoint = self.convert(event.locationInWindow, from: nil)
if self.shapeLayer != nil {
self.shapeLayer.removeFromSuperlayer()
self.shapeLayer = nil
}
self.flagCheck = true
var pixelColor: NSColor = NSReadPixel(startPoint) ?? NSColor()
shapeLayer = CAShapeLayer()
shapeLayer.lineWidth = 1.0
shapeLayer.fillColor = NSColor.clear.cgColor
if pixelColor == NSColor.black {
pixelColor = NSColor.color_white
} else {
pixelColor = NSColor.black
}
shapeLayer.strokeColor = pixelColor.cgColor
shapeLayer.lineDashPattern = [1]
self.layer?.addSublayer(shapeLayer)
var dashAnimation = CABasicAnimation()
dashAnimation = CABasicAnimation(keyPath: "lineDashPhase")
dashAnimation.duration = 0.75
dashAnimation.fromValue = 0.0
dashAnimation.toValue = 15.0
dashAnimation.repeatCount = 0.0
shapeLayer.add(dashAnimation, forKey: "linePhase")
}
override func mouseDragged(with event: NSEvent) {
let point: NSPoint = self.convert(event.locationInWindow, from: nil)
var newPoint: CGPoint = self.startPoint
let xDiff = point.x - self.startPoint.x
let yDiff = point.y - self.startPoint.y
let dist = min(abs(xDiff), abs(yDiff))
newPoint.x += xDiff > 0 ? dist : -dist
newPoint.y += yDiff > 0 ? dist : -dist
let path = CGMutablePath()
path.move(to: self.startPoint)
path.addLine(to: NSPoint(x: self.startPoint.x, y: newPoint.y))
path.addLine(to: newPoint)
path.addLine(to: NSPoint(x: newPoint.x, y: self.startPoint.y))
path.closeSubpath()
self.shapeLayer.path = path
}
override func mouseUp(with event: NSEvent) {
self.finalPoint = self.convert(event.locationInWindow, from: nil)
}
}
and I selected this area, shown in the picture with a black dotted line:
My cropping logic is this:
// resize Image Methods
extension CropProfileView {
func resizeImage(image: NSImage) -> Data {
var scalingFactor: CGFloat = 0.0
if image.size.width >= image.size.height {
scalingFactor = image.size.width/cropImgView.size.width
} else {
scalingFactor = image.size.height/cropImgView.size.height
}
let width = (self.cropImgView.finalPoint.x - self.cropImgView.startPoint.x) * scalingFactor
let height = (self.cropImgView.startPoint.y - self.cropImgView.finalPoint.y) * scalingFactor
let xPos = ((image.size.width/2) - (cropImgView.bounds.midX - self.cropImgView.startPoint.x) * scalingFactor)
let yPos = ((image.size.height/2) - (cropImgView.bounds.midY - (cropImgView.size.height - self.cropImgView.startPoint.y)) * scalingFactor)
var croppedRect: NSRect = NSRect(x: xPos, y: yPos, width: width, height: height)
let imageRef = image.cgImage(forProposedRect: &croppedRect, context: nil, hints: nil)
guard let croppedImage = imageRef?.cropping(to: croppedRect) else {return Data()}
let imageWithNewSize = NSImage(cgImage: croppedImage, size: NSSize(width: width, height: height))
guard let data = imageWithNewSize.tiffRepresentation,
let rep = NSBitmapImageRep(data: data),
let imgData = rep.representation(using: .png, properties: [.compressionFactor: NSNumber(floatLiteral: 0.25)]) else {
return imageWithNewSize.tiffRepresentation ?? Data()
}
return imgData
}
}
With this cropping logic I am getting this output:
I think that because the image is aspect-filled, it is not being cropped to exactly the selected frame. If you look at the output, the x position, width, and height are all off. Or perhaps I am not calculating these coordinates properly. Let me know where my calculation goes wrong.
Note: the CropImageView class in the question is a subclass of NSImageView but the view is layer-hosting and the image is drawn by the layer, not by NSImageView. imageScaling is not used.
When deciding which scaling factor to use, you have to take the size of the image view into account. If the image size is width: 120, height: 100 and the image view size is width: 120, height: 80, then image.size.width >= image.size.height is true and image.size.width/cropImgView.size.width is 1, but the image is still scaled, because image.size.height/cropImgView.size.height is 1.25. Calculate the horizontal and vertical scaling factors and use the largest.
See How to crop a UIImageView to a new UIImage in 'aspect fill' mode?
Here's the calculation of croppedRect assuming cropImgView.size returns self.layer!.bounds.size.
var scalingWidthFactor: CGFloat = image.size.width/cropImgView.size.width
var scalingHeightFactor: CGFloat = image.size.height/cropImgView.size.height
var xOffset: CGFloat = 0
var yOffset: CGFloat = 0
switch cropImgView.layer?.contentsGravity {
case CALayerContentsGravity.resize: break
case CALayerContentsGravity.resizeAspect:
if scalingWidthFactor > scalingHeightFactor {
scalingHeightFactor = scalingWidthFactor
yOffset = (cropImgView.size.height - (image.size.height / scalingHeightFactor)) / 2
}
else {
scalingWidthFactor = scalingHeightFactor
xOffset = (cropImgView.size.width - (image.size.width / scalingWidthFactor)) / 2
}
case CALayerContentsGravity.resizeAspectFill:
if scalingWidthFactor < scalingHeightFactor {
scalingHeightFactor = scalingWidthFactor
yOffset = (cropImgView.size.height - (image.size.height / scalingHeightFactor)) / 2
}
else {
scalingWidthFactor = scalingHeightFactor
xOffset = (cropImgView.size.width - (image.size.width / scalingWidthFactor)) / 2
}
default:
print("contentsGravity \(String(describing: cropImgView.layer?.contentsGravity)) is not supported")
return nil
}
let width = (self.cropImgView.finalPoint.x - self.cropImgView.startPoint.x) * scalingWidthFactor
let height = (self.cropImgView.startPoint.y - self.cropImgView.finalPoint.y) * scalingHeightFactor
let xPos = (self.cropImgView.startPoint.x - xOffset) * scalingWidthFactor
let yPos = (cropImgView.size.height - self.cropImgView.startPoint.y - yOffset) * scalingHeightFactor
var croppedRect: NSRect = NSRect(x: xPos, y: yPos, width: width, height: height)
Bugfix: cropImgView.finalPoint should be the corner of the selection, not the location of mouseUp. In CropImageView set self.finalPoint = newPoint in mouseDragged instead of mouseUp.
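A sketch of mouseDragged with that fix applied; everything except the finalPoint line is unchanged from the question:
override func mouseDragged(with event: NSEvent) {
    let point: NSPoint = self.convert(event.locationInWindow, from: nil)
    var newPoint: CGPoint = self.startPoint
    let xDiff = point.x - self.startPoint.x
    let yDiff = point.y - self.startPoint.y
    let dist = min(abs(xDiff), abs(yDiff))
    newPoint.x += xDiff > 0 ? dist : -dist
    newPoint.y += yDiff > 0 ? dist : -dist
    self.finalPoint = newPoint   // bugfix: track the selection corner while dragging
    let path = CGMutablePath()
    path.move(to: self.startPoint)
    path.addLine(to: NSPoint(x: self.startPoint.x, y: newPoint.y))
    path.addLine(to: newPoint)
    path.addLine(to: NSPoint(x: newPoint.x, y: self.startPoint.y))
    path.closeSubpath()
    self.shapeLayer.path = path
}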
I have some code I can't change that expects to be able to draw at any time. It's the main() function in BackgroundThread below - pretend it can't be modified in any way. Running this will use 70-80% CPU.
If instead of running the thread I replicate what it is doing in View::draw() (i.e. draw 5000 white rectangles at random positions), this will use about 30% CPU.
Where's the difference coming from? Looking at Instruments, although the call stack is the same starting from CGContextFillRect, the View::draw() version only spends 16% of its time in memset(), whereas the threaded version spends 80% of its time there.
The code below is the FAST version. Comment out the FAST lines and uncomment the SLOW lines to switch to the SLOW (threaded) version. Compile with swiftc test.swift -otest && ./test. I'm on macOS 10.13, integrated graphics, if that matters.
Is there anything I can do to make the threaded version as fast as the View::draw() version?
import Cocoa
let NSApp = NSApplication.shared,
vwaitSem = DispatchSemaphore(value: 0)
var
mainWindow: NSWindow?,
screen: CGContext?,
link: CVDisplayLink?
class View: NSView, CALayerDelegate {
var lastTime: CFTimeInterval = 0
override var acceptsFirstResponder: Bool {return true}
required init(coder aDecoder: NSCoder) {fatalError("This class does not support NSCoding")}
override func makeBackingLayer() -> CALayer {return CALayer()}
override init(frame: CGRect) {
super.init(frame: frame)
self.wantsLayer = true
self.layer?.contentsScale = 2.0
self.layer?.backgroundColor = CGColor(red:0, green:0, blue:0, alpha: 1)
self.layerContentsRedrawPolicy = NSView.LayerContentsRedrawPolicy.onSetNeedsDisplay // FAST
}
func draw(_ layer: CALayer, in ctx: CGContext) {
let now = CACurrentMediaTime(), timePassed = ((now-lastTime)*1000).rounded()
// NSLog("\(timePassed)")
lastTime = now
ctx.setFillColor(CGColor.white)
ctx.setStrokeColor(CGColor.white)
for _ in 0...5000 {
let rect = CGRect(x: CGFloat(arc4random_uniform(640)+1), y: CGFloat(arc4random_uniform(480)+1), width:6, height:6)
ctx.setFillColor(CGColor.white)
ctx.fill(rect)
}
}
}
func displayLinkOutputCallback(_ displayLink: CVDisplayLink, _ nowPtr: UnsafePointer<CVTimeStamp>,
_ outputTimePtr: UnsafePointer<CVTimeStamp>, _ flagsIn: CVOptionFlags, _ flagsOut: UnsafeMutablePointer<CVOptionFlags>,
_ displayLinkContext: UnsafeMutableRawPointer?) -> CVReturn {
DispatchQueue.main.async {
// mainWindow!.contentView!.layer!.contents = screen!.makeImage() // SLOW
mainWindow!.contentView!.display() // FAST
vwaitSem.signal()
}
return kCVReturnSuccess
}
class BackgroundThread: Thread {
var lastTime: CFTimeInterval = 0
override func main() {
while true {
let now = CACurrentMediaTime(), timePassed = ((now-lastTime)*1000).rounded()
// NSLog("\(timePassed)")
lastTime = now
screen?.clear(CGRect(x:0, y:0, width:640*2, height:480*2))
for _ in 0...5000 {
screen?.setFillColor(CGColor.white)
screen?.setStrokeColor(CGColor.white)
screen?.fill(CGRect(x: CGFloat(arc4random_uniform(640*2)+1), y: CGFloat(arc4random_uniform(480*2)+1), width: 6*2, height: 6*2))
}
vwaitSem.wait()
}
}
}
let width = 640, height = 480,
appMenuItem = NSMenuItem(),
quitMenuItem = NSMenuItem(title:"Quit",
action:#selector(NSApplication.terminate), keyEquivalent:"q"),
window = NSWindow(contentRect:NSMakeRect(0,0, CGFloat(width), CGFloat(height)),
styleMask:[.closable,.titled], backing:.buffered, defer:false),
colorProfile = ColorSyncProfileCreateWithDisplayID(0),
colorSpace = CGColorSpace(platformColorSpaceRef: colorProfile!.toOpaque()),
screen_ = CGContext(data: nil, width: Int(width)*2, height:Int(height)*2, bitsPerComponent:8, bytesPerRow: 0,
space: colorSpace!, bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue),
backgroundThread = BackgroundThread()
NSApp.setActivationPolicy(NSApplication.ActivationPolicy.regular)
NSApp.mainMenu = NSMenu()
NSApp.mainMenu?.addItem(appMenuItem)
appMenuItem.submenu = NSMenu()
appMenuItem.submenu?.addItem(quitMenuItem)
window.cascadeTopLeft(from:NSMakePoint(20,20))
window.makeKeyAndOrderFront(nil)
window.contentView = View()
window.makeFirstResponder(window.contentView)
NSApp.activate(ignoringOtherApps:true)
mainWindow = window
screen = screen_
CVDisplayLinkCreateWithCGDisplay(CGMainDisplayID(), &link)
CVDisplayLinkSetOutputCallback(link!, displayLinkOutputCallback, UnsafeMutableRawPointer(Unmanaged.passUnretained(window).toOpaque()))
CVDisplayLinkStart(link!)
// backgroundThread.start() // SLOW
NSApp.run()
I misread the note in the documentation for makeImage() and thought it would not copy the data unless it really had to. Well, Instruments shows it does copy the data. Every single frame.
So I switched to Metal and now I can draw from the background thread with the same performance/CPU usage as with CGContext alone, with no copies as far as I can tell.
Here's some working code:
import Cocoa
import MetalKit
class View: MTKView {
var screen: CGContext?
var commandQueue: MTLCommandQueue?
var buffer: MTLBuffer?
var texture: MTLTexture?
var vwaitSem = DispatchSemaphore(value: 0)
var backgroundThread: Thread?
var allocationSize = 0
func alignUp(size: Int, align: Int) -> Int {return (size+(align-1)) & ~(align-1)}
override var acceptsFirstResponder: Bool {return true}
required init(coder aDecoder: NSCoder) {fatalError("This class does not support NSCoding")}
init() {super.init(frame: CGRect(x:0, y:0, width:0, height: 0), device: MTLCreateSystemDefaultDevice())}
override func viewDidMoveToWindow() {
layer?.contentsScale = NSScreen.main!.backingScaleFactor
let metalLayer = layer as! CAMetalLayer
let pixelRowAlignment = metalLayer.device!.minimumLinearTextureAlignment(for: metalLayer.pixelFormat)
let bytesPerRow = alignUp(size: Int(layer!.frame.width)*Int(layer!.contentsScale)*4, align: pixelRowAlignment)
let pagesize = Int(getpagesize())
var data: UnsafeMutableRawPointer? = nil
allocationSize = alignUp(size: bytesPerRow*Int(layer!.frame.height)*Int(layer!.contentsScale), align: pagesize)
posix_memalign(&data, pagesize, allocationSize)
let colorProfile = ColorSyncProfileCreateWithDisplayID(0),
colorSpace = CGColorSpace(platformColorSpaceRef: colorProfile!.toOpaque()),
screen_ = CGContext(data: data,
width: Int(layer!.frame.width)*Int(layer!.contentsScale),
height: Int(layer!.frame.height)*Int(layer!.contentsScale),
bitsPerComponent:8, bytesPerRow: bytesPerRow,
space: colorSpace!, bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)!,
buffer_ = metalLayer.device!.makeBuffer(bytesNoCopy: data!, length: allocationSize, options: .storageModeManaged,
deallocator: { pointer, length in free(self.screen!.data!) })!,
textureDescriptor = MTLTextureDescriptor()
textureDescriptor.pixelFormat = metalLayer.pixelFormat
textureDescriptor.width = screen_.width
textureDescriptor.height = screen_.height
textureDescriptor.storageMode = buffer_.storageMode
textureDescriptor.usage = MTLTextureUsage(rawValue: MTLTextureUsage.shaderRead.rawValue)
texture = buffer_.makeTexture(descriptor: textureDescriptor, offset: 0, bytesPerRow: screen_.bytesPerRow)
commandQueue = device?.makeCommandQueue()
screen = screen_
buffer = buffer_
backgroundThread = BackgroundThread(screen: screen!, vwaitSem: vwaitSem)
backgroundThread!.start()
}
override func draw(_ dirtyRect: NSRect) {
if let drawable = currentDrawable {
buffer!.didModifyRange(0..<allocationSize)
texture!.replace(region: MTLRegionMake2D(0,0, screen!.width, screen!.height),
mipmapLevel:0, slice:0, withBytes: screen!.data!, bytesPerRow: screen!.bytesPerRow, bytesPerImage: 0)
let commandBuffer = commandQueue!.makeCommandBuffer()!
let blitPass = commandBuffer.makeBlitCommandEncoder()!
blitPass.copy(from: texture!, sourceSlice:0, sourceLevel:0, sourceOrigin: MTLOrigin(x:0,y:0,z:0),
sourceSize: MTLSize(width:screen!.width, height:screen!.height, depth: 1),
to: drawable.texture, destinationSlice:0, destinationLevel:0, destinationOrigin: MTLOrigin(x:0,y:0,z:0))
blitPass.endEncoding()
if let renderPass = currentRenderPassDescriptor {
renderPass.colorAttachments[0].texture = drawable.texture
renderPass.colorAttachments[0].loadAction = .load
commandBuffer.makeRenderCommandEncoder(descriptor: renderPass)!.endEncoding()
commandBuffer.addCompletedHandler {cb in self.vwaitSem.signal()}
commandBuffer.present(drawable)
commandBuffer.commit()
}
}
}
}
class BackgroundThread: Thread {
var screen: CGContext
var vwaitSem: DispatchSemaphore
var x = 0
init(screen:CGContext, vwaitSem:DispatchSemaphore) {
self.screen = screen
self.vwaitSem = vwaitSem
}
override func main() {
while true {
// screen.clear(CGRect(x:0,y:0, width:screen.width, height:screen.height))
// screen.setFillColor(CGColor.white)
// screen.fill(CGRect(x:x, y:0, width:100, height:100))
// x += 1
screen.clear(CGRect(x:0,y:0, width:screen.width, height:screen.height))
screen.setFillColor(CGColor.white)
let screenWidth = UInt32(screen.width), screenHeight = UInt32(screen.height)
for _ in 0...5000 {
let rect = CGRect(x: CGFloat(arc4random_uniform(screenWidth+1)),
y: CGFloat(arc4random_uniform(screenHeight+1)), width:6, height:6)
screen.fill(rect)
}
vwaitSem.wait()
}
}
}
let width = 640, height = 480,
appMenuItem = NSMenuItem(),
quitMenuItem = NSMenuItem(title:"Quit",
action:#selector(NSApplication.terminate), keyEquivalent:"q"),
window = NSWindow(contentRect:NSMakeRect(0,0, CGFloat(width), CGFloat(height)),
styleMask:[.closable,.titled], backing:.buffered, defer:false)
NSApp.setActivationPolicy(NSApplication.ActivationPolicy.regular)
NSApp.mainMenu = NSMenu()
NSApp.mainMenu?.addItem(appMenuItem)
appMenuItem.submenu = NSMenu()
appMenuItem.submenu?.addItem(quitMenuItem)
window.cascadeTopLeft(from:NSMakePoint(20,20))
window.makeKeyAndOrderFront(nil)
window.contentView = View()
window.makeFirstResponder(window.contentView)
NSApp.activate(ignoringOtherApps:true)
NSApp.run()
I'm getting into Swift by using the Xcode Playground, and I'm following an online course regarding simple image filtering.
So far, here's my code:
import UIKit
let image = UIImage(named: "sample")!
let rgbaImage = RGBAImage(image: image)
class ImageInfo {
let rgbaImage = RGBAImage(image: image)
func getAvgCols() -> [String:Int] {
var totalRed = 0
var totalGreen = 0
var totalBlue = 0
var avgCol = [String:Int]()
for y in 0..<rgbaImage!.height{
for x in 0..<rgbaImage!.width{
let index = y * rgbaImage!.width + x
let pixel = rgbaImage!.pixels[index]
totalRed += Int(pixel.red)
totalGreen += Int(pixel.green)
totalBlue += Int(pixel.blue)
}
}
let pixelCount = rgbaImage!.width * rgbaImage!.height
avgCol["Red"] = (totalRed / pixelCount)
avgCol["Green"] = (totalGreen / pixelCount)
avgCol["Blue"] = (totalBlue / pixelCount)
return avgCol
}
}
let imageInfo = ImageInfo()
var avgRed = imageInfo.getAvgCols()["Red"]!
var avgGreen = imageInfo.getAvgCols()["Green"]!
var avgBlue = imageInfo.getAvgCols()["Blue"]!
var modifier = 20
// IMAGE EDITING LOOP
for y in 0..<rgbaImage!.height{
for x in 0..<rgbaImage!.width{
let index = y * rgbaImage!.width + x
var pixel = rgbaImage!.pixels[index]
let redDelta = Int(pixel.red) - avgRed
let greenDelta = Int(pixel.green) - avgGreen
let blueDelta = Int(pixel.blue) - avgBlue
let redModifier = Double(modifier) * 0.49
let greenModifier = Double(modifier) * 0.25
let blueModifier = Double(modifier) * 0.07
pixel.red = UInt8(max(min(255, avgRed + Int(redModifier) * redDelta),0))
pixel.green = UInt8(max(min(255, avgGreen + Int(greenModifier) * greenDelta),0))
pixel.blue = UInt8(max(min(255, avgBlue + Int(blueModifier) * blueDelta),0))
rgbaImage!.pixels[index] = pixel
}
}
let newImage = rgbaImage!.toUIImage()!
So far, that works well. The last bit of code modifies the image on a per-pixel basis, and the end result is a poor-man's attempt at some sepia colour.
The problem, however, is when I stick the entire image editing loop in another class, like so:
class ImageEdit {
let imageInfo = ImageInfo()
var avgRed = imageInfo.getAvgCols()["Red"]!
var avgGreen = imageInfo.getAvgCols()["Green"]!
var avgBlue = imageInfo.getAvgCols()["Blue"]!
func applyFilter(filter: String, modifier: Int) -> UIImage{
for y in 0..<rgbaImage!.height{
for x in 0..<rgbaImage!.width{
let index = y * rgbaImage!.width + x
var pixel = rgbaImage!.pixels[index]
let redDelta = Int(pixel.red) - avgRed
let greenDelta = Int(pixel.green) - avgGreen
let blueDelta = Int(pixel.blue) - avgBlue
let redModifier = Double(modifier) * 0.49
let greenModifier = Double(modifier) * 0.25
let blueModifier = Double(modifier) * 0.07
pixel.red = UInt8(max(min(255, avgRed + Int(redModifier) * redDelta),0))
pixel.green = UInt8(max(min(255, avgGreen + Int(greenModifier) * greenDelta),0))
pixel.blue = UInt8(max(min(255, avgBlue + Int(blueModifier) * blueDelta),0))
rgbaImage!.pixels[index] = pixel
}
}
let newImage = rgbaImage!.toUIImage()!
return newImage
}
}
NOTE: Since I am using a playground, both classes are in the same file. For clarification, just take my previous code and replace everything below class ImageInfo with class ImageEdit.
I would need access to avgRed, avgGreen, and avgBlue, but all I'm getting is
instance member 'imageInfo' cannot be used on type 'ImageEdit'
and since I'm a noob at Swift, I'm not exactly sure how to fix it.
Did I miss anything?
EDIT: Here's RGBAImage. I did not write this code; it is free to download for anyone who takes the online course.
import UIKit
public struct Pixel {
public var value: UInt32
public var red: UInt8 {
get {
return UInt8(value & 0xFF)
}
set {
value = UInt32(newValue) | (value & 0xFFFFFF00)
}
}
public var green: UInt8 {
get {
return UInt8((value >> 8) & 0xFF)
}
set {
value = (UInt32(newValue) << 8) | (value & 0xFFFF00FF)
}
}
public var blue: UInt8 {
get {
return UInt8((value >> 16) & 0xFF)
}
set {
value = (UInt32(newValue) << 16) | (value & 0xFF00FFFF)
}
}
public var alpha: UInt8 {
get {
return UInt8((value >> 24) & 0xFF)
}
set {
value = (UInt32(newValue) << 24) | (value & 0x00FFFFFF)
}
}
}
public struct RGBAImage {
public var pixels: UnsafeMutableBufferPointer<Pixel>
public var width: Int
public var height: Int
public init?(image: UIImage) {
guard let cgImage = image.CGImage else { return nil }
// Redraw image for correct pixel format
let colorSpace = CGColorSpaceCreateDeviceRGB()
var bitmapInfo: UInt32 = CGBitmapInfo.ByteOrder32Big.rawValue
bitmapInfo |= CGImageAlphaInfo.PremultipliedLast.rawValue & CGBitmapInfo.AlphaInfoMask.rawValue
width = Int(image.size.width)
height = Int(image.size.height)
let bytesPerRow = width * 4
let imageData = UnsafeMutablePointer<Pixel>.alloc(width * height)
guard let imageContext = CGBitmapContextCreate(imageData, width, height, 8, bytesPerRow, colorSpace, bitmapInfo) else { return nil }
CGContextDrawImage(imageContext, CGRect(origin: CGPointZero, size: image.size), cgImage)
pixels = UnsafeMutableBufferPointer<Pixel>(start: imageData, count: width * height)
}
public func toUIImage() -> UIImage? {
let colorSpace = CGColorSpaceCreateDeviceRGB()
var bitmapInfo: UInt32 = CGBitmapInfo.ByteOrder32Big.rawValue
bitmapInfo |= CGImageAlphaInfo.PremultipliedLast.rawValue & CGBitmapInfo.AlphaInfoMask.rawValue
let bytesPerRow = width * 4
let imageContext = CGBitmapContextCreateWithData(pixels.baseAddress, width, height, 8, bytesPerRow, colorSpace, bitmapInfo, nil, nil)
guard let cgImage = CGBitmapContextCreateImage(imageContext) else {return nil}
let image = UIImage(CGImage: cgImage)
return image
}
}
The problem is that you are using the property imageInfo inside ImageEdit before the class has been initialised.
Only when initialisation runs does each stored property get its value, so avgRed can't take its value from a method of imageInfo: imageInfo has not been constructed yet.
Doing this inside a function (init() is basically a function) is just fine. Doing it at the class-body level doesn't work.
This does not work:
class A {
let attribute : Int = 0
init() {
}
}
class B {
let attributeA = A()
let attributeB = attributeA.attribute // this is your problem
}
This does:
For attributes that rely on other attributes in order to be constructed, use a late init. Attributes that rely on external input, failable functions, and so on, declare as optionals.
Late init: var/let attrName: Type
Optional: var/let attrName: Type? (or Type!)
class B {
let attributeA = A()
let attributeB : Int
var attributeC : Int?
init () {
attributeB = attributeA.attribute
attributeC = somethingOptional()
}
func somethingOptional() -> Int? {
return 0 // not really optional, but just illustration
}
}
So for you it would be something like this:
class ImageEdit {
let imageInfo = ImageInfo()
let dict : [String:Int]
init () {
dict = imageInfo.getAvgCols()
}
}
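An alternative sketch, assuming only the classes above: a lazy stored property defers its initialiser until first access, when self is fully available, so it may call methods on other instance members:
class ImageEdit {
    let imageInfo = ImageInfo()
    // lazy: evaluated on first access, after `self` is initialised
    lazy var avgCols: [String: Int] = self.imageInfo.getAvgCols()
}

let edit = ImageEdit()
let avgRed = edit.avgCols["Red"]!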
}