How to solve SpriteKit SKTexture's CGImage "Context leak"? - swift

Xcode 11-12, macOS Catalina, Swift 5 project...
This code produces a "Context leak detected, msgtracer returned -1" error:
import Foundation
import SpriteKit
import GameKit

func getTextureImage(_ index: Int, isSky: Bool = true) -> CGImage {
    let perlinSource = GKPerlinNoiseSource(
        frequency: 1,
        octaveCount: 7,
        persistence: 0.5,
        lacunarity: 2.0,
        seed: 123
    )
    let noise = GKNoise(perlinSource)
    noise.gradientColors = [
        -1.0: NSColor(red: 0.0, green: 0.15, blue: 0.85, alpha: 1.0),
        1.0: NSColor(red: 1, green: 1, blue: 1, alpha: 1.0),
    ]
    noise.move(by: vector_double3(Double(index), 0, 0))
    let noiseMap = GKNoiseMap(noise,
                              size: SIMD2(repeating: 1),
                              origin: SIMD2(repeating: 0.0),
                              sampleCount: SIMD2(repeating: 16),
                              seamless: false)
    let t = SKTexture(noiseMap: noiseMap)
    let i = t.cgImage()
    return i
}

for x in 0...128 {
    let texture = getTextureImage(x)
}
How can I solve that error at the line with the .cgImage() method call?
I need CGImage data from a colored GKNoiseMap; I will then use it with MetalKit to load the data into an MTLTexture with
MTKTextureLoader.newTexture(cgImage: texture, options: textureLoaderOptions)
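For context, a minimal sketch of that Metal side (the device and loader setup and the option values here are illustrative assumptions, not my exact code):
import Metal
import MetalKit

// Hypothetical setup, just for illustration.
let device = MTLCreateSystemDefaultDevice()!
let loader = MTKTextureLoader(device: device)
let textureLoaderOptions: [MTKTextureLoader.Option: Any] = [.SRGB: false]

let cgImage = getTextureImage(0)
// newTexture(cgImage:options:) throws, so the failure case is swallowed here for brevity.
let mtlTexture = try? loader.newTexture(cgImage: cgImage, options: textureLoaderOptions)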

Yeah, of course I tried
autoreleasepool {
    let t = getTextureImage(x)
}
and that solved the problem :) So you have to use autoreleasepool when working with NSImage and CGImage through the Swift API.
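Putting it together, a minimal sketch of the generation loop with the pool drained on every iteration (using the getTextureImage(_:) function above):
var images: [CGImage] = []
for x in 0...128 {
    // Each iteration's temporary Core Graphics objects are released here
    // instead of piling up until the whole loop has finished.
    autoreleasepool {
        images.append(getTextureImage(x))
    }
}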

Related

Why are the shades of black in a custom SCNGeometry lighter than in an SCNSphere

In this scene:
- the column of spheres on the left is created using SCNSphere()
- the column of 'circles' on the right is created using SCNGeometry() with .point primitives
- there is only an ambient light source
- all geometries use a .constant lighting model
- each pair of spheres uses the same RGB values to define the colour.
Why are the shades of black for the last two pairs lighter for the circles than for their equivalent spheres?
Full playground code to reproduce these images:
Creating the scene
import UIKit
import SceneKit
import PlaygroundSupport

// Constants I'm using for the darkest grey colour
let B_RED: CGFloat = 0.05
let B_GREEN: CGFloat = 0.05
let B_BLUE: CGFloat = 0.05
let B_ALPHA: CGFloat = 1.0
let BLACK_COLOUR = UIColor(red: B_RED, green: B_GREEN, blue: B_BLUE, alpha: B_ALPHA)

let scene: SCNScene = {
    let s = SCNScene()
    return s
}()

let sceneView: SCNView = {
    let v = SCNView(frame: CGRect(x: 0, y: 0, width: 600, height: 800))
    v.scene = scene
    v.backgroundColor = UIColor(white: 0.66, alpha: 1.0)
    v.allowsCameraControl = true
    v.debugOptions = [SCNDebugOptions.showLightInfluences]
    v.backgroundColor
    return v
}()

let ambientLigntNode: SCNNode = {
    let n = SCNNode()
    n.light = SCNLight()
    n.light!.type = SCNLight.LightType.ambient
    n.light!.color = UIColor(white: 1, alpha: 1.0)
    return n
}()

PlaygroundPage.current.liveView = sceneView

// a camera
var cameraNode = SCNNode()
cameraNode.camera = SCNCamera()
cameraNode.simdPosition = simd_float3(0, 0, 8)
scene.rootNode.addChildNode(cameraNode)
scene.rootNode.addChildNode(ambientLigntNode)
The column of spheres
// ----------------------------------------------------
// White Sphere
let whiteSphere = SCNSphere(radius: 0.3)
let whiteMaterial = SCNMaterial()
whiteMaterial.diffuse.contents = simd_float3(1,1,1)
whiteMaterial.lightingModel = .constant
whiteSphere.materials = [whiteMaterial]
let whiteSphereNode = SCNNode(geometry: whiteSphere)
whiteSphereNode.simdPosition = simd_float3(-1,2,0)
scene.rootNode.addChildNode(whiteSphereNode)
// ----------------------------------------------------
// Black Sphere
let blackSphere = SCNSphere(radius: 0.3)
let blackMaterial = SCNMaterial()
blackMaterial.diffuse.contents = BLACK_COLOUR
blackMaterial.lightingModel = .constant
blackSphere.materials = [blackMaterial]
let blackSphereNode = SCNNode(geometry: blackSphere)
blackSphereNode.simdPosition = simd_float3(-1,-2,0)
scene.rootNode.addChildNode(blackSphereNode)
// ----------------------------------------------------
// Red Sphere
let redSphere = SCNSphere(radius: 0.3)
let redMaterial = SCNMaterial()
redMaterial.diffuse.contents = UIColor(
red: 1.0,
green: 0.0,
blue: 0.0,
alpha: 1.0
)
redMaterial.lightingModel = .constant
redSphere.materials = [redMaterial]
let redSphereNode = SCNNode(geometry: redSphere)
redSphereNode.simdPosition = simd_float3(-1, 1, 0)
scene.rootNode.addChildNode(redSphereNode)
// ----------------------------------------------------
// Green Sphere
let greenSphere = SCNSphere(radius: 0.3)
let greenMaterial = SCNMaterial()
greenMaterial.diffuse.contents = UIColor(
red: 0.0,
green: 1.0,
blue: 0.0,
alpha: 1.0
)
greenMaterial.lightingModel = .constant
greenSphere.materials = [greenMaterial]
let greenSphereNode = SCNNode(geometry: greenSphere)
greenSphereNode.simdPosition = simd_float3(-1, 0, 0)
scene.rootNode.addChildNode(greenSphereNode)
// ----------------------------------------------------
// Blue Sphere
let blueSphere = SCNSphere(radius: 0.3)
let blueMaterial = SCNMaterial()
blueMaterial.diffuse.contents = UIColor(
red: 0.0,
green: 0.0,
blue: 1.0,
alpha: 1.0
)
blueMaterial.lightingModel = .constant
blueSphere.materials = [blueMaterial]
let blueSphereNode = SCNNode(geometry: blueSphere)
blueSphereNode.simdPosition = simd_float3(-1, -1, 0)
scene.rootNode.addChildNode(blueSphereNode)
// ----------------------------------------------------
// Grey Sphere
let greySphere = SCNSphere(radius: 0.3)
let greyMaterial = SCNMaterial()
greyMaterial.diffuse.contents = UIColor(
red: 0.5,
green: 0.5,
blue: 0.5,
alpha: 1.0
)
greyMaterial.lightingModel = .constant
greySphere.materials = [greyMaterial]
let greySphereNode = SCNNode(geometry: greySphere)
greySphereNode.simdPosition = simd_float3(-1, -3, 0)
scene.rootNode.addChildNode(greySphereNode)
Column of circles
// ----------------------------------------------------
// Custom SCNGeometry using vertex data
struct Vertex {
    let x: Float
    let y: Float
    let z: Float
    let r: Float
    let g: Float
    let b: Float
    let a: Float
}

let vertices: [Vertex] = [
    Vertex(x: 0.0, y: 2.0, z: 0.0, r: 1.0, g: 1.0, b: 1.0, a: 1.0), // white
    Vertex(x: 0.0, y: 1.0, z: 0.0, r: 1.0, g: 0.0, b: 0.0, a: 1.0), // red
    Vertex(x: 0.0, y: 0.0, z: 0.0, r: 0.0, g: 1.0, b: 0.0, a: 1.0), // green
    Vertex(x: 0.0, y: -1.0, z: 0.0, r: 0.0, g: 0.0, b: 1.0, a: 1.0), // blue
    Vertex(x: 0.0, y: -3.0, z: 0.0, r: 0.5, g: 0.5, b: 0.5, a: 1.0), // rgb
    Vertex(
        x: 0.0, y: -2.0, z: 0.0,
        r: Float(B_RED), g: Float(B_GREEN), b: Float(B_BLUE), a: Float(B_ALPHA)
    )
]
let vertexData = Data(
bytes: vertices,
count: MemoryLayout<Vertex>.size * vertices.count
)
let positionSource = SCNGeometrySource(
data: vertexData,
semantic: SCNGeometrySource.Semantic.vertex,
vectorCount: vertices.count,
usesFloatComponents: true,
componentsPerVector: 3,
bytesPerComponent: MemoryLayout<Float>.size,
dataOffset: 0,
dataStride: MemoryLayout<Vertex>.size
)
let colourSource = SCNGeometrySource(
data: vertexData,
semantic: SCNGeometrySource.Semantic.color,
vectorCount: vertices.count,
usesFloatComponents: true,
componentsPerVector: 4,
bytesPerComponent: MemoryLayout<Float>.size,
dataOffset: MemoryLayout<Float>.size * 3,
dataStride: MemoryLayout<Vertex>.size
)
let elements = SCNGeometryElement(
data: nil,
primitiveType: .point,
primitiveCount: vertices.count,
bytesPerIndex: MemoryLayout<Int>.size
)
elements.pointSize = 100
elements.minimumPointScreenSpaceRadius = 100
elements.maximumPointScreenSpaceRadius = 100
let spheres = SCNGeometry(
sources: [positionSource, colourSource],
elements: [elements]
)
let sphereNode = SCNNode(geometry: spheres)
let sphereMaterial = SCNMaterial()
sphereMaterial.lightingModel = .constant
spheres.materials = [sphereMaterial]
sphereNode.simdPosition = simd_float3(0,0,0)
scene.rootNode.addChildNode(sphereNode)
Update based on the accepted answer: converting from sRGB to linear RGB gives the same result.
func srgbToLinear(x: Float) -> Float {
    if x <= 0.04045 {
        return x / 12.92
    }
    return powf((x + 0.055) / 1.055, 2.4)
}
let vertices: [Vertex] = [
    Vertex(x: 0.0, y: 2.0, z: 0.0, r: 1.0, g: 1.0, b: 1.0, a: 1.0), // white
    Vertex(x: 0.0, y: 1.0, z: 0.0, r: 1.0, g: 0.0, b: 0.0, a: 1.0), // red
    Vertex(x: 0.0, y: 0.0, z: 0.0, r: 0.0, g: 1.0, b: 0.0, a: 1.0), // green
    Vertex(x: 0.0, y: -1.0, z: 0.0, r: 0.0, g: 0.0, b: 1.0, a: 1.0), // blue
    Vertex(x: 0.0, y: -3.0, z: 0.0, r: srgbToLinear(x: 0.5), g: srgbToLinear(x: 0.5), b: srgbToLinear(x: 0.5), a: 1.0), // rgb
    Vertex(
        x: 0.0, y: -2.0, z: 0.0,
        r: srgbToLinear(x: Float(B_RED)),
        g: srgbToLinear(x: Float(B_GREEN)),
        b: srgbToLinear(x: Float(B_BLUE)),
        a: 1.0
    )
]
This would be the case if the two types of graphic are generated in different color spaces. For example, if the gray for the spheres is interpreted as being in the sRGB color space while the gray for the circles is interpreted as being in a generic linear RGB space, you would see a difference in the color values.
Consider the following playground code:
//: A UIKit based Playground for presenting user interface
import UIKit
import PlaygroundSupport
import CoreGraphics

class MyViewController: UIViewController {
    override func loadView() {
        let view = CustomView()
        view.backgroundColor = UIColor.darkGray
        self.view = view
    }
}

class CustomView: UIView {
    override func draw(_ rect: CGRect) {
        if let cgContext = UIGraphicsGetCurrentContext() {
            cgContext.saveGState()
            let gray: [CGFloat] = [0.5, 0.5, 0.5, 1.0]
            let srgbGray = CGColor(
                colorSpace: CGColorSpace(name: CGColorSpace.sRGB)!, components: gray)!
            cgContext.setFillColor(srgbGray)
            cgContext.fillEllipse(in: CGRect(x: 10, y: 20, width: 72, height: 72))
            let genericGray = CGColor(
                colorSpace: CGColorSpace(name: CGColorSpace.genericRGBLinear)!, components: gray)!
            cgContext.setFillColor(genericGray)
            cgContext.fillEllipse(in: CGRect(x: 110, y: 20, width: 72, height: 72))
            cgContext.restoreGState()
        }
    }
}
// Present the view controller in the Live View window
PlaygroundPage.current.liveView = MyViewController()
It produces an effect almost identical to the one you are seeing. I suspect that the circles are drawn assuming a generic color space and the spheres are drawn assuming sRGB.
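To see why the same component value can look so different in the two spaces, here is a small sketch of the conversion (my own illustration, not part of the answer):
import Foundation

// Encode a linear component as sRGB (the inverse of the asker's srgbToLinear).
func linearToSRGB(_ x: Float) -> Float {
    return x <= 0.0031308 ? x * 12.92 : 1.055 * powf(x, 1.0 / 2.4) - 0.055
}

// If 0.05 reaches the renderer as a *linear* value, the display encodes it as
// roughly 0.25 in sRGB, which reads as a noticeably lighter grey than sRGB 0.05.
print(linearToSRGB(0.05))   // ≈ 0.25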

Cannot convert return expression of type to return type 'AnyClass' (aka 'AnyObject.Type')

I am trying to generate a watermark with PDFKit.
I have the official sample code, but I want to make it more dynamic so that the watermark text can be entered.
However, I have a problem with returning AnyClass.
Watermark code
import Foundation
import UIKit
import PDFKit

class WatermarkPage: PDFPage {
    var watermark: NSString

    init(watermark: NSString) {
        self.watermark = watermark
        super.init()
    }

    override func draw(with box: PDFDisplayBox, to context: CGContext) {
        super.draw(with: box, to: context)
        UIGraphicsPushContext(context)
        context.saveGState()

        let pageBounds = self.bounds(for: box)
        context.translateBy(x: pageBounds.size.width, y: pageBounds.size.height)
        context.scaleBy(x: 1.0, y: -1.0)

        let string3: NSString = watermark
        let attributes3: [NSAttributedString.Key: Any] = [
            NSAttributedString.Key.foregroundColor: #colorLiteral(red: 0, green: 0, blue: 0, alpha: 1),
            NSAttributedString.Key.font: UIFont.boldSystemFont(ofSize: 12)
        ]
        string3.draw(at: CGPoint(x: -250, y: 750), withAttributes: attributes3)

        context.restoreGState()
        UIGraphicsPopContext()
    }
}
Here is where I encountered the problem:
internal func classForPage() -> AnyClass {
    let page = WatermarkPage(watermark: "Test text")
    return page.self
}
Swift's AnyClass is an alias for AnyObject.Type; you might want to try using just AnyObject here:
internal func classForPage() -> AnyObject {
    let page = WatermarkPage(watermark: "Test text")
    return page.self
}

How to multiply a color to an image in Swift?

I'm back with another question about moving my app from Android to iOS (which is more difficult than I thought...).
OK, so in this case I'm trying to colorize the following image:
In Android I used the following code:
public static Bitmap doNalaFilter(Bitmap src) {
    Bitmap bmBrown0 = Bitmap.createBitmap(src.getWidth(), src.getHeight(), src.getConfig());
    Canvas cBrown0 = new Canvas(bmBrown0);
    Paint paintBrown0 = new Paint();
    paintBrown0.setColorFilter(new LightingColorFilter(Color.WHITE, 0x800000));
    cBrown0.drawBitmap(bmTemp, 0, 0, paintBrown0);

    Bitmap bmBrown = Bitmap.createBitmap(src.getWidth(), src.getHeight(), src.getConfig());
    Canvas cBrown = new Canvas(bmBrown);
    cBrown.drawBitmap(src, 0, 0, null);
    Paint paintBrown = new Paint();
    paintBrown.setXfermode(new PorterDuffXfermode(PorterDuff.Mode.MULTIPLY));
    cBrown.drawBitmap(bmBrown0, 0, 0, paintBrown);
    return bmBrown;
}
Getting the following image:
But in Swift I'm trying the following code:
func nalaFilter() -> UIImage? {
    let inImage = CIImage(image: self)
    let SRGBImage = inImage?.applyingFilter("CILinearToSRGBToneCurve")
    let brownMatrix = CIFilter(name: "CIMultiplyBlendMode")
    let brownRect = CGRect(
        x: (SRGBImage?.extent.origin.x)!,
        y: (SRGBImage?.extent.origin.y)!,
        width: (SRGBImage?.extent.size.width)!,
        height: (SRGBImage?.extent.size.height)!)
    let brownColor = CIColor(red: 128.0/255.0, green: 0.0, blue: 0.0)
    let brownOverlay = CIImage(color: brownColor)
    let brownCroppedImage = brownOverlay.cropped(to: brownRect)
    brownMatrix?.setValue(SRGBImage, forKey: kCIInputImageKey)
    brownMatrix?.setValue(brownCroppedImage, forKey: kCIInputBackgroundImageKey)
    let brownOutImage = brownMatrix?.outputImage
    let linearImage = brownOutImage?.applyingFilter("CISRGBToneCurveToLinear")
    let cgImage = CIContext().createCGImage(linearImage!, from: linearImage!.extent)
    return UIImage(cgImage: cgImage!)
}
And I'm getting this!
Does anybody have an idea for Swift code that works the same way as the Android version?
Thanks in advance!
Finally I got the following code, which solves my problem:
func nalaFilter() -> UIImage? {
    let brownColor = UIColor(red: 128.0/255.0, green: 0.0, blue: 0.0, alpha: 1.0)
    let brownRect = CGRect(origin: .zero, size: self.size)

    // Colored image
    UIGraphicsBeginImageContextWithOptions(brownRect.size, true, 0.0)
    brownColor.setFill()
    UIRectFill(brownRect)
    let brownColoredImage = UIGraphicsGetImageFromCurrentImageContext()

    let brownContext = UIGraphicsGetCurrentContext()
    brownContext!.setFillColor(UIColor.white.cgColor)
    brownContext!.fill(brownRect)
    self.draw(in: brownRect, blendMode: .normal, alpha: 1)
    brownColoredImage?.draw(in: brownRect, blendMode: .colorDodge, alpha: 1)
    let outBrown0Image = UIGraphicsGetImageFromCurrentImageContext()

    // Multiplied image
    self.draw(in: brownRect, blendMode: .normal, alpha: 1)
    outBrown0Image?.draw(in: brownRect, blendMode: .multiply, alpha: 1)
    let outBrownImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()

    return outBrownImage
}
This gets exactly the same result as my Android one.
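For reference, assuming nalaFilter() is declared in an extension on UIImage, usage might look like this (the asset name and the image view are placeholders):
if let original = UIImage(named: "nala"),   // placeholder asset name
   let tinted = original.nalaFilter() {
    imageView.image = tinted                // hypothetical UIImageView outlet
}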

swift: Which filter should I choose if I want to normalize an image?

As the title says, I'm using Swift to run a prediction model, and someone suggested I use Core Image to subtract the mean value of the input image, but it seems I can't do that with only one filter. So which filter should I choose if I want to normalize an image?
So far, I've tried to do this without Core Image:
let oriimage = self.convert(cmage: ciimage)
let uimean:UIImage = UIImage(color: oriimage.averageColor(), size: CGSize(width: oriimage.size.width, height: oriimage.size.width))!
let cimean:CIImage = CIImage(image:uimean)!
ciimage.composited(over: cimean)
let image = self.convert(cmage: ciimage)
However, this is more like combining the two images together rather than subtracting one from the other.
Looking through the Core Image filters, I found that CIColorMatrix might work:
extension UIImage {
    func colorized(with color: UIColor) -> UIImage? {
        guard
            let ciimage = CIImage(image: self),
            let colorMatrix = CIFilter(name: "CIColorMatrix")
        else { return nil }

        var r: CGFloat = 1, g: CGFloat = 1, b: CGFloat = 1, a: CGFloat = 1
        color.getRed(&r, green: &g, blue: &b, alpha: &a)

        colorMatrix.setDefaults()
        colorMatrix.setValue(ciimage, forKey: "inputImage")
        colorMatrix.setValue(CIVector(x: 1, y: 0, z: 0, w: 0), forKey: "inputRVector")
        colorMatrix.setValue(CIVector(x: 0, y: 1, z: 0, w: 0), forKey: "inputGVector")
        colorMatrix.setValue(CIVector(x: 0, y: 0, z: 1, w: 0), forKey: "inputBVector")
        colorMatrix.setValue(CIVector(x: 0, y: 0, z: 0, w: 1), forKey: "inputAVector")
        colorMatrix.setValue(CIVector(x: -r, y: -g, z: -b, w: 0), forKey: "inputBiasVector")

        if let ciimage = colorMatrix.outputImage {
            return UIImage(ciImage: ciimage)
        }
        return nil
    }
}
let image = oriimage.colorized(with: color) ?? oriimage
let screensize:CGSize = CGSize(width: 720, height: 1280)
var face = image.cropped(boundingBox: facerect)
var left = image.cropped(boundingBox: leftrect)
var right = image.cropped(boundingBox: rightrect)
let col = 64, row = 64, c = 3
face = face.resizeImageWith(newSize: CGSize(width:col,height:row))
However, another problem appears on the last line: the result returns nil and the application stops, even though when I display the image it looks fine.
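One guess (an assumption on my part, since the failing helpers are the asker's own): colorized(with:) returns a UIImage backed only by a CIImage, so anything downstream that needs a cgImage gets nil. A sketch under that assumption renders the filter output through a CIContext first:
// Assumption: the later cropping/resizing helpers need a CGImage-backed UIImage.
func rendered(_ image: CIImage) -> UIImage? {
    let context = CIContext()
    guard let cgImage = context.createCGImage(image, from: image.extent) else { return nil }
    return UIImage(cgImage: cgImage)
}

// Inside colorized(with:), instead of returning UIImage(ciImage: ciimage):
// if let output = colorMatrix.outputImage { return rendered(output) }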

SpriteKit - Creating Changing Gradients

Currently I am creating a gradient using CIFilter. Code:
//In update function
let context = CIContext(options: nil)
let filter = CIFilter(name: "CILinearGradient")
let startVector = CIVector(x: size.width * 0.5, y: 0)
let endVector = CIVector(x: size.width * 0.5, y: size.height)
let color1 = CIColor(red: red1/255, green: green1/255, blue: blue1/255, alpha: 1)
let color2 = CIColor(red: red2/255, green: green2/255, blue: blue2/255, alpha: 1)
//Here I would change all rgb values by +- 0.2 - 0.5
filter!.setDefaults()
filter!.setValue(startVector, forKey: "inputPoint0")
filter!.setValue(endVector, forKey: "inputPoint1")
filter!.setValue(color1, forKey: "inputColor0")
filter!.setValue(color2, forKey: "inputColor1")
let image = context.createCGImage(filter!.outputImage!, from: CGRect(x: 0, y: 0, width: size.width, height: size.height))
let texture = SKTexture(image: UIImage(cgImage: image!))
backgroundImage.texture = texture
My goal is to create a background gradient that changes color. When I tried to do this using CIFilter, it worked, but my CPU jumped up to 100%, much higher than my usual 20%.
Is there any other way to accomplish this?
Thanks in advance
If control over the gradient colors is needed:
I found this very good Git repository that has a lot of functionality; it should help you out:
https://github.com/braindrizzlestudio/BDGradientNode
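As another option (my own sketch, not from the linked project): a fragment SKShader can draw the gradient on the GPU, and the two colours can be animated through uniforms, so no CGImage or SKTexture has to be rebuilt every frame:
import SpriteKit
import simd

// Shader that mixes two colours along the sprite's vertical texture coordinate.
let shaderSource = """
void main() {
    gl_FragColor = mix(u_color1, u_color2, v_tex_coord.y);
}
"""
let gradientShader = SKShader(source: shaderSource, uniforms: [
    SKUniform(name: "u_color1", vectorFloat4: vector_float4(0.1, 0.2, 0.8, 1.0)),
    SKUniform(name: "u_color2", vectorFloat4: vector_float4(0.9, 0.9, 1.0, 1.0))
])

// backgroundImage is the SKSpriteNode already used in the question.
backgroundImage.shader = gradientShader

// In update(_:), nudge the uniform values instead of recreating a texture:
gradientShader.uniformNamed("u_color1")?.vectorFloat4Value = vector_float4(0.1, 0.3, 0.8, 1.0)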