Can't display texture with sampler - Swift

I am trying to display a texture loaded with MTKTextureLoader. I have a buffer that stores my vertex coordinates (I build two triangles to form a rectangle in which to display my image), and another buffer that stores the texture coordinates of each vertex.
I made a sampler to sample data from my texture, but the problem is that I get nothing (a black image).
I put the Swift code here just in case my error comes from there, but I think it comes from the Metal code. If you look at my fragment shader, you will see two comments; they show something that I can't understand:
If I give hard-coded coordinates directly to the sample function, it works (it colours the triangles with the colour at those coordinates).
If I output the coordinates I pass to the sampler as colour components, it also displays something coherent (the triangles are coloured according to the interpolated coordinates).
So the problem doesn't seem to come from the sampler, nor from the coordinates, which is what I don't understand.
Here is my Swift code:
import Cocoa
import MetalKit
import Metal

class ViewController: NSViewController, MTKViewDelegate {

    var device: MTLDevice!
    var texture: MTLTexture!
    var commandQueue: MTLCommandQueue!
    var vertexBuffer: MTLBuffer!
    var vertexCoordinates: [Float] = [
        -1,  1, 0, 1,
        -1, -1, 0, 1,
         1, -1, 0, 1,
         1, -1, 0, 1,
         1,  1, 0, 1,
        -1,  1, 0, 1,
    ]
    var vertexUVBuffer: MTLBuffer!
    var vertexUVCoordinates: [Float] = [
        0, 1,
        0, 0,
        1, 0,
        1, 0,
        1, 1,
        0, 1
    ]
    var library: MTLLibrary!
    var defaultPipelineState: MTLRenderPipelineState!
    var samplerState: MTLSamplerState!

    @IBOutlet var metalView: MTKView!

    override func viewDidLoad() {
        super.viewDidLoad()
        device = MTLCreateSystemDefaultDevice()
        let textureLoader = MTKTextureLoader(device: device)
        metalView.device = device
        metalView.delegate = self
        metalView.preferredFramesPerSecond = 0
        metalView.sampleCount = 4
        texture = try! textureLoader.newTextureWithContentsOfURL(NSBundle.mainBundle().URLForResource("abeilles", withExtension: "jpg")!, options: [MTKTextureLoaderOptionAllocateMipmaps: NSNumber(bool: true)])
        commandQueue = device.newCommandQueue()
        library = device.newDefaultLibrary()
        vertexBuffer = device.newBufferWithBytes(&vertexCoordinates, length: sizeof(Float) * vertexCoordinates.count, options: [])
        vertexUVBuffer = device.newBufferWithBytes(&vertexUVCoordinates, length: sizeof(Float) * vertexUVCoordinates.count, options: [])
        let renderPipelineDescriptor = MTLRenderPipelineDescriptor()
        renderPipelineDescriptor.vertexFunction = library.newFunctionWithName("passTroughVertex")
        renderPipelineDescriptor.fragmentFunction = library.newFunctionWithName("myFragmentShader")
        renderPipelineDescriptor.sampleCount = metalView.sampleCount
        renderPipelineDescriptor.colorAttachments[0].pixelFormat = metalView.colorPixelFormat
        defaultPipelineState = try! device.newRenderPipelineStateWithDescriptor(renderPipelineDescriptor)
        let samplerDescriptor = MTLSamplerDescriptor()
        samplerDescriptor.minFilter = .Linear
        samplerDescriptor.magFilter = .Linear
        samplerDescriptor.mipFilter = .Linear
        samplerDescriptor.sAddressMode = .ClampToEdge
        samplerDescriptor.rAddressMode = .ClampToEdge
        samplerDescriptor.tAddressMode = .ClampToEdge
        samplerDescriptor.normalizedCoordinates = true
        samplerState = device.newSamplerStateWithDescriptor(samplerDescriptor)
        metalView.draw()
        // Do any additional setup after loading the view.
    }

    func drawInMTKView(view: MTKView) {
        let commandBuffer = commandQueue.commandBuffer()
        let commandEncoder = commandBuffer.renderCommandEncoderWithDescriptor(metalView.currentRenderPassDescriptor!)
        commandEncoder.setRenderPipelineState(defaultPipelineState)
        commandEncoder.setVertexBuffer(vertexBuffer, offset: 0, atIndex: 0)
        commandEncoder.setVertexBuffer(vertexUVBuffer, offset: 0, atIndex: 1)
        commandEncoder.setFragmentSamplerState(samplerState, atIndex: 0)
        commandEncoder.setFragmentTexture(texture, atIndex: 0)
        commandEncoder.drawPrimitives(MTLPrimitiveType.Triangle, vertexStart: 0, vertexCount: 6, instanceCount: 1)
        commandEncoder.endEncoding()
        commandBuffer.presentDrawable(metalView.currentDrawable!)
        commandBuffer.commit()
    }

    func mtkView(view: MTKView, drawableSizeWillChange size: CGSize) {
        // view.draw()
    }

    override var representedObject: AnyObject? {
        didSet {
            // Update the view, if already loaded.
        }
    }
}
Here is my Metal code:
#include <metal_stdlib>
using namespace metal;

struct VertexOut {
    float4 position [[position]];
    float2 texCoord;
};

vertex VertexOut passTroughVertex(uint vid [[ vertex_id ]],
                                  constant float4 *vertexPosition [[ buffer(0) ]],
                                  constant float2 *vertexUVPos [[ buffer(1) ]]) {
    VertexOut vertexOut;
    vertexOut.position = vertexPosition[vid];
    vertexOut.texCoord = vertexUVPos[vid];
    return vertexOut;
}

fragment float4 myFragmentShader(VertexOut inFrag [[ stage_in ]],
                                 texture2d<float> myTexture [[ texture(0) ]],
                                 sampler mySampler [[ sampler(0) ]]) {
    float4 myColor = myTexture.sample(mySampler, inFrag.texCoord);
    // myColor = myTexture.sample(mySampler, float2(1));
    // myColor = float4(inFrag.texCoord.r, inFrag.texCoord.g, 0, 1);
    return myColor;
}

You're allocating space for mipmaps but not actually generating them. The docs say that when specifying MTKTextureLoaderOptionAllocateMipmaps, "a full set of mipmap levels are allocated for the texture when the texture is loaded, and it is your responsibility to generate the mipmap contents."
Your sampler configuration causes the resulting texture to be sampled at the base mipmap level as long as the texture is small relative to the rect on the screen, but if you feed in a larger texture, it starts sampling the smaller levels of the mipmap stack, picking up all-black pixels, which are then blended together to either darken the image or cause the output to be entirely black.
You should use the -generateMipmapsForTexture: method on an MTLBlitCommandEncoder to generate a complete set of mipmaps once your texture is loaded.
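A minimal sketch of that blit pass (my addition, written against the same Swift 2-era API the question uses; run it once right after the texture and command queue are created):
let mipmapCommandBuffer = commandQueue.commandBuffer()
let blitEncoder = mipmapCommandBuffer.blitCommandEncoder()
blitEncoder.generateMipmapsForTexture(texture)   // fills all the levels that were allocated
blitEncoder.endEncoding()
mipmapCommandBuffer.commit()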

Related

Converting SCNMatrix4 from Swift to Objective-C

I cannot for the life of me figure out how to create an SCNMatrix4 from a transform in Objective-C.
The Swift code I'm trying to use in Objective-C:
let affineTransform = frame.displayTransform(for: .portrait, viewportSize: sceneView.bounds.size)
let transform = SCNMatrix4(affineTransform)
faceGeometry.setValue(SCNMatrix4Invert(transform), forKey: "displayTransform")
I got the first and third lines working, but I can't find any way to create this SCNMatrix4 from the CGAffineTransform.
CGAffineTransform affine = [self.sceneView.session.currentFrame displayTransformForOrientation:UIInterfaceOrientationPortrait viewportSize:self.sceneView.bounds.size];
SCNMatrix4 trans = ??
[f setValue:SCNMatrix4Invert(trans) forKey:@"displayTransform"];
There is no SCNMatrix4Make; I tried simd_matrix4x4, but that didn't seem to work either.
Thank you
edit:
The Swift code is from Apple's example project "ARKitFaceExample"; this is the full code:
/*
See LICENSE folder for this sample’s licensing information.

Abstract:
Demonstrates using video imagery to texture and modify the face mesh.
*/

import ARKit
import SceneKit

/// - Tag: VideoTexturedFace
class VideoTexturedFace: TexturedFace {

    override func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
        guard let sceneView = renderer as? ARSCNView,
            let frame = sceneView.session.currentFrame,
            anchor is ARFaceAnchor
            else { return nil }

        #if targetEnvironment(simulator)
        #error("ARKit is not supported in iOS Simulator. Connect a physical iOS device and select it as your Xcode run destination, or select Generic iOS Device as a build-only destination.")
        #else
        // Show video texture as the diffuse material and disable lighting.
        let faceGeometry = ARSCNFaceGeometry(device: sceneView.device!, fillMesh: true)!
        let material = faceGeometry.firstMaterial!
        material.diffuse.contents = sceneView.scene.background.contents
        material.lightingModel = .constant

        guard let shaderURL = Bundle.main.url(forResource: "VideoTexturedFace", withExtension: "shader"),
            let modifier = try? String(contentsOf: shaderURL)
            else { fatalError("Can't load shader modifier from bundle.") }
        faceGeometry.shaderModifiers = [.geometry: modifier]

        // Pass view-appropriate image transform to the shader modifier so
        // that the mapped video lines up correctly with the background video.
        let affineTransform = frame.displayTransform(for: .portrait, viewportSize: sceneView.bounds.size)
        let transform = SCNMatrix4(affineTransform)
        faceGeometry.setValue(SCNMatrix4Invert(transform), forKey: "displayTransform")

        contentNode = SCNNode(geometry: faceGeometry)
        #endif

        return contentNode
    }
}
In case anyone ever needs this, here is the extension I was missing
extension SCNMatrix4 {
    /**
     Create a 4x4 matrix from CGAffineTransform, which represents a 3x3 matrix
     but stores only the 6 elements needed for 2D affine transformations.

         [ a  b  0 ]      [ a  b  0  0 ]
         [ c  d  0 ]  ->  [ c  d  0  0 ]
         [ tx ty 1 ]      [ 0  0  1  0 ]
                          [ tx ty 0  1 ]

     Used for transforming texture coordinates in the shader modifier.
     (Needs to be SCNMatrix4, not SIMD float4x4, for passing to shader modifier via KVC.)
     */
    init(_ affineTransform: CGAffineTransform) {
        self.init()
        m11 = Float(affineTransform.a)
        m12 = Float(affineTransform.b)
        m21 = Float(affineTransform.c)
        m22 = Float(affineTransform.d)
        m41 = Float(affineTransform.tx)
        m42 = Float(affineTransform.ty)
        m33 = 1
        m44 = 1
    }
}
To replicate the Swift extension for creating an SCNMatrix4 from a CGAffineTransform you can implement the following function:
Some .h file:
extern SCNMatrix4 SCNMatrix4FromTransform(CGAffineTransform transform);
Some .m file:
SCNMatrix4 SCNMatrix4FromTransform(CGAffineTransform transform) {
    // Start from the identity matrix so the entries we don't set are well-defined.
    SCNMatrix4 matrix = SCNMatrix4Identity;
    matrix.m11 = transform.a;
    matrix.m12 = transform.b;
    matrix.m21 = transform.c;
    matrix.m22 = transform.d;
    matrix.m41 = transform.tx;
    matrix.m42 = transform.ty;
    matrix.m33 = 1;
    matrix.m44 = 1;
    return matrix;
}
Then your code becomes:
CGAffineTransform affineTransform = [self.sceneView.session.currentFrame displayTransformForOrientation:UIInterfaceOrientationPortrait viewportSize:self.sceneView.bounds.size];
SCNMatrix4 transform = SCNMatrix4FromTransform(affineTransform);
[f setValue:[NSValue valueWithSCNMatrix4:SCNMatrix4Invert(transform)] forKey:@"displayTransform"];
Note the use of [NSValue valueWithSCNMatrix4:]. This is needed to wrap the struct in an object, which satisfies KVC's requirements when setting the displayTransform value.

Rough Metal rasterization

I followed a tutorial to see how to draw a triangle in Metal. I am a beginner with Metal, and the issue is that the triangle has really rough edges. It's as if the rasterizer is cutting corners: the edges look pixelated, and the pixels along them are much larger than my screen's pixels. How can I get smoother rasterization, assuming I am using rasterization correctly?
import Cocoa
import MetalKit

class ViewController: NSViewController {

    var MetalView: MTKView {
        return view as! MTKView
    }

    var Device: MTLDevice!
    var CommandQue: MTLCommandQueue!
    var PipelineState: MTLRenderPipelineState?
    var VertexBuffer: MTLBuffer?

    override func viewDidLoad() {
        super.viewDidLoad()
        MetalView.device = MTLCreateSystemDefaultDevice()
        Device = MetalView.device
        MetalView.clearColor = MTLClearColorMake(0, 1, 1, 1)
        CommandQue = Device.makeCommandQueue()
        let CommandBuffer = CommandQue.makeCommandBuffer()
        let CommandEncoder = CommandBuffer!.makeRenderCommandEncoder(descriptor: MetalView.currentRenderPassDescriptor!)
        let Library = Device.makeDefaultLibrary()
        let VertexFunction = Library!.makeFunction(name: "VertexShader")
        let FragmentFunction = Library!.makeFunction(name: "FragmentShader")
        let PipelineDescriptor = MTLRenderPipelineDescriptor()
        PipelineDescriptor.vertexFunction = VertexFunction
        PipelineDescriptor.fragmentFunction = FragmentFunction
        PipelineDescriptor.colorAttachments[0].pixelFormat = .bgra8Unorm
        do {
            PipelineState = try Device.makeRenderPipelineState(descriptor: PipelineDescriptor)
        } catch let Error as NSError {
            print("Error: \(Error.localizedDescription)")
        }
        let Vertices: [Float] = [0, 1, 0, -1, -1, 0, 1, -1, 0]
        VertexBuffer = Device.makeBuffer(bytes: Vertices, length: Vertices.count * MemoryLayout<Float>.size, options: [])
        CommandEncoder!.setRenderPipelineState(PipelineState!)
        CommandEncoder!.setVertexBuffer(VertexBuffer, offset: 0, index: 0)
        CommandEncoder!.drawPrimitives(type: .triangle, vertexStart: 0, vertexCount: Vertices.count)
        CommandEncoder!.endEncoding()
        CommandBuffer!.present(MetalView.currentDrawable!)
        CommandBuffer!.commit()
    }
}
Not sure, but I think viewDidLoad is too early for rendering. The view might not have its final size here.
Instead, only perform the initialization code in viewDidLoad (pipeline state and buffers), register yourself as the delegate of the MTKView and then implement this callback function:
func draw(in view: MTKView) {
    // your drawing code here
}
This is called every time your view gets drawn to the screen.
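A rough sketch of that restructuring, using the question's names (untested; the class also has to declare conformance to MTKViewDelegate, and the one-time setup stays in viewDidLoad):
override func viewDidLoad() {
    super.viewDidLoad()
    MetalView.device = MTLCreateSystemDefaultDevice()
    Device = MetalView.device
    MetalView.delegate = self   // register for the per-frame callback
    // build CommandQue, PipelineState and VertexBuffer here, once
}

func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) { }

func draw(in view: MTKView) {
    guard let descriptor = view.currentRenderPassDescriptor,
          let drawable = view.currentDrawable,
          let buffer = CommandQue.makeCommandBuffer(),
          let encoder = buffer.makeRenderCommandEncoder(descriptor: descriptor) else { return }
    encoder.setRenderPipelineState(PipelineState!)
    encoder.setVertexBuffer(VertexBuffer, offset: 0, index: 0)
    // 3 vertices = 1 triangle (Vertices.count is the number of floats, not vertices)
    encoder.drawPrimitives(type: .triangle, vertexStart: 0, vertexCount: 3)
    encoder.endEncoding()
    buffer.present(drawable)
    buffer.commit()
}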
As Frank has suggested, viewDidLoad is not the place to issue draw calls; the way the code is written, you are rendering only one frame.
As for the aliasing, it depends on the resolution you are rendering at. There are techniques to handle it, such as smoothstep()ing edges in the fragment shader, or post-processing anti-aliasing such as rendering at a higher resolution and then downsampling; a related option is sketched below.
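One more option, not named in the answer above but closely related (the texture question earlier on this page already uses it), is hardware multisampling on the MTKView. A sketch of the configuration, using the question's names and placed before makeRenderPipelineState(descriptor:):
MetalView.sampleCount = 4                                 // 4x multisampled drawable
PipelineDescriptor.sampleCount = MetalView.sampleCount    // the pipeline's sample count must match the view's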
Also, in Swift, variables use camelCase; using PascalCase makes it look like you are calling statics on classes.

Implementing AVVideoCompositing causes video rotation problems

I am using Apple's example https://developer.apple.com/library/ios/samplecode/AVCustomEdit/Introduction/Intro.html and have some issues with video transformation.
If the source assets have a preferredTransform other than identity, the output video will have incorrectly rotated frames. This problem can be fixed when AVMutableVideoComposition has no value in its customVideoCompositorClass property and the AVMutableVideoCompositionLayerInstruction's transform is set up with asset.preferredTransform. But because I am using a custom video compositor, which adopts the AVVideoCompositing protocol, I can't use the standard video compositing instructions.
How can I pre-transform the input asset tracks before their CVPixelBuffers are passed into the Metal shaders? Or is there any other way to fix it?
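(For reference, the standard-instruction fix mentioned above looks roughly like the sketch below; it only works when customVideoCompositorClass is not set, and the helper name is mine, not the sample's.)
import AVFoundation
import CoreGraphics

// Hypothetical helper: a layer instruction that bakes the source track's
// preferredTransform into a standard (non-custom) video composition.
func makeLayerInstruction(for compositionTrack: AVCompositionTrack,
                          from sourceTrack: AVAssetTrack) -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: compositionTrack)
    instruction.setTransform(sourceTrack.preferredTransform, at: .zero)
    return instruction
}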
Fragment of original code:
func buildCompositionObjectsForPlayback(_ forPlayback: Bool, overwriteExistingObjects: Bool) {
    // Proceed only if the composition objects have not already been created.
    if self.composition != nil && !overwriteExistingObjects { return }
    if self.videoComposition != nil && !overwriteExistingObjects { return }
    guard !clips.isEmpty else { return }

    // Use the naturalSize of the first video track.
    let videoTracks = clips[0].tracks(withMediaType: AVMediaType.video)
    let videoSize = videoTracks[0].naturalSize
    let composition = AVMutableComposition()
    composition.naturalSize = videoSize

    /*
     With transitions:
     Place clips into alternating video & audio tracks in composition, overlapped by transitionDuration.
     Set up the video composition to cycle between "pass through A", "transition from A to B", "pass through B".
     */
    let videoComposition = AVMutableVideoComposition()
    if self.transitionType == TransitionType.diagonalWipe.rawValue {
        videoComposition.customVideoCompositorClass = APLDiagonalWipeCompositor.self
    } else {
        videoComposition.customVideoCompositorClass = APLCrossDissolveCompositor.self
    }

    // Every videoComposition needs these properties to be set:
    videoComposition.frameDuration = CMTimeMake(1, 30) // 30 fps.
    videoComposition.renderSize = videoSize

    buildTransitionComposition(composition, andVideoComposition: videoComposition)

    self.composition = composition
    self.videoComposition = videoComposition
}
UPDATE:
I worked around the transform problem like this:
private func makeTransformedPixelBuffer(fromBuffer buffer: CVPixelBuffer, withTransform transform: CGAffineTransform) -> CVPixelBuffer? {
    guard let newBuffer = renderContext?.newPixelBuffer() else {
        return nil
    }

    // Correct transformation example I took from https://stackoverflow.com/questions/29967700/coreimage-coordinate-system
    var preferredTransform = transform
    preferredTransform.b *= -1
    preferredTransform.c *= -1
    var transformedImage = CIImage(cvPixelBuffer: buffer).transformed(by: preferredTransform)
    preferredTransform = CGAffineTransform(translationX: -transformedImage.extent.origin.x, y: -transformedImage.extent.origin.y)
    transformedImage = transformedImage.transformed(by: preferredTransform)

    let filterContext = CIContext(mtlDevice: MTLCreateSystemDefaultDevice()!)
    filterContext.render(transformedImage, to: newBuffer)
    return newBuffer
}
But I am wondering if there is a more memory-efficient way that doesn't require creating new pixel buffers.
How can I pre-transform the input asset tracks before their CVPixelBuffers are passed into the Metal shaders?
The best way to achieve maximum performance is to transform your video frames directly in the shader. You just need to apply a rotation matrix in your vertex shader.
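A sketch of the host-side half of that idea (my own illustration, not code from AVCustomEdit): expand the track's preferredTransform into a matrix, pass it to the vertex shader as a uniform, and multiply each vertex position (or texture coordinate) by it there.
import simd
import CoreGraphics
import Metal

// Hypothetical helper: lift a 2D affine transform into a float4x4 uniform.
func rotationMatrix(from t: CGAffineTransform) -> float4x4 {
    return float4x4(columns: (
        SIMD4<Float>(Float(t.a),  Float(t.b),  0, 0),
        SIMD4<Float>(Float(t.c),  Float(t.d),  0, 0),
        SIMD4<Float>(0,           0,           1, 0),
        SIMD4<Float>(Float(t.tx), Float(t.ty), 0, 1)
    ))
}

// In the compositor's render pass, before drawing the quad for a source frame:
// var matrix = rotationMatrix(from: track.preferredTransform)
// encoder.setVertexBytes(&matrix, length: MemoryLayout<float4x4>.stride, index: 1)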

Reshape Face Coordinate in Swift

I want to reshape the face coordinates as shown in this video: https://www.dropbox.com/s/vsttylwgt25szha/IMG_6590.TRIM.MOV?dl=0 (sorry, unfortunately the video is about 11 MB in size).
So far I've just captured the face coordinates using the iOS Vision API:
// Facial landmarks are GREEN.
fileprivate func drawFeatures(onFaces faces: [VNFaceObservation], onImageWithBounds bounds: CGRect) {
    CATransaction.begin()
    for faceObservation in faces {
        let faceBounds = boundingBox(forRegionOfInterest: faceObservation.boundingBox, withinImageBounds: bounds)
        guard let landmarks = faceObservation.landmarks else {
            continue
        }

        // Iterate through landmarks detected on the current face.
        let landmarkLayer = CAShapeLayer()
        let landmarkPath = CGMutablePath()
        let affineTransform = CGAffineTransform(scaleX: faceBounds.size.width, y: faceBounds.size.height)

        // Treat eyebrows and lines as open-ended regions when drawing paths.
        let openLandmarkRegions: [VNFaceLandmarkRegion2D?] = [
            //landmarks.leftEyebrow,
            //landmarks.rightEyebrow,
            landmarks.faceContour,
            landmarks.noseCrest,
            // landmarks.medianLine
        ]

        // Draw eyes, lips, and nose as closed regions.
        let closedLandmarkRegions = [
            landmarks.nose
        ].compactMap { $0 } // Filter out missing regions.

        // Draw paths for the open regions.
        for openLandmarkRegion in openLandmarkRegions where openLandmarkRegion != nil {
            landmarkPath.addPoints(in: openLandmarkRegion!,
                                   applying: affineTransform,
                                   closingWhenComplete: false)
        }

        // Draw paths for the closed regions.
        for closedLandmarkRegion in closedLandmarkRegions {
            landmarkPath.addPoints(in: closedLandmarkRegion,
                                   applying: affineTransform,
                                   closingWhenComplete: true)
        }

        // Format the path's appearance: color, thickness, shadow.
        landmarkLayer.path = landmarkPath
        landmarkLayer.lineWidth = 1
        landmarkLayer.strokeColor = UIColor.green.cgColor
        landmarkLayer.fillColor = nil
        landmarkLayer.shadowOpacity = 1.0
        landmarkLayer.shadowRadius = 1

        // Locate the path in the parent coordinate system.
        landmarkLayer.anchorPoint = .zero
        landmarkLayer.frame = faceBounds
        landmarkLayer.transform = CATransform3DMakeScale(1, -1, 1)

        pathLayer?.addSublayer(landmarkLayer)
    }
    CATransaction.commit()
}
How do I move forward from here? Can anyone guide me, please?

How do you extend the space (bounds) of a CIImage without stretching the original?

I'm applying several filters on an already cropped image, and I'd like a flipped duplicate of it next to the original. This would make it twice as wide.
Problem: How do you extend the bounds so both can fit? .cropped(to: CGRect) will stretch whatever original content was there. The reason there is existing content is that I'm trying to use applyingFilter as much as possible to save on processing. It's also why I'm cropping the original un-mirrored image.
Below is my CIImage "alphaMaskBlend2" with a compositing filter, and a transform applied to the same image that flips it and adjusts its position. sourceCore.extent is the size I want for the final image.
alphaMaskBlend2 = alphaMaskBlend2?.applyingFilter("CISourceAtopCompositing",
    parameters: [kCIInputImageKey: (alphaMaskBlend2?.transformed(by: scaledImageTransform))!,
                 kCIInputBackgroundImageKey: alphaMaskBlend2!]).cropped(to: sourceCore.extent)
I've played around with the position of the transform in LLDB. I found that with this filter being cropped, the leftmost image becomes stretched. If I use clamped(to:) with the same extent, and then re-crop the image to that extent again, the image is no longer distorted, but the bounds of the image are only half the width they should be.
The only way I could achieve this is by compositing against a background image (sourceCore) that is the size of the two images combined, and then compositing the other image:
alphaMaskBlend2 = alphaMaskBlend2?.applyingFilter("CISourceAtopCompositing",
    parameters: [kCIInputImageKey: alphaMaskBlend2!,
                 kCIInputBackgroundImageKey: sourceCore])
alphaMaskBlend2 = alphaMaskBlend2?.applyingFilter("CISourceAtopCompositing",
    parameters: [kCIInputImageKey: (alphaMaskBlend2?.cropped(to: cropRect).transformed(by: scaledImageTransform))!,
                 kCIInputBackgroundImageKey: alphaMaskBlend2!])
The problem is that this is more expensive than necessary; I even verified it with benchmarking. It would make a lot more sense if I could do this with one composite.
While I can "flip" a CIImage, I couldn't find a way to use an existing CIFilter to "stitch" it alongside the original. However, with some basic knowledge of writing your own CIKernel, you can. A simple project achieving this is here.
This project contains a sample image, and using CoreImage and a GLKView it:
flips the image by transposing the Y "bottom/top" coordinates for CIPerspectiveCorrection
creates a new "palette" image using CIConstantColor and then crops it using CICrop to be twice the width of the original
uses a very simple CIKernel (registered as "Stitch") to actually stitch it together
Here's the code to flip:
// use CIPerspectiveCorrection to "flip" on the Y axis
let minX:CGFloat = 0
let maxY:CGFloat = 0
let maxX = originalImage?.extent.width
let minY = originalImage?.extent.height
let flipFilter = CIFilter(name: "CIPerspectiveCorrection")
flipFilter?.setValue(CIVector(x: minX, y: maxY), forKey: "inputTopLeft")
flipFilter?.setValue(CIVector(x: maxX!, y: maxY), forKey: "inputTopRight")
flipFilter?.setValue(CIVector(x: minX, y: minY!), forKey: "inputBottomLeft")
flipFilter?.setValue(CIVector(x: maxX!, y: minY!), forKey: "inputBottomRight")
flipFilter?.setValue(originalImage, forKey: "inputImage")
flippedImage = flipFilter?.outputImage
Here's the code to create the palette:
let paletteFilter = CIFilter(name: "CIConstantColorGenerator")
paletteFilter?.setValue(CIColor(red: 0.7, green: 0.4, blue: 0.4), forKey: "inputColor")
paletteImage = paletteFilter?.outputImage
let cropFilter = CIFilter(name: "CICrop")
cropFilter?.setValue(paletteImage, forKey: "inputImage")
cropFilter?.setValue(CIVector(x: 0, y: 0, z: (originalImage?.extent.width)! * 2, w: (originalImage?.extent.height)!), forKey: "inputRectangle")
paletteImage = cropFilter?.outputImage
Here's the code to register and use the custom CIFilter:
// register and use stitch filer
StitchedFilters.registerFilters()
let stitchFilter = CIFilter(name: "Stitch")
stitchFilter?.setValue(originalImage?.extent.width, forKey: "inputThreshold")
stitchFilter?.setValue(paletteImage, forKey: "inputPalette")
stitchFilter?.setValue(originalImage, forKey: "inputOriginal")
stitchFilter?.setValue(flippedImage, forKey: "inputFlipped")
finalImage = stitchFilter?.outputImage
All of this code (along with layout constraints) is in viewDidLoad in the demo project, so please, place it where it belongs!
Here's the code to (a) create a CIFilter subclass called Stitch and (b) register it so you can use it like any other filter:
func openKernelFile(_ name: String) -> String {
    let filePath = Bundle.main.path(forResource: name, ofType: ".cikernel")
    do {
        return try String(contentsOfFile: filePath!)
    }
    catch let error as NSError {
        return error.description
    }
}

let CategoryStitched = "Stitch"

class StitchedFilters: NSObject, CIFilterConstructor {
    static func registerFilters() {
        CIFilter.registerName(
            "Stitch",
            constructor: StitchedFilters(),
            classAttributes: [
                kCIAttributeFilterCategories: [CategoryStitched]
            ])
    }
    func filter(withName name: String) -> CIFilter? {
        switch name {
        case "Stitch":
            return Stitch()
        default:
            return nil
        }
    }
}
class Stitch: CIFilter {
    let kernel = CIKernel(source: openKernelFile("Stitch"))

    var inputThreshold: Float = 0
    var inputPalette: CIImage!
    var inputOriginal: CIImage!
    var inputFlipped: CIImage!

    override var attributes: [String: Any] {
        return [
            kCIAttributeFilterDisplayName: "Stitch",
            "inputThreshold": [kCIAttributeIdentity: 0,
                               kCIAttributeClass: "NSNumber",
                               kCIAttributeDisplayName: "Threshold",
                               kCIAttributeDefault: 0.5,
                               kCIAttributeMin: 0,
                               kCIAttributeSliderMin: 0,
                               kCIAttributeSliderMax: 1,
                               kCIAttributeType: kCIAttributeTypeScalar],
            "inputPalette": [kCIAttributeIdentity: 0,
                             kCIAttributeClass: "CIImage",
                             kCIAttributeDisplayName: "Palette",
                             kCIAttributeType: kCIAttributeTypeImage],
            "inputOriginal": [kCIAttributeIdentity: 0,
                              kCIAttributeClass: "CIImage",
                              kCIAttributeDisplayName: "Original",
                              kCIAttributeType: kCIAttributeTypeImage],
            "inputFlipped": [kCIAttributeIdentity: 0,
                             kCIAttributeClass: "CIImage",
                             kCIAttributeDisplayName: "Flipped",
                             kCIAttributeType: kCIAttributeTypeImage]
        ]
    }

    override init() {
        super.init()
    }

    override func setValue(_ value: Any?, forKey key: String) {
        switch key {
        case "inputThreshold":
            inputThreshold = value as! Float
        case "inputPalette":
            inputPalette = value as! CIImage
        case "inputOriginal":
            inputOriginal = value as! CIImage
        case "inputFlipped":
            inputFlipped = value as! CIImage
        default:
            break
        }
    }

    @available(*, unavailable) required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    override var outputImage: CIImage {
        return kernel!.apply(
            extent: inputPalette.extent,
            roiCallback: { (index, rect) in return rect },
            arguments: [
                inputThreshold as Any,
                inputPalette as Any,
                inputOriginal as Any,
                inputFlipped as Any
            ])!
    }
}
Finally, the CIKernel code:
kernel vec4 stitch(float threshold, sampler palette, sampler original, sampler flipped) {
    vec2 coord = destCoord();
    if (coord.x < threshold) {
        return sample(original, samplerCoord(original));
    } else {
        vec2 flippedCoord = coord - vec2(threshold, 0.0);
        vec2 flippedCoordinate = samplerTransform(flipped, flippedCoord);
        return sample(flipped, flippedCoordinate);
    }
}
Now, someone else may have something more elegant (maybe even using an existing CIFilter), but this works well. It only uses the GPU, so performance-wise it can be used in "real time". I added code that isn't strictly needed (registering the filter, using a dictionary to define attributes) to make it more of a teaching exercise for those new to creating CIKernels, one that anyone with knowledge of using CIFilters can consume. If you focus on the kernel code, you'll recognize how similar to C it looks.
Last, a caveat. I am only stitching the (Y-axis) flipped image to the right of the original. You'll need to adjust things if you want something else.