Get snapshot from AVCaptureSession containing Vision face detection elements - Swift

I use AVCaptureSession to set up a camera view and the Vision framework to detect faces and draw a rectangle over each one.
Here is how I do it:
override func viewDidLoad() {
super.viewDidLoad()
self.prepareVisionRequest()
}
fileprivate func prepareVisionRequest() {
//self.trackingRequests = []
var requests = [VNTrackObjectRequest]()
let faceDetectionRequest = VNDetectFaceRectanglesRequest(completionHandler: { (request, error) in
if error != nil {
print("FaceDetection error: \(String(describing: error)).")
}
guard let faceDetectionRequest = request as? VNDetectFaceRectanglesRequest,
let results = faceDetectionRequest.results else {
return
}
DispatchQueue.main.async {
// Add the observations to the tracking list
for observation in results {
let faceTrackingRequest = VNTrackObjectRequest(detectedObjectObservation: observation)
faceTrackingRequest.trackingLevel = .fast
requests.append(faceTrackingRequest)
}
self.trackingRequests = requests
}
})
// Start with detection. Find face, then track it.
self.detectionRequests = [faceDetectionRequest]
self.sequenceRequestHandler = VNSequenceRequestHandler()
self.setupVisionDrawingLayers()
}
// MARK: Drawing Vision Observations
fileprivate func setupVisionDrawingLayers() {
let captureDeviceResolution = self.captureDeviceResolution
let captureDeviceBounds = CGRect(x: 0,
y: 0,
width: captureDeviceResolution.width,
height: captureDeviceResolution.height)
let captureDeviceBoundsCenterPoint = CGPoint(x: captureDeviceBounds.midX,
y: captureDeviceBounds.midY)
let normalizedCenterPoint = CGPoint(x: 0.5, y: 0.5)
guard let rootLayer = self.rootLayer else {
self.presentErrorAlert(message: "view was not properly initialized")
return
}
let overlayLayer = CALayer()
overlayLayer.name = "DetectionOverlay"
overlayLayer.masksToBounds = true
overlayLayer.anchorPoint = normalizedCenterPoint
overlayLayer.bounds = captureDeviceBounds
overlayLayer.position = CGPoint(x: rootLayer.bounds.midX, y: rootLayer.bounds.midY)
let faceRectangleShapeLayer = CAShapeLayer()
faceRectangleShapeLayer.name = "RectangleOutlineLayer"
faceRectangleShapeLayer.bounds = captureDeviceBounds
faceRectangleShapeLayer.anchorPoint = normalizedCenterPoint
faceRectangleShapeLayer.position = captureDeviceBoundsCenterPoint
faceRectangleShapeLayer.fillColor = UIColor.white.withAlphaComponent(0.9).cgColor
// faceLandmarksShapeLayer.strokeColor = UIColor.white.withAlphaComponent(0.7).cgColor
faceRectangleShapeLayer.lineWidth = 5
faceRectangleShapeLayer.shadowOpacity = 0.7
faceRectangleShapeLayer.shadowRadius = 5
let faceLandmarksShapeLayer = CAShapeLayer()
faceLandmarksShapeLayer.name = "FaceLandmarksLayer"
faceLandmarksShapeLayer.bounds = captureDeviceBounds
faceLandmarksShapeLayer.anchorPoint = normalizedCenterPoint
faceLandmarksShapeLayer.position = captureDeviceBoundsCenterPoint
faceLandmarksShapeLayer.fillColor = nil
faceLandmarksShapeLayer.strokeColor = nil
//
overlayLayer.addSublayer(faceRectangleShapeLayer)
faceRectangleShapeLayer.addSublayer(faceLandmarksShapeLayer)
rootLayer.addSublayer(overlayLayer)
self.detectionOverlayLayer = overlayLayer
self.detectedFaceRectangleShapeLayer = faceRectangleShapeLayer
self.detectedFaceLandmarksShapeLayer = faceLandmarksShapeLayer
self.updateLayerGeometry()
}
Now, I'm trying three ways to take a snapshot:
1- Using UIGraphicsImageRenderer: it shows only the rectangle on the face, and the camera view is not visible - it's black.
2- Taking the image from captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection): the image from the buffer shows only the camera view, no rectangle.
3- Using AVCapturePhotoCaptureDelegate to capture a photo from the AVCaptureSession: the captured photo shows only the camera view, no rectangle.
Could you please help me take a snapshot that contains both the camera view and the rectangle? Thanks!
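One approach that may work here (just a sketch, not tested against this exact setup): take the pixel buffer you already receive in captureOutput (your option 2), turn it into a UIImage, and then render the detection overlay layer on top of it with CALayer.render(in:). The helper name snapshotWithOverlay below is made up for illustration; pass in your detectionOverlayLayer, and expect to adjust scaling/orientation so the layer's coordinate space matches the frame.
import UIKit
import CoreImage
import AVFoundation

// Hypothetical helper: composite a camera frame with the CALayer that holds the face rectangles.
func snapshotWithOverlay(pixelBuffer: CVPixelBuffer, overlayLayer: CALayer) -> UIImage? {
    // 1. Convert the camera frame into a UIImage.
    let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
    let context = CIContext()
    guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return nil }
    let frameImage = UIImage(cgImage: cgImage)
    // 2. Draw the frame, then render the overlay layer on top (call this on the main thread).
    let renderer = UIGraphicsImageRenderer(size: frameImage.size)
    return renderer.image { rendererContext in
        frameImage.draw(in: CGRect(origin: .zero, size: frameImage.size))
        // The overlay layer may need to be scaled/translated into the frame's coordinate space first.
        overlayLayer.render(in: rendererContext.cgContext)
    }
}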

Related

CIImage pixelBuffer always return nil

I am applying a filter effect to my WebRTC call, following this tutorial:
https://developer.apple.com/documentation/vision/applying_matte_effects_to_people_in_images_and_video
Here is my code to convert:
func capturer(_ capturer: RTCVideoCapturer, didCapture frame: RTCVideoFrame) {
let sourcePixelBuffer = frame.buffer as! RTCCVPixelBuffer
let pixelBufferRef = sourcePixelBuffer.pixelBuffer
if #available(iOS 15.0, *) {
DispatchQueue.global().async {
if let output = GreetingProcessor.shared.processVideoFrame(
foreground: pixelBufferRef,
background: self.vbImage) {
print("new output: \(output) => \(output.pixelBuffer) + \(self.buffer(from: output))")
guard let px = output.pixelBuffer else { return }
let rtcPixelBuffer = RTCCVPixelBuffer(pixelBuffer: px)
let i420buffer = rtcPixelBuffer.toI420()
let newFrame = RTCVideoFrame(buffer: i420buffer, rotation: frame.rotation, timeStampNs: frame.timeStampNs)
self.videoSource.capturer(capturer, didCapture: newFrame)
}
}
}
}
Then here is how I apply the effect:
func blendImages(
background: CIImage,
foreground: CIImage,
mask: CIImage,
isRedMask: Bool = false
) -> CIImage? {
// scale mask
let maskScaleX = foreground.extent.width / mask.extent.width
let maskScaleY = foreground.extent.height / mask.extent.height
let maskScaled = mask.transformed(by: CGAffineTransform(scaleX: maskScaleX, y: maskScaleY))
// scale background
let backgroundScaleX = (foreground.extent.width / background.extent.width)
let backgroundScaleY = (foreground.extent.height / background.extent.height)
let backgroundScaled = background.transformed(
by: CGAffineTransform(scaleX: backgroundScaleX, y: backgroundScaleY))
let blendFilter = isRedMask ? CIFilter.blendWithRedMask() : CIFilter.blendWithMask()
blendFilter.inputImage = foreground
blendFilter.backgroundImage = backgroundScaled
blendFilter.maskImage = maskScaled
return blendFilter.outputImage
}
The problem is that output.pixelBuffer is always nil, so I cannot create an RTCVideoFrame to pass back to the delegate.
Can someone help?
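For what it's worth, CIImage.pixelBuffer is documented to be non-nil only when the CIImage was created directly from a CVPixelBuffer; the output of a filter chain has no backing buffer, so it will always be nil. A common workaround (a sketch only, the helper name and buffer attributes are placeholders) is to render the filtered CIImage into a newly created CVPixelBuffer with a CIContext, and wrap that in the RTCCVPixelBuffer:
import CoreImage
import CoreVideo

// Reuse one context rather than creating one per frame.
let ciContext = CIContext()

// Hypothetical helper: draw a filtered CIImage into a fresh pixel buffer.
func renderToPixelBuffer(_ image: CIImage) -> CVPixelBuffer? {
    let attrs = [kCVPixelBufferIOSurfacePropertiesKey as String: [String: Any]()] as CFDictionary
    var buffer: CVPixelBuffer?
    CVPixelBufferCreate(kCFAllocatorDefault,
                        Int(image.extent.width),
                        Int(image.extent.height),
                        kCVPixelFormatType_32BGRA,
                        attrs,
                        &buffer)
    guard let output = buffer else { return nil }
    ciContext.render(image, to: output)
    return output
}
The resulting buffer could then be passed to RTCCVPixelBuffer(pixelBuffer:) as in the existing code.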

show masking on object which is between camera and wall using RealityKit

I made a video for generating a floor plan, in which I need to capture the wall and floor together at a certain position. If the user is too close to the wall, or if any object comes between the camera and the wall/floor, I need to show a "Too Close" mask on that object, something like what is shown in this video.
I tried to use a raycast in the session(_ session: ARSession, didUpdate frame: ARFrame) method, but I am very new to AR and do not know which method to use.
func session(_ session: ARSession, didUpdate frame: ARFrame) {
guard let query = self.arView?.makeRaycastQuery(from: self.arView?.center ?? CGPoint.zero,
allowing: .estimatedPlane,
alignment: .any)
else { return }
guard let raycastResult = self.arView?.session.raycast(query).first
else { return }
let currentPositionOfCamera = raycastResult.worldTransform.getPosition()
if currentPositionOfCamera != .zero {
let distanceFromCamera = frame.camera.transform.getPosition().distanceFrom(position: currentPositionOfCamera)
print("Distance from raycast:",distanceFromCamera)
if (distanceFromCamera < 0.5) {
print("Too Close")
}
}
}
I am just learning ARKit and RealityKit as well, but wouldn't your code be:
let currentPositionOfCamera = self.arView.cameraTransform.translation
if currentPositionOfCamera != .zero {
// distance is defined in simd as the distance between 2 points
let distanceFromCamera = distance(raycastResult.worldTransform.position, currentPositionOfCamera)
print("Distance from raycast:",distanceFromCamera)
if (distanceFromCamera < 0.5) {
print("Too Close")
let rayDirection = normalize(raycastResult.worldTransform.position - self.arView.cameraTransform.translation)
// This pulls the text back toward the camera from the plane
let textPositionInWorldCoordinates = raycastResult.worldTransform.position - (rayDirection * 0.1)
let textEntity = self.tooCloseModel()
// This scales the text so it is of a consistent size
textEntity.scale = .one * distanceFromCamera
var textPositionWithCameraOrientation = self.arView.cameraTransform
textPositionWithCameraOrientation.translation = textPositionInWorldCoordinates
// self.textAnchor is defined somewhere in the class as an optional
self.textAnchor = AnchorEntity(world: textPositionWithCameraOrientation.matrix)
textAnchor.addChild(textEntity)
self.arView.scene.addAnchor(textAnchor)
} else {
guard let textAnchor = self.textAnchor else { return }
self.arView.scene.removeAnchor(textAnchor)
}
}
// Creates a text ModelEntity
func tooCloseModel() -> ModelEntity {
let lineHeight: CGFloat = 0.05
let font = MeshResource.Font.systemFont(ofSize: lineHeight)
let textMesh = MeshResource.generateText("Too Close", extrusionDepth: Float(lineHeight * 0.1), font: font)
let textMaterial = SimpleMaterial(color: .red, isMetallic: true) // classification is not defined here, so use a fixed color
let model = ModelEntity(mesh: textMesh, materials: [textMaterial])
// Center the text
model.position.x -= model.visualBounds(relativeTo: nil).extents.x / 2
return model
}
This code is adapted from Apple's Visualizing Scene Semantics.

Get Size of image in SCNNode / ARKit Swift

I'm trying to scan a reference image and then display the image itself above the printed reference image. The "virtual" image size should be the same as the printed size.
My idea: get the size of the printed reference image, then scale the image in the SCNNode to this size (or scale the SCNNode to this size?).
But: 1 - How do I get the size of the printed image? 2 - For scaling the SCNNode I also need the size of this node. How do I get it?
import UIKit
import SceneKit
import ARKit
import AVKit
import AVFoundation
class ViewController: UIViewController, ARSCNViewDelegate {
@IBOutlet var sceneView: ARSCNView!
private var planeNode: SCNNode?
private var imageNode: SCNNode?
private var animationInfo: AnimationInfo?
private var currentMediaName: String?
private var scrollView: UIScrollView!
override func viewDidLoad() {
super.viewDidLoad()
let scene = SCNScene()
sceneView.scene = scene
sceneView.delegate = self
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
// Load reference images to look for from "AR Resources" folder
guard let referenceImages = ARReferenceImage.referenceImages(inGroupNamed: "AR Resources", bundle: nil) else {
fatalError("Missing expected asset catalog resources.")
}
// Create a session configuration
let configuration = ARWorldTrackingConfiguration()
// Add previously loaded images to ARScene configuration as detectionImages
configuration.detectionImages = referenceImages
// Run the view's session
sceneView.session.run(configuration)
let tap = UITapGestureRecognizer(target: self, action: #selector(handleTap(rec:)))
//Add recognizer to sceneview
sceneView.addGestureRecognizer(tap)
}
//Method called when tap
@objc func handleTap(rec: UITapGestureRecognizer){
let location: CGPoint = rec.location(in: sceneView)
let hits = self.sceneView.hitTest(location, options: nil)
if !hits.isEmpty{
let tappedNode = hits.first?.node
if tappedNode != nil && tappedNode?.name != nil{
let stringArr = tappedNode?.name?.components(separatedBy: "-")
let name = stringArr![0]
let size = stringArr![1].components(separatedBy: ",")
let width = Float(size[0])
let height = Float(size[1])
loadReferenceImage(tappedNode: tappedNode!, name: name, width: width!, height: height!)
}
}
}
private func playVideo() {
guard let path = Bundle.main.path(forResource: "video", ofType:"m4v") else {
debugPrint("video.m4v not found")
return
}
let player = AVPlayer(url: URL(fileURLWithPath: path))
let playerController = AVPlayerViewController()
playerController.player = player
present(playerController, animated: true) {
player.play()
}
}
func loadReferenceImage(tappedNode: SCNNode, name: String, width: Float, height: Float){
print("TAP")
print(name)
let currentNode = tappedNode.parent
if let image = UIImage(named: "col" + name){
let childNodes = currentNode?.childNodes
for node in (childNodes)!{
node.removeFromParentNode()
}
let newImage = UIImage(named: "col" + name)
let newnode = SCNNode(geometry: SCNPlane(width: CGFloat(width), height: CGFloat(height)))
newnode.geometry?.firstMaterial?.diffuse.contents = newImage
newnode.scale = SCNVector3(x: 10, y: 10, z: 10)
currentNode?.removeAnimation(forKey: "spin_around")
let rotation = SCNVector3((currentNode?.eulerAngles.x)!-0.95,(currentNode?.eulerAngles.y)!,(currentNode?.eulerAngles.z)!)
currentNode?.eulerAngles = rotation
//SIZE??????
let nodex = currentNode?.scale.x
let nodey = currentNode?.scale.y
let nodez = currentNode?.scale.z
let factorx = width / nodex!
let factory = height / nodey!
currentNode?.addChildNode(newnode)
}
}
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
guard let imageAnchor = anchor as? ARImageAnchor else {
return
}
// 1. Load scene.
let planeScene = SCNScene(named: "art.scnassets/plane.scn")!
let planeNode = planeScene.rootNode.childNode(withName: "planeRootNode", recursively: true)!
// 2. Calculate size based on planeNode's bounding box.
let (min, max) = planeNode.boundingBox
let size = SCNVector3Make(max.x - min.x, max.y - min.y, max.z - min.z)
// 3. Calculate the ratio of difference between real image and object size.
// Ignore Y axis because it will be pointed out of the image.
let widthRatio = Float(imageAnchor.referenceImage.physicalSize.width)/1.2
let heightRatio = Float(imageAnchor.referenceImage.physicalSize.height)/1.2
let width = imageAnchor.referenceImage.physicalSize.width
let height = imageAnchor.referenceImage.physicalSize.height
let prefix = "-"
let imageSize = width.description + "," + height.description
let targetName = imageAnchor.referenceImage.name! + prefix + imageSize
// Pick smallest value to be sure that object fits into the image.
let finalRatio = [widthRatio, heightRatio].min()!
// 4. Set transform from imageAnchor data.
planeNode.transform = SCNMatrix4(imageAnchor.transform)
// 5. Animate appearance by scaling model from 0 to previously calculated value.
let appearanceAction = SCNAction.scale(to: CGFloat(finalRatio), duration: 0.4)
//test
appearanceAction.timingMode = .easeOut
// Set initial scale to 0.
planeNode.scale = SCNVector3Make(0 , 0, 0)
//rotate y
let spin = CABasicAnimation(keyPath: "rotation")
spin.fromValue = NSValue(scnVector4: SCNVector4(x: 0, y: 1, z: 0, w: 0))
spin.toValue = NSValue(scnVector4: SCNVector4(x: 0, y: 1, z: 0, w: Float(CGFloat(2 * Double.pi))))
spin.duration = 4
spin.repeatCount = .infinity
planeNode.addAnimation(spin, forKey: "spin_around")
// Add to root node.
sceneView.scene.rootNode.addChildNode(planeNode)
// Run the appearance animation.
planeNode.runAction(appearanceAction)
planeNode.name = targetName
let nodes = planeNode.childNodes
for node in nodes{
node.name = targetName
}
self.planeNode = planeNode
self.imageNode = node
}
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor, updateAtTime time: TimeInterval) {
guard let imageNode = imageNode, let planeNode = planeNode else {
return
}
// 1. Unwrap animationInfo. Calculate animationInfo if it is nil.
guard let animationInfo = animationInfo else {
refreshAnimationVariables(startTime: time,
initialPosition: planeNode.simdWorldPosition,
finalPosition: imageNode.simdWorldPosition,
initialOrientation: planeNode.simdWorldOrientation,
finalOrientation: imageNode.simdWorldOrientation)
return
}
// 2. Calculate new animationInfo if image position or orientation changed.
if !simd_equal(animationInfo.finalModelPosition, imageNode.simdWorldPosition) || animationInfo.finalModelOrientation != imageNode.simdWorldOrientation {
refreshAnimationVariables(startTime: time,
initialPosition: planeNode.simdWorldPosition,
finalPosition: imageNode.simdWorldPosition,
initialOrientation: planeNode.simdWorldOrientation,
finalOrientation: imageNode.simdWorldOrientation)
}
// 3. Calculate interpolation based on passedTime/totalTime ratio.
let passedTime = time - animationInfo.startTime
var t = min(Float(passedTime/animationInfo.duration), 1)
// Applying curve function to time parameter to achieve "ease out" timing
t = sin(t * .pi * 0.5)
// 4. Calculate and set new model position and orientation.
let f3t = simd_make_float3(t, t, t)
planeNode.simdWorldPosition = simd_mix(animationInfo.initialModelPosition, animationInfo.finalModelPosition, f3t)
planeNode.simdWorldOrientation = simd_slerp(animationInfo.initialModelOrientation, animationInfo.finalModelOrientation, t)
//planeNode.simdWorldOrientation = imageNode.simdWorldOrientation
guard let currentImageAnchor = anchor as? ARImageAnchor else { return }
}
func refreshAnimationVariables(startTime: TimeInterval, initialPosition: float3, finalPosition: float3, initialOrientation: simd_quatf, finalOrientation: simd_quatf) {
let distance = simd_distance(initialPosition, finalPosition)
// Average speed of movement is 0.15 m/s.
let speed = Float(0.15)
// Total time is calculated as distance/speed. Min time is set to 0.1s and max is set to 2s.
let animationDuration = Double(min(max(0.1, distance/speed), 2))
// Store animation information for later usage.
animationInfo = AnimationInfo(startTime: startTime,
duration: animationDuration,
initialModelPosition: initialPosition,
finalModelPosition: finalPosition,
initialModelOrientation: initialOrientation,
finalModelOrientation: finalOrientation)
}
}
In order to do this, I believe you first need to get the size in pixels of the UIImage by multiplying the size values by the value in the scale property.
An example would be something like this:
guard let image = UIImage(named: "launchScreen") else { return }
let pixelWidth = image.size.width * image.scale
let pixelHeight = image.size.height * image.scale
print(pixelWidth, pixelHeight)
The size of my image when made in Adobe Illustrator was 3072 x 4099, and when I logged the results in the console the dimensions were also the same.
Now the tricky part here is calculating the pixels to a size we can use in ARKit, remembering that different devices have a different PPI (Pixels Per Inch) density.
In my example I am just going to use the PPI of an iPhone7Plus which is 401.
//1. Get The PPI Of The iPhone7Plus
let iphone7PlusPixelsPerInch: CGFloat = 401
//2. To Get The Image Size In Inches We Need To Divide By The PPI
let inchWidth = pixelWidth/iphone7PlusPixelsPerInch
let inchHeight = pixelHeight/iphone7PlusPixelsPerInch
//3. Calculate The Size In Metres (There Are 2.54 Cm's In An Inch)
let widthInMetres = (inchWidth * 2.54) / 100
let heightInMeters = (inchHeight * 2.54) / 100
Now that we have the size of our image in metres, it is simple to create an SCNNode of that size, e.g.:
//1. Generate An SCNPlane With The Same Size As Our Image
let realScaleNode = SCNNode(geometry: SCNPlane(width: widthInMetres, height: heightInMeters))
realScaleNode.geometry?.firstMaterial?.diffuse.contents = image
realScaleNode.position = SCNVector3(0, 0, -1)
//2. Add It To Our Hierachy
self.augmentedRealityView.scene.rootNode.addChildNode(realScaleNode)
Hope it helps...
P.S: This may be useful for helping you get the PPI of the Screen (marchv/UIScreenExtension)
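As a supplementary note for the second part of the question (reading the node's own size), which isn't covered above: SceneKit's boundingBox gives the geometry's extent in the node's local space, so multiplying by the node's scale gives its current size. A minimal sketch, using newnode from the question's loadReferenceImage as the example:
let (minVec, maxVec) = newnode.boundingBox
// boundingBox is in local space; multiply by the node's scale to get scene units.
let nodeWidth = (maxVec.x - minVec.x) * newnode.scale.x
let nodeHeight = (maxVec.y - minVec.y) * newnode.scale.y
print("node size in scene units: \(nodeWidth) x \(nodeHeight)")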

Color keying video with GPUImage on a SCNPlane in ARKit

I am trying to play a video with transparency in an ARSCNView. An SCNPlane is used as a projection surface for the video, and I am trying to color key this video with GPUImage.
I followed this example here. Unfortunately, I have not found a way to project that video back onto my videoSpriteKitNode, because the filter is rendered into a GPUImageView, while the SKVideoNode takes an AVPlayer.
I am not sure whether what I am trying to do is possible at all, so if anyone could share their insight I'd be very thankful!
import UIKit
import ARKit
import GPUImage
class ARTransVC: UIViewController{
@IBOutlet weak var sceneView: ARSCNView!
let configuration = ARWorldTrackingConfiguration()
var movie: GPUImageMovie!
var filter: GPUImageChromaKeyBlendFilter!
var sourcePicture: GPUImagePicture!
var player = AVPlayer()
var gpuImageView: GPUImageView!
override func viewDidLoad() {
super.viewDidLoad()
self.sceneView.debugOptions = [ARSCNDebugOptions.showWorldOrigin, ARSCNDebugOptions.showFeaturePoints]
self.sceneView.session.run(configuration)
self.gpuImageView = GPUImageView()
self.gpuImageView.translatesAutoresizingMaskIntoConstraints = false
//a delay for ARKit to capture the surroundings
DispatchQueue.main.asyncAfter(deadline: .now() + 3) {
// A SpriteKit scene to contain the SpriteKit video node
let spriteKitScene = SKScene(size: CGSize(width: self.sceneView.frame.width, height: self.sceneView.frame.height))
spriteKitScene.scaleMode = .aspectFit
// Create a video player, which will be responsible for the playback of the video material
guard let url = Bundle.main.url(forResource: "FY3A4278", withExtension: "mp4") else { return }
let playerItem = AVPlayerItem(url: url)
self.player.replaceCurrentItem(with: playerItem)
//trans
self.filter = GPUImageChromaKeyBlendFilter()
self.filter.thresholdSensitivity = 0.15
self.filter.smoothing = 0.3
self.filter.setColorToReplaceRed(0.322, green: 0.455, blue: 0.831)
self.movie = GPUImageMovie(playerItem: playerItem)
self.movie.playAtActualSpeed = true
self.movie.addTarget(self.filter)
self.movie.startProcessing()
let backgroundImage = UIImage(named: "transparent.png")
self.sourcePicture = GPUImagePicture(image: backgroundImage, smoothlyScaleOutput: true)!
self.sourcePicture.addTarget(self.filter)
self.sourcePicture.processImage()
///HERE DON'T KNOW HOW TO CONTINUE ?
self.filter.addTarget(self.gpuImageView)
// To make the video loop
self.player.actionAtItemEnd = .none
NotificationCenter.default.addObserver(
self,
selector: #selector(ARTransVC.playerItemDidReachEnd),
name: NSNotification.Name.AVPlayerItemDidPlayToEndTime,
object: self.player.currentItem)
// Create the SpriteKit video node, containing the video player
let videoSpriteKitNode = SKVideoNode(avPlayer: self.player)
videoSpriteKitNode.position = CGPoint(x: spriteKitScene.size.width / 2.0, y: spriteKitScene.size.height / 2.0)
videoSpriteKitNode.size = spriteKitScene.size
videoSpriteKitNode.yScale = -1.0
videoSpriteKitNode.play()
spriteKitScene.addChild(videoSpriteKitNode)
// Create the SceneKit scene
let scene = SCNScene()
self.sceneView.scene = scene
self.sceneView.isPlaying = true
// Create a SceneKit plane and add the SpriteKit scene as its material
let background = SCNPlane(width: CGFloat(1), height: CGFloat(1))
background.firstMaterial?.diffuse.contents = spriteKitScene
let backgroundNode = SCNNode(geometry: background)
backgroundNode.geometry?.firstMaterial?.isDoubleSided = true
backgroundNode.position = SCNVector3(0,0,-2.0)
scene.rootNode.addChildNode(backgroundNode)
}
}
@objc func playerItemDidReachEnd(notification: NSNotification) {
if let playerItem: AVPlayerItem = notification.object as? AVPlayerItem {
playerItem.seek(to: kCMTimeZero, completionHandler: nil)
}
}
}
So, I now managed to chroma-key and play a transparent video in the ARSCNView, but it is still a slightly patchy solution.
I stepped away from my former approach and implemented ChromaKeyMaterial from Lësha Turkowski!
Here it is, adjusted to the color I wanted to key:
import SceneKit
public class ChromaKeyMaterial: SCNMaterial {
public var backgroundColor: UIColor {
didSet { didSetBackgroundColor() }
}
public var thresholdSensitivity: Float {
didSet { didSetThresholdSensitivity() }
}
public var smoothing: Float {
didSet { didSetSmoothing() }
}
public init(backgroundColor: UIColor = .green, thresholdSensitivity: Float = 0.15, smoothing: Float = 0.0) {
self.backgroundColor = backgroundColor
self.thresholdSensitivity = thresholdSensitivity
self.smoothing = smoothing
super.init()
didSetBackgroundColor()
didSetThresholdSensitivity()
didSetSmoothing()
// chroma key shader is based on GPUImage
// https://github.com/BradLarson/GPUImage/blob/master/framework/Source/GPUImageChromaKeyFilter.m
let surfaceShader =
"""
uniform vec3 c_colorToReplace;
uniform float c_thresholdSensitivity;
uniform float c_smoothing;
#pragma transparent
#pragma body
vec3 textureColor = _surface.diffuse.rgb;
float maskY = 0.2989 * c_colorToReplace.r + 0.5866 * c_colorToReplace.g + 0.1145 * c_colorToReplace.b;
float maskCr = 0.7132 * (c_colorToReplace.r - maskY);
float maskCb = 0.5647 * (c_colorToReplace.b - maskY);
float Y = 0.2989 * textureColor.r + 0.5866 * textureColor.g + 0.1145 * textureColor.b;
float Cr = 0.7132 * (textureColor.r - Y);
float Cb = 0.5647 * (textureColor.b - Y);
float blendValue = smoothstep(c_thresholdSensitivity, c_thresholdSensitivity + c_smoothing, distance(vec2(Cr, Cb), vec2(maskCr, maskCb)));
float a = blendValue;
_surface.transparent.a = a;
"""
//_surface.transparent.a = a;
shaderModifiers = [
.surface: surfaceShader,
]
}
required public init?(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
//setting background color to be keyed out
private func didSetBackgroundColor() {
//getting pixel from background color
//let rgb = backgroundColor.cgColor.components!.map{Float($0)}
//let vector = SCNVector3(x: rgb[0], y: rgb[1], z: rgb[2])
let vector = SCNVector3(x: 0.216, y: 0.357, z: 0.663)
setValue(vector, forKey: "c_colorToReplace")
}
private func didSetSmoothing() {
setValue(smoothing, forKey: "c_smoothing")
}
private func didSetThresholdSensitivity() {
setValue(thresholdSensitivity, forKey: "c_thresholdSensitivity")
}
}
Here is the code that plays the keyed video in ARKit on a SCNPlane:
import UIKit
import ARKit
class ARTransVC: UIViewController{
@IBOutlet weak var arSceneView: ARSCNView!
let configuration = ARWorldTrackingConfiguration()
private var player: AVPlayer = {
guard let url = Bundle.main.url(forResource: "FY3A4278", withExtension: "mp4") else { fatalError() }
return AVPlayer(url: url)
}()
override func viewDidLoad() {
super.viewDidLoad()
self.arSceneView.debugOptions = [ARSCNDebugOptions.showWorldOrigin, ARSCNDebugOptions.showFeaturePoints]
self.arSceneView.session.run(configuration)
//a delay for ARKit to capture the surroundings
DispatchQueue.main.asyncAfter(deadline: .now() + 3) {
// A SpriteKit scene to contain the SpriteKit video node
let spriteKitScene = SKScene(size: CGSize(width: self.arSceneView.frame.width, height: self.arSceneView.frame.height))
spriteKitScene.scaleMode = .aspectFit
spriteKitScene.backgroundColor = .clear
spriteKitScene.scaleMode = .aspectFit
//Create the SpriteKit video node, containing the video player
let videoSpriteKitNode = SKVideoNode(avPlayer: self.player)
videoSpriteKitNode.position = CGPoint(x: spriteKitScene.size.width / 2.0, y: spriteKitScene.size.height / 2.0)
videoSpriteKitNode.size = spriteKitScene.size
videoSpriteKitNode.yScale = -1.0
videoSpriteKitNode.play()
spriteKitScene.addChild(videoSpriteKitNode)
// To make the video loop
self.player.actionAtItemEnd = .none
NotificationCenter.default.addObserver(
self,
selector: #selector(ARTransVC.playerItemDidReachEnd),
name: NSNotification.Name.AVPlayerItemDidPlayToEndTime,
object: self.player.currentItem)
// Create the SceneKit scene
let scene = SCNScene()
self.arSceneView.scene = scene
//Create a SceneKit plane and add the SpriteKit scene as its material
let background = SCNPlane(width: CGFloat(1), height: CGFloat(1))
background.firstMaterial?.diffuse.contents = spriteKitScene
let chromaKeyMaterial = ChromaKeyMaterial()
chromaKeyMaterial.diffuse.contents = self.player
let backgroundNode = SCNNode(geometry: background)
backgroundNode.geometry?.firstMaterial?.isDoubleSided = true
backgroundNode.geometry!.materials = [chromaKeyMaterial]
backgroundNode.position = SCNVector3(0,0,-2.0)
scene.rootNode.addChildNode(backgroundNode)
//video does not start without delaying the player
//playing the video before just results in [SceneKit] Error: Cannot get pixel buffer (CVPixelBufferRef)
DispatchQueue.main.asyncAfter(deadline: .now() + 1) {
self.player.seek(to:CMTimeMakeWithSeconds(1, 1000))
self.player.play()
}
}
}
@objc func playerItemDidReachEnd(notification: NSNotification) {
if let playerItem: AVPlayerItem = notification.object as? AVPlayerItem {
playerItem.seek(to: kCMTimeZero, completionHandler: nil)
}
}
}
I was getting a [SceneKit] Error: Cannot get pixel buffer (CVPixelBufferRef), which was apparently fixed in iOS 11.2. For now, I just found a rather patchy solution where I restart the video after a one-second delay. A better approach for that would be greatly appreciated.
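One possibly cleaner alternative to the fixed one-second delay (a sketch, untested in this exact project): observe the player item's status and start playback only once it reports .readyToPlay, instead of guessing a delay.
import AVFoundation // if not already imported

// Property on the view controller, so the observation stays alive:
// var statusObservation: NSKeyValueObservation?

// Instead of DispatchQueue.main.asyncAfter(deadline: .now() + 1) { ... }:
if let item = self.player.currentItem {
    self.statusObservation = item.observe(\.status, options: [.initial, .new]) { [weak self] item, _ in
        if item.status == .readyToPlay {
            self?.player.play()
        }
    }
}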
Try clearing the background and setting the scale mode with:
backgroundColor = .clear
scaleMode = .aspectFit

How to build a well working Overlay SK Panel(HUD) on a SCNScene

The app I'm working on is supposed to show a 3D object that the user can color by picking a color. I have an SCNScene with multiple meshes making up a 3D model. I need to build an interactive side panel with colors the user can use to color the 3D model. The code is here on GitHub.
Here is my code (all in one class for now, which I know is bad):
import UIKit
import QuartzCore
import SceneKit
import SpriteKit
class GameViewController: UIViewController {
var cameraOrbit = SCNNode()
let cameraNode = SCNNode()
let camera = SCNCamera()
let floorNode = SCNNode()
var wallNode = SCNNode()
var lateralWallRight = SCNNode()
var lateralWallLeft = SCNNode()
var spotLightNode = SCNNode()
//HANDLE PAN CAMERA
var initialPositionCamera = SCNVector3(x: -25, y: 70, z: 1450)
var translateEnabled = false
var lastXPos:Float = 0.0
var lastYPos:Float = 0.0
var xPos:Float = 0.0
var yPos:Float = 0.0
var lastWidthRatio: Float = 0
var lastHeightRatio: Float = 0.1
var widthRatio: Float = 0
var heightRatio: Float = 0.1
var fingersNeededToPan = 1 //change this from GUI
var panAttenuation: Float = 10 //5.0: very fast ---- 40.0 very slow
let maxWidthRatioRight: Float = 0.2
let maxWidthRatioLeft: Float = -0.2
let maxHeightRatioXDown: Float = 0.065
let maxHeightRatioXUp: Float = 0.4
//HANDLE PINCH CAMERA
var pinchAttenuation = 1.0 //1.0: very fast ---- 100.0 very slow
var lastFingersNumber = 0
let maxPinch = 146.0
let minPinch = 40.0
//OVERLAY
var colorPanelScene = SKScene()
var pickedColor: UIColor = UIColor.whiteColor()
var NodesToColors = [SKSpriteNode: UIColor]()
var didPickColor = false
var OverlayBackground: SKSpriteNode = SKSpriteNode()
func setColors() {
//Color Setup
let ColorWhite = colorPanelScene.childNodeWithName("ColorWhite") as! SKSpriteNode
let ColorRed = colorPanelScene.childNodeWithName("ColorRed") as! SKSpriteNode
let ColorBrown = colorPanelScene.childNodeWithName("ColorBrown")as! SKSpriteNode
let ColorDarkBrown = colorPanelScene.childNodeWithName("ColorDarkBrown")as! SKSpriteNode
let white = UIColor(red:1, green:0.95, blue:0.71, alpha:1)
let brown = UIColor(red:0.49, green:0.26, blue:0.17, alpha:1)
let red = UIColor(red:0.67, green:0.32, blue:0.21, alpha:1)
let darkBrown = UIColor(red:0.27, green:0.25, blue:0.21, alpha:1)
NodesToColors = [
ColorWhite: white,
ColorRed: red,
ColorBrown: brown,
ColorDarkBrown: darkBrown
]
OverlayBackground = colorPanelScene.childNodeWithName("OverlayBackground")as! SKSpriteNode
}
func blur(image image: UIImage) -> UIImage {
let radius: CGFloat = 20;
let context = CIContext(options: nil);
let inputImage = CIImage(CGImage: image.CGImage!);
let filter = CIFilter(name: "CIGaussianBlur");
filter?.setValue(inputImage, forKey: kCIInputImageKey);
filter?.setValue("\(radius)", forKey:kCIInputRadiusKey);
let result = filter?.valueForKey(kCIOutputImageKey) as! CIImage;
let rect = CGRectMake(radius * 2, radius * 2, image.size.width - radius * 4, image.size.height - radius * 4)
let cgImage = context.createCGImage(result, fromRect: rect);
let returnImage = UIImage(CGImage: cgImage);
return returnImage;
}
override func viewDidLoad() {
super.viewDidLoad()
// create a new scene
let scene = SCNScene(named: "art.scnassets/Figure.scn")!
// MARK: Lights
//create and add a light to the scene
let lightNode = SCNNode()
lightNode.light = SCNLight()
lightNode.light!.type = SCNLightTypeOmni
lightNode.position = SCNVector3(x: 0, y: 1000, z: 1000)
scene.rootNode.addChildNode(lightNode)
// create and add an ambient light to the scene
let ambientLightNode = SCNNode()
ambientLightNode.light = SCNLight()
ambientLightNode.light!.type = SCNLightTypeAmbient
ambientLightNode.light!.color = UIColor.darkGrayColor()
scene.rootNode.addChildNode(ambientLightNode)
//MARK: Camera
camera.usesOrthographicProjection = true
camera.orthographicScale = 100
camera.zNear = 10
camera.zFar = 3000
cameraNode.position = initialPositionCamera
cameraNode.camera = camera
cameraOrbit = SCNNode()
cameraOrbit.addChildNode(cameraNode)
scene.rootNode.addChildNode(cameraOrbit)
//initial camera setup
self.cameraOrbit.eulerAngles.y = Float(-2 * M_PI) * lastWidthRatio
self.cameraOrbit.eulerAngles.x = Float(-M_PI) * lastHeightRatio
lastXPos = self.cameraNode.position.x
lastYPos = self.cameraNode.position.y
//MARK: Floor
let floor = SCNFloor()
floor.reflectionFalloffEnd = 0
floor.reflectivity = 0
floorNode.geometry = floor
floorNode.name = "Floor"
floorNode.geometry!.firstMaterial!.diffuse.contents = "art.scnassets/floor.png"
floorNode.geometry!.firstMaterial!.locksAmbientWithDiffuse = true
floorNode.geometry!.firstMaterial!.diffuse.wrapS = SCNWrapMode.Repeat
floorNode.geometry!.firstMaterial!.diffuse.wrapT = SCNWrapMode.Repeat
floorNode.geometry!.firstMaterial!.diffuse.mipFilter = SCNFilterMode.Nearest
floorNode.geometry!.firstMaterial!.doubleSided = false
floorNode.castsShadow = true
scene.rootNode.addChildNode(floorNode)
//MARK: Walls
// create the wall geometry
let wallGeometry = SCNPlane.init(width: 500.0, height: 300.0)
wallGeometry.firstMaterial!.diffuse.contents = "art.scnassets/background.jpg"
wallGeometry.firstMaterial!.diffuse.mipFilter = SCNFilterMode.Nearest
wallGeometry.firstMaterial!.diffuse.wrapS = SCNWrapMode.Repeat
wallGeometry.firstMaterial!.diffuse.wrapT = SCNWrapMode.Repeat
wallGeometry.firstMaterial!.doubleSided = false
wallGeometry.firstMaterial!.locksAmbientWithDiffuse = true
wallNode = SCNNode.init(geometry: wallGeometry)
wallNode.name = "FrontWall"
wallNode.position = SCNVector3Make(0, 120, -300) //this moves all 3 walls
wallNode.castsShadow = true
// RIGHT LATERAL WALL
lateralWallRight = SCNNode.init(geometry: wallGeometry)
lateralWallRight.name = "lateralWallRight"
lateralWallRight.position = SCNVector3Make(-300, -20, 150);
lateralWallRight.rotation = SCNVector4(x: 0, y: 1, z: 0, w: Float(M_PI/3))
lateralWallRight.castsShadow = true
wallNode.addChildNode(lateralWallRight)
// LEFT LATERAL WALL
lateralWallLeft = SCNNode.init(geometry: wallGeometry)
lateralWallLeft.name = "lateralWallLeft"
lateralWallLeft.position = SCNVector3Make(300, -20, 150);
lateralWallLeft.rotation = SCNVector4(x: 0, y: -1, z: 0, w: Float(M_PI/3))
lateralWallLeft.castsShadow = true
wallNode.addChildNode(lateralWallLeft)
//front walls
scene.rootNode.addChildNode(wallNode)
// retrieve the SCNView
let scnView = self.view as! SCNView
// set the scene to the view
scnView.scene = scene
// allows the user to manipulate the camera
scnView.allowsCameraControl = false //not needed
// configure the view
scnView.backgroundColor = UIColor.grayColor()
//MARK: Gesture Recognizer in SceneView
// add a pan gesture recognizer
let panGesture = UIPanGestureRecognizer(target: self, action: #selector(GameViewController.handlePan(_:)))
scnView.addGestureRecognizer(panGesture)
// add a tap gesture recognizer
let tapGesture = UITapGestureRecognizer(target: self, action: #selector(GameViewController.handleTap(_:)))
scnView.addGestureRecognizer(tapGesture)
// add a pinch gesture recognizer
let pinchGesture = UIPinchGestureRecognizer(target: self, action: #selector(GameViewController.handlePinch(_:)))
scnView.addGestureRecognizer(pinchGesture)
//MARK: OverLay
colorPanelScene = SKScene(fileNamed: "art.scnassets/ColorPanelScene")!
scnView.overlaySKScene = colorPanelScene
scnView.overlaySKScene!.userInteractionEnabled = true;
didPickColor = false
setColors()
//let OverlayBackground = colorPanelScene.childNodeWithName("OverlayBackground")as! SKSpriteNode
}
func handlePan(gestureRecognize: UIPanGestureRecognizer) {
let numberOfTouches = gestureRecognize.numberOfTouches()
let translation = gestureRecognize.translationInView(gestureRecognize.view!)
if (numberOfTouches==fingersNeededToPan) {
widthRatio = Float(translation.x) / Float(gestureRecognize.view!.frame.size.width) + lastWidthRatio
heightRatio = Float(translation.y) / Float(gestureRecognize.view!.frame.size.height) + lastHeightRatio
// HEIGHT constraints
if (heightRatio >= maxHeightRatioXUp ) {
heightRatio = maxHeightRatioXUp
}
if (heightRatio <= maxHeightRatioXDown ) {
heightRatio = maxHeightRatioXDown
}
// WIDTH constraints
if(widthRatio >= maxWidthRatioRight) {
widthRatio = maxWidthRatioRight
}
if(widthRatio <= maxWidthRatioLeft) {
widthRatio = maxWidthRatioLeft
}
self.cameraOrbit.eulerAngles.y = Float(-2 * M_PI) * widthRatio
self.cameraOrbit.eulerAngles.x = Float(-M_PI) * heightRatio
lastFingersNumber = fingersNeededToPan
//TRANSLATION pan
} else if numberOfTouches == (fingersNeededToPan+1) {
if translateEnabled {
xPos = (lastXPos + Float(-translation.x))/(panAttenuation)
yPos = (lastYPos + Float(translation.y))/(panAttenuation)
self.cameraNode.position.x = xPos
self.cameraNode.position.y = yPos
}
lastFingersNumber = fingersNeededToPan+1
}
if (lastFingersNumber == fingersNeededToPan && numberOfTouches != fingersNeededToPan) {
lastWidthRatio = widthRatio
lastHeightRatio = heightRatio
}
if lastFingersNumber != (fingersNeededToPan+1) && numberOfTouches != (fingersNeededToPan+1) {
lastXPos = xPos
lastYPos = yPos
}
if (gestureRecognize.state == .Ended) {
if (lastFingersNumber==fingersNeededToPan) {
lastWidthRatio = widthRatio
lastHeightRatio = heightRatio
//print("lastHeight: \(round(lastHeightRatio*100))")
//print("lastWidth: \(round(lastWidthRatio*100))")
}
if lastFingersNumber==(fingersNeededToPan+1) {
lastXPos = xPos
lastYPos = yPos
print("lastX: \(xPos)")
print("lastY: \(yPos)")
}
print("Pan with \(lastFingersNumber) finger\(lastFingersNumber>1 ? "s" : "")")
}
}
func handlePinch(gestureRecognize: UIPinchGestureRecognizer) {
let pinchVelocity = Double.init(gestureRecognize.velocity)
//print("PinchVelocity \(pinchVelocity)")
camera.orthographicScale -= (pinchVelocity/pinchAttenuation)
if camera.orthographicScale <= minPinch {
camera.orthographicScale = minPinch
}
if camera.orthographicScale >= maxPinch {
camera.orthographicScale = maxPinch
}
if (gestureRecognize.state == .Ended) {
print("\nPinch: \(round(camera.orthographicScale))\n")
}
}
func handleTap(gestureRecognize: UIGestureRecognizer) {
print("---------------TAP-----------------")
// retrieve the SCNView
let scnView = self.view as! SCNView
let touchedPointInScene = gestureRecognize.locationInView(scnView)
let hitResults = scnView.hitTest(touchedPointInScene, options: nil)
let OverlayView = colorPanelScene.view! as SKView
let touchedPointInOverlay = gestureRecognize.locationInView(OverlayView)
// if button color are touched
if OverlayBackground.containsPoint(touchedPointInOverlay) {
print("OVERLAY: tap in \(touchedPointInOverlay)")
for (node, color) in NodesToColors {
// Check if the location of the touch is within the button's bounds
if node.containsPoint(touchedPointInOverlay) {
print("\(node.name!) -> color picked \(color.description)")
pickedColor = color
didPickColor = true
}
}
} else {//if figure is touched
// check that we clicked on at least one object
if hitResults.count > 0 && didPickColor {
// retrieved the first clicked object
let result: AnyObject! = hitResults[0]
print("OBJECT tap: \(result.node.name!)")
//Exclude floor and wall from color
if result.node! != floorNode && result.node! != wallNode && result.node! != lateralWallRight && result.node! != lateralWallLeft {
// get its material
let material = result.node!.geometry!.firstMaterial!
print("material: \(material.name!)")
// begin coloration
SCNTransaction.begin()
SCNTransaction.setAnimationDuration(0.5)
// on completion - keep color
SCNTransaction.setCompletionBlock {
SCNTransaction.begin()
SCNTransaction.setAnimationDuration(0.3)
material.diffuse.contents = self.pickedColor
SCNTransaction.commit()
}
SCNTransaction.commit()
material.diffuse.contents = pickedColor
}
}
}
print("-----------------------------------\n")
}
override func prefersStatusBarHidden() -> Bool {
return true
}
override func supportedInterfaceOrientations() -> UIInterfaceOrientationMask {
return .Landscape
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Release any cached data, images, etc that aren't in use.
}
}
The code starts with a setColors function that fetches the color sprites from ColorPanelScene.sks (this SKScene has a strange y-axis movement, and I don't know why).
func setColors() {
//Color Setup
let ColorWhite = colorPanelScene.childNodeWithName("ColorWhite") as! SKSpriteNode
let ColorRed = colorPanelScene.childNodeWithName("ColorRed") as! SKSpriteNode
let ColorBrown = colorPanelScene.childNodeWithName("ColorBrown")as! SKSpriteNode
let ColorDarkBrown = colorPanelScene.childNodeWithName("ColorDarkBrown")as! SKSpriteNode
let white = UIColor(red:1, green:0.95, blue:0.71, alpha:1)
let brown = UIColor(red:0.49, green:0.26, blue:0.17, alpha:1)
let red = UIColor(red:0.67, green:0.32, blue:0.21, alpha:1)
let darkBrown = UIColor(red:0.27, green:0.25, blue:0.21, alpha:1)
NodesToColors = [
ColorWhite: white,
ColorRed: red,
ColorBrown: brown,
ColorDarkBrown: darkBrown
]
OverlayBackground = colorPanelScene.childNodeWithName("OverlayBackground")as! SKSpriteNode
}
Then there is a blur effect function that I would like to apply to the panel background. Do you know how to do that for an SKNode? It would be easy if I were using a UIView instead, but I don't know how to layer views behind the scene. (One possible SpriteKit-only direction is sketched after this function.)
func blur(image image: UIImage) -> UIImage {
let radius: CGFloat = 20;
let context = CIContext(options: nil);
let inputImage = CIImage(CGImage: image.CGImage!);
let filter = CIFilter(name: "CIGaussianBlur");
filter?.setValue(inputImage, forKey: kCIInputImageKey);
filter?.setValue("\(radius)", forKey:kCIInputRadiusKey);
let result = filter?.valueForKey(kCIOutputImageKey) as! CIImage;
let rect = CGRectMake(radius * 2, radius * 2, image.size.width - radius * 4, image.size.height - radius * 4)
let cgImage = context.createCGImage(result, fromRect: rect);
let returnImage = UIImage(CGImage: cgImage);
return returnImage;
}
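Regarding the blur on an SKNode: one direction that might work inside SpriteKit itself (a sketch, not from the original code) is to wrap the panel background in an SKEffectNode that applies a CIGaussianBlur, instead of blurring a UIImage:
let blurEffectNode = SKEffectNode()
let blurFilter = CIFilter(name: "CIGaussianBlur")
blurFilter?.setValue(10.0, forKey: kCIInputRadiusKey)
blurEffectNode.filter = blurFilter
blurEffectNode.shouldEnableEffects = true
blurEffectNode.shouldRasterize = true // render the blur once instead of every frame
// Re-parent the existing background node under the effect node.
OverlayBackground.removeFromParent()
blurEffectNode.addChild(OverlayBackground)
colorPanelScene.addChild(blurEffectNode)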
If you look at the buttons in ColorPanelScene.sks, they have wrong names because I used a workaround to make that panel work. It seems to match color nodes, textures and node names in an inverse way.
That's obviously a bad implementation of a side panel. Please, can you help me build a better interactive panel? Thank you.