Face position using Vision in ARKit - Swift

I added Vision face detection on an ARSCNView, and it can detect the face. Here is how I did it:
public func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
    let faceDetectionRequest = VNDetectFaceRectanglesRequest(completionHandler: { (request: VNRequest, error: Error?) in
        DispatchQueue.main.async {
            self.faceLayers.forEach { drawing in
                drawing.removeFromSuperlayer()
            }
            if let observations = request.results as? [VNFaceObservation] {
                self.handleFaceDetectionObservations(observations: observations)
            }
        }
    })

    guard let capturedImage = sceneView.session.currentFrame?.capturedImage else { return }

    let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: capturedImage, orientation: .leftMirrored, options: [:])

    do {
        try imageRequestHandler.perform([faceDetectionRequest])
    } catch {
        print("perform fail, error: ", error.localizedDescription)
    }
}
fileprivate func handleFaceDetectionObservations(observations: [VNFaceObservation]) {
    for observation in observations {
        let newWidth = sceneView.bounds.width * observation.boundingBox.width
        let newHeight = sceneView.bounds.height * observation.boundingBox.height
        let newX = sceneView.bounds.width * observation.boundingBox.origin.x
        let newY = sceneView.bounds.height * observation.boundingBox.origin.y
        let faceRectConverted = CGRect(x: newX, y: newY, width: newWidth, height: newHeight)
        let faceRectanglePath = CGPath(rect: faceRectConverted, transform: nil)

        let faceLayer = CAShapeLayer()
        faceLayer.path = faceRectanglePath
        faceLayer.fillColor = UIColor.black.cgColor
        self.faceLayers.append(faceLayer)
        self.sceneView.layer.addSublayer(faceLayer)
    }
}
The only issue I have here is that the face position in the view is calculated wrong. The problem seems to come from the camera mirroring: when the face goes right, the face rectangle goes left, and when the face goes up, the rectangle goes down. I don't know how to do the right calculation to map the observation rect to the right place in the sceneView. Could anyone help me with that?
I also have the same problem in landscape, where the rectangle height is more compact...
Thanks
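For what it's worth, here is a minimal sketch of the usual conversion (my sketch, assuming the same faceLayers setup as above, not a confirmed answer): Vision returns boundingBox normalized with its origin at the bottom-left, while CALayer coordinates start at the top-left, so the Y axis has to be flipped when scaling up to the view size. The orientation passed to VNImageRequestHandler also has to match the current interface orientation, which is the likely cause of the compacted rectangle in landscape.
fileprivate func handleFaceDetectionObservations(observations: [VNFaceObservation]) {
    for observation in observations {
        let box = observation.boundingBox
        // Scale the normalized rect up to the view's size...
        let width = sceneView.bounds.width * box.width
        let height = sceneView.bounds.height * box.height
        let x = sceneView.bounds.width * box.origin.x
        // ...and flip Y: Vision measures from the bottom edge, layers from the top.
        let y = sceneView.bounds.height * (1 - box.origin.y - box.height)

        let faceLayer = CAShapeLayer()
        faceLayer.path = CGPath(rect: CGRect(x: x, y: y, width: width, height: height), transform: nil)
        faceLayer.fillColor = UIColor.black.cgColor
        self.faceLayers.append(faceLayer)
        self.sceneView.layer.addSublayer(faceLayer)
    }
}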

Related

Show masking on an object which is between the camera and a wall using RealityKit

I made a video for generating a floor plan, in which I need to capture the wall and floor together at a certain position. If the user is too near to the wall, or if any object comes between the camera and the wall/floor, I need to show a "Too Close" mask on that object, something like what is displayed in this video.
I tried to use a raycast in the session(_ session: ARSession, didUpdate frame: ARFrame) method, but I am very new to AR and don't know which method I need to use.
func session(_ session: ARSession, didUpdate frame: ARFrame) {
    guard let query = self.arView?.makeRaycastQuery(from: self.arView?.center ?? CGPoint.zero,
                                                    allowing: .estimatedPlane,
                                                    alignment: .any)
    else { return }

    guard let raycastResult = self.arView?.session.raycast(query).first
    else { return }

    let currentPositionOfCamera = raycastResult.worldTransform.getPosition()
    if currentPositionOfCamera != .zero {
        let distanceFromCamera = frame.camera.transform.getPosition().distanceFrom(position: currentPositionOfCamera)
        print("Distance from raycast:", distanceFromCamera)
        if distanceFromCamera < 0.5 {
            print("Too Close")
        }
    }
}
I am just learning ARKit and RealityKit as well, but wouldn't your code be:
let currentPositionOfCamera = self.arView.cameraTransform.translation
if currentPositionOfCamera != .zero {
    // distance is defined in simd as the distance between 2 points
    let distanceFromCamera = distance(raycastResult.worldTransform.position, currentPositionOfCamera)
    print("Distance from raycast:", distanceFromCamera)

    if distanceFromCamera < 0.5 {
        print("Too Close")

        let rayDirection = normalize(raycastResult.worldTransform.position - self.arView.cameraTransform.translation)
        // This pulls the text back toward the camera from the plane
        let textPositionInWorldCoordinates = raycastResult.worldTransform.position - (rayDirection * 0.1)
        let textEntity = self.tooCloseModel()
        // This scales the text so it is of a consistent size
        textEntity.scale = .one * distanceFromCamera

        var textPositionWithCameraOrientation = self.arView.cameraTransform
        textPositionWithCameraOrientation.translation = textPositionInWorldCoordinates

        // self.textAnchor is defined somewhere in the class as an optional
        let textAnchor = AnchorEntity(world: textPositionWithCameraOrientation.matrix)
        textAnchor.addChild(textEntity)
        self.arView.scene.addAnchor(textAnchor)
        self.textAnchor = textAnchor
    } else {
        guard let textAnchor = self.textAnchor else { return }
        self.arView.scene.removeAnchor(textAnchor)
        self.textAnchor = nil
    }
}
// Creates a "Too Close" text ModelEntity
func tooCloseModel() -> ModelEntity {
    let lineHeight: CGFloat = 0.05
    let font = MeshResource.Font.systemFont(ofSize: lineHeight)
    let textMesh = MeshResource.generateText("Too Close", extrusionDepth: Float(lineHeight * 0.1), font: font)
    // Apple's sample colors the text per mesh classification; a fixed color works here.
    let textMaterial = SimpleMaterial(color: .red, isMetallic: true)
    let model = ModelEntity(mesh: textMesh, materials: [textMaterial])
    // Center the text
    model.position.x -= model.visualBounds(relativeTo: nil).extents.x / 2
    return model
}
This code is adapted from Apple's Visualizing Scene Semantics.
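Note that worldTransform.position in the snippet above assumes a small helper in the spirit of the asker's getPosition() extension; a minimal version could be:
import simd

extension float4x4 {
    // Pulls the translation column out of a 4x4 transform as a 3D point.
    var position: SIMD3<Float> {
        return SIMD3<Float>(columns.3.x, columns.3.y, columns.3.z)
    }
}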

Add SCNNode or UIView to the center of SCNView in order to detect other SCNNodes

I am trying to add a UIView at the center of my SCNView in order to detect the other SCNTorus nodes added to my scene.
I added a focus point at the center of my sceneView like below:
var focusPoint: CGPoint {
    return CGPoint(
        x: sceneView.bounds.size.width / 2,
        y: sceneView.bounds.size.height - (sceneView.bounds.size.height / 1.618))
}
Then I tried two ways:
1 -
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
    DispatchQueue.main.async { [weak self] in
        guard let strongSelf = self else { return }
        if !strongSelf.inEditMode { return }
        for node in strongSelf.selectionRingsNodes {
            let projectedPoint = renderer.projectPoint(node.position)
            let projectedCGPoint = CGPoint(x: CGFloat(projectedPoint.x), y: CGFloat(projectedPoint.y))
            let distance = projectedCGPoint.distance(to: strongSelf.focusPoint)
            if distance < 20 {
                print(node.name)
            }
        }
    }
}
2 -
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
    DispatchQueue.main.async { [weak self] in
        guard let strongSelf = self else { return }
        if !strongSelf.inEditMode { return }
        for node in strongSelf.selectionRingsNodes {
            let (min, max) = node.boundingBox
            let projectedMinPoint = renderer.projectPoint(min)
            let projectedMinCGPoint = CGPoint(x: CGFloat(projectedMinPoint.x), y: CGFloat(projectedMinPoint.y))
            let projectedMaxPoint = renderer.projectPoint(max)
            let projectedMaxCGPoint = CGPoint(x: CGFloat(projectedMaxPoint.x), y: CGFloat(projectedMaxPoint.y))
            let nodeRect = CGRect(x: projectedMinCGPoint.x,
                                  y: projectedMinCGPoint.y,
                                  width: projectedMaxCGPoint.x - projectedMinCGPoint.x,
                                  height: projectedMaxCGPoint.y - projectedMinCGPoint.y)
            if nodeRect.contains(strongSelf.focusPoint) {
                print(node.name)
            }
        }
    }
}
These two methods return the wrong results: a very big distance, and very big x and y values.
Finally I got the solution!
It turned out that I should convert the position to the scene's world coordinate space, using the method convertPosition(_:to:). The reason is that node.position is expressed in the parent node's coordinate space, while projectPoint(_:) expects world coordinates; passing nil as the destination node converts to world space.
Here is the complete code:
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
    DispatchQueue.main.async { [weak self] in
        guard let strongSelf = self else { return }
        if !strongSelf.inEditMode { return }
        for node in strongSelf.selectionRingsNodes {
            let position = node.convertPosition(SCNVector3Zero, to: nil)
            let projectedPoint = renderer.projectPoint(position)
            let projectedCGPoint = CGPoint(x: CGFloat(projectedPoint.x), y: CGFloat(projectedPoint.y))
            let distance = projectedCGPoint.distance(to: strongSelf.focusPoint)
            if distance < 50 {
                strongSelf.showToast(message: node.getTopMostParentNode().name!, font: .systemFont(ofSize: 30))
            }
        }
    }
}

Get Size of image in SCNNode / ARKit Swift

I'm trying to scan a reference image and then display the image itself above the printed reference image. The "virtual" image size should be the same as the printed size.
My idea: get the size of the printed reference image, then scale the image in the SCNNode to this size (or scale the SCNNode to this size?).
But: 1 -> How do I get the size of the printed image? 2 -> For scaling the SCNNode I need the size of this node, too. How do I get it?
import UIKit
import SceneKit
import ARKit
import AVKit
import AVFoundation

class ViewController: UIViewController, ARSCNViewDelegate {

    @IBOutlet var sceneView: ARSCNView!

    private var planeNode: SCNNode?
    private var imageNode: SCNNode?
    private var animationInfo: AnimationInfo?
    private var currentMediaName: String?
    private var scrollView: UIScrollView!

    override func viewDidLoad() {
        super.viewDidLoad()
        let scene = SCNScene()
        sceneView.scene = scene
        sceneView.delegate = self
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        // Load reference images to look for from "AR Resources" folder
        guard let referenceImages = ARReferenceImage.referenceImages(inGroupNamed: "AR Resources", bundle: nil) else {
            fatalError("Missing expected asset catalog resources.")
        }
        // Create a session configuration
        let configuration = ARWorldTrackingConfiguration()
        // Add previously loaded images to ARScene configuration as detectionImages
        configuration.detectionImages = referenceImages
        // Run the view's session
        sceneView.session.run(configuration)

        let tap = UITapGestureRecognizer(target: self, action: #selector(handleTap(rec:)))
        // Add recognizer to sceneview
        sceneView.addGestureRecognizer(tap)
    }
    // Method called on tap
    @objc func handleTap(rec: UITapGestureRecognizer) {
        let location: CGPoint = rec.location(in: sceneView)
        let hits = self.sceneView.hitTest(location, options: nil)
        if !hits.isEmpty {
            let tappedNode = hits.first?.node
            if tappedNode != nil && tappedNode?.name != nil {
                let stringArr = tappedNode?.name?.components(separatedBy: "-")
                let name = stringArr![0]
                let size = stringArr![1].components(separatedBy: ",")
                let width = Float(size[0])
                let height = Float(size[1])
                loadReferenceImage(tappedNode: tappedNode!, name: name, width: width!, height: height!)
            }
        }
    }

    private func playVideo() {
        guard let path = Bundle.main.path(forResource: "video", ofType: "m4v") else {
            debugPrint("video.m4v not found")
            return
        }
        let player = AVPlayer(url: URL(fileURLWithPath: path))
        let playerController = AVPlayerViewController()
        playerController.player = player
        present(playerController, animated: true) {
            player.play()
        }
    }
    func loadReferenceImage(tappedNode: SCNNode, name: String, width: Float, height: Float) {
        print("TAP")
        print(name)
        let currentNode = tappedNode.parent
        if let image = UIImage(named: "col" + name) {
            let childNodes = currentNode?.childNodes
            for node in childNodes! {
                node.removeFromParentNode()
            }
            let newnode = SCNNode(geometry: SCNPlane(width: CGFloat(width), height: CGFloat(height)))
            newnode.geometry?.firstMaterial?.diffuse.contents = image
            newnode.scale = SCNVector3(x: 10, y: 10, z: 10)
            currentNode?.removeAnimation(forKey: "spin_around")
            let rotation = SCNVector3((currentNode?.eulerAngles.x)! - 0.95, (currentNode?.eulerAngles.y)!, (currentNode?.eulerAngles.z)!)
            currentNode?.eulerAngles = rotation
            // SIZE??????
            let nodex = currentNode?.scale.x
            let nodey = currentNode?.scale.y
            let nodez = currentNode?.scale.z
            let factorx = width / nodex!
            let factory = height / nodey!
            currentNode?.addChildNode(newnode)
        }
    }
    func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
        guard let imageAnchor = anchor as? ARImageAnchor else {
            return
        }
        // 1. Load scene.
        let planeScene = SCNScene(named: "art.scnassets/plane.scn")!
        let planeNode = planeScene.rootNode.childNode(withName: "planeRootNode", recursively: true)!
        // 2. Calculate size based on planeNode's bounding box.
        let (min, max) = planeNode.boundingBox
        let size = SCNVector3Make(max.x - min.x, max.y - min.y, max.z - min.z)
        // 3. Calculate the ratio of difference between real image and object size.
        // Ignore Y axis because it will be pointed out of the image.
        let widthRatio = Float(imageAnchor.referenceImage.physicalSize.width) / 1.2
        let heightRatio = Float(imageAnchor.referenceImage.physicalSize.height) / 1.2
        let width = imageAnchor.referenceImage.physicalSize.width
        let height = imageAnchor.referenceImage.physicalSize.height
        let prefix = "-"
        let imageSize = width.description + "," + height.description
        let targetName = imageAnchor.referenceImage.name! + prefix + imageSize
        // Pick smallest value to be sure that object fits into the image.
        let finalRatio = [widthRatio, heightRatio].min()!
        // 4. Set transform from imageAnchor data.
        planeNode.transform = SCNMatrix4(imageAnchor.transform)
        // 5. Animate appearance by scaling model from 0 to previously calculated value.
        let appearanceAction = SCNAction.scale(to: CGFloat(finalRatio), duration: 0.4)
        appearanceAction.timingMode = .easeOut
        // Set initial scale to 0.
        planeNode.scale = SCNVector3Make(0, 0, 0)
        // Rotate around Y.
        let spin = CABasicAnimation(keyPath: "rotation")
        spin.fromValue = NSValue(scnVector4: SCNVector4(x: 0, y: 1, z: 0, w: 0))
        spin.toValue = NSValue(scnVector4: SCNVector4(x: 0, y: 1, z: 0, w: Float(2 * Double.pi)))
        spin.duration = 4
        spin.repeatCount = .infinity
        planeNode.addAnimation(spin, forKey: "spin_around")
        // Add to root node.
        sceneView.scene.rootNode.addChildNode(planeNode)
        // Run the appearance animation.
        planeNode.runAction(appearanceAction)
        planeNode.name = targetName
        let nodes = planeNode.childNodes
        for node in nodes {
            node.name = targetName
        }
        self.planeNode = planeNode
        self.imageNode = node
    }
    // Note: the original posted signature mixed didAdd and updateAtTime;
    // the per-frame animation below belongs in renderer(_:updateAtTime:).
    func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
        guard let imageNode = imageNode, let planeNode = planeNode else {
            return
        }
        // 1. Unwrap animationInfo. Calculate animationInfo if it is nil.
        guard let animationInfo = animationInfo else {
            refreshAnimationVariables(startTime: time,
                                      initialPosition: planeNode.simdWorldPosition,
                                      finalPosition: imageNode.simdWorldPosition,
                                      initialOrientation: planeNode.simdWorldOrientation,
                                      finalOrientation: imageNode.simdWorldOrientation)
            return
        }
        // 2. Calculate new animationInfo if image position or orientation changed.
        if !simd_equal(animationInfo.finalModelPosition, imageNode.simdWorldPosition) || animationInfo.finalModelOrientation != imageNode.simdWorldOrientation {
            refreshAnimationVariables(startTime: time,
                                      initialPosition: planeNode.simdWorldPosition,
                                      finalPosition: imageNode.simdWorldPosition,
                                      initialOrientation: planeNode.simdWorldOrientation,
                                      finalOrientation: imageNode.simdWorldOrientation)
        }
        // 3. Calculate interpolation based on passedTime/totalTime ratio.
        let passedTime = time - animationInfo.startTime
        var t = min(Float(passedTime / animationInfo.duration), 1)
        // Applying curve function to time parameter to achieve "ease out" timing.
        t = sin(t * .pi * 0.5)
        // 4. Calculate and set new model position and orientation.
        let f3t = simd_make_float3(t, t, t)
        planeNode.simdWorldPosition = simd_mix(animationInfo.initialModelPosition, animationInfo.finalModelPosition, f3t)
        planeNode.simdWorldOrientation = simd_slerp(animationInfo.initialModelOrientation, animationInfo.finalModelOrientation, t)
    }
    func refreshAnimationVariables(startTime: TimeInterval, initialPosition: float3, finalPosition: float3, initialOrientation: simd_quatf, finalOrientation: simd_quatf) {
        let distance = simd_distance(initialPosition, finalPosition)
        // Average speed of movement is 0.15 m/s.
        let speed = Float(0.15)
        // Total time is calculated as distance/speed. Min time is set to 0.1s and max is set to 2s.
        let animationDuration = Double(min(max(0.1, distance / speed), 2))
        // Store animation information for later usage.
        animationInfo = AnimationInfo(startTime: startTime,
                                      duration: animationDuration,
                                      initialModelPosition: initialPosition,
                                      finalModelPosition: finalPosition,
                                      initialModelOrientation: initialOrientation,
                                      finalModelOrientation: finalOrientation)
    }
}
In order to do this, I believe that first you need to get the size in pixels of the UIImage, by multiplying the size values by the value in the scale property.
As such, an example would be something like so:
guard let image = UIImage(named: "launchScreen") else { return }
let pixelWidth = image.size.width * image.scale
let pixelHeight = image.size.height * image.scale
print(pixelWidth, pixelHeight)
The size of my image when made in Adobe Illustrator was 3072 x 4099, and when I logged the results in the console the dimensions were the same.
Now the tricky part here is converting the pixels to a size we can use in ARKit, remembering that different devices have a different PPI (Pixels Per Inch) density.
In my example I am just going to use the PPI of an iPhone 7 Plus, which is 401.
//1. Get The PPI Of The iPhone 7 Plus
let iphone7PlusPixelsPerInch: CGFloat = 401

//2. To Get The Image Size In Inches We Need To Divide By The PPI
let inchWidth = pixelWidth / iphone7PlusPixelsPerInch
let inchHeight = pixelHeight / iphone7PlusPixelsPerInch

//3. Calculate The Size In Meters (There Are 2.54cm In An Inch)
let widthInMeters = (inchWidth * 2.54) / 100
let heightInMeters = (inchHeight * 2.54) / 100
Now that we have the size of our image in meters, it is simple to create an SCNNode of that size, e.g:
//1. Generate An SCNPlane With The Same Size As Our Image
let realScaleNode = SCNNode(geometry: SCNPlane(width: widthInMeters, height: heightInMeters))
realScaleNode.geometry?.firstMaterial?.diffuse.contents = image
realScaleNode.position = SCNVector3(0, 0, -1)

//2. Add It To Our Hierarchy
self.augmentedRealityView.scene.rootNode.addChildNode(realScaleNode)
Hope it helps...
P.S: This may be useful for helping you get the PPI of the Screen (marchv/UIScreenExtension)
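One more note, offered as a sketch building on the above rather than part of the original answer: once ARKit actually detects the reference image, imageAnchor.referenceImage.physicalSize already reports the printed size in meters (as entered in the asset catalog), so the plane can be sized directly without the PPI detour:
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
    guard let imageAnchor = anchor as? ARImageAnchor else { return }
    // physicalSize is the printed width/height in meters from the asset catalog.
    let physicalSize = imageAnchor.referenceImage.physicalSize
    let plane = SCNPlane(width: physicalSize.width, height: physicalSize.height)
    plane.firstMaterial?.diffuse.contents = UIImage(named: "myImage") // hypothetical image name
    let planeNode = SCNNode(geometry: plane)
    // SCNPlane is vertical by default; rotate it to lie flat on the detected image.
    planeNode.eulerAngles.x = -.pi / 2
    node.addChildNode(planeNode)
}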

How to play a local video when an image is recognized using ARKit in Swift?

I have image recognition working using ARKit; when an image is detected, I need to show and play a video in the presented scene (above the detected image).
lazy var fadeAndSpinAction: SCNAction = {
    return .sequence([
        .fadeIn(duration: fadeDuration),
        .rotateBy(x: 0, y: 0, z: CGFloat.pi * 360 / 180, duration: rotateDuration),
        .wait(duration: waitDuration),
        .fadeOut(duration: fadeDuration)
    ])
}()

lazy var fadeAction: SCNAction = {
    return .sequence([
        .fadeOpacity(by: 0.8, duration: fadeDuration),
        .wait(duration: waitDuration),
        .fadeOut(duration: fadeDuration)
    ])
}()

lazy var fishNode: SCNNode = {
    guard let scene = SCNScene(named: "Catfish1.scn"),
          let node = scene.rootNode.childNode(withName: "Catfish1", recursively: false) else { return SCNNode() }
    let scaleFactor = 0.005
    node.scale = SCNVector3(scaleFactor, scaleFactor, scaleFactor)
    node.eulerAngles.x = -.pi / 2
    return node
}()

override func viewDidLoad() {
    super.viewDidLoad()
    // Do any additional setup after loading the view, typically from a nib.
    sceneView.delegate = self
    configureLighting()
}

func configureLighting() {
    sceneView.autoenablesDefaultLighting = true
    sceneView.automaticallyUpdatesLighting = true
}

override func viewWillAppear(_ animated: Bool) {
    resetTrackingConfiguration()
}

func resetTrackingConfiguration() {
    guard let referenceImages = ARReferenceImage.referenceImages(inGroupNamed: "AR Resources", bundle: nil) else { return }
    let configuration = ARWorldTrackingConfiguration()
    configuration.detectionImages = referenceImages
    let options: ARSession.RunOptions = [.resetTracking, .removeExistingAnchors]
    sceneView.session.run(configuration, options: options)
    statusLabel.text = "Move camera around to detect images"
}
extension ViewController: ARSCNViewDelegate {
    func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
        DispatchQueue.main.async {
            guard let imageAnchor = anchor as? ARImageAnchor,
                  let imageName = imageAnchor.referenceImage.name else { return }
            // TODO: Overlay 3D Object
            let overlayNode = self.getNode(withImageName: imageName)
            overlayNode.opacity = 0
            overlayNode.position.y = 0.2
            overlayNode.runAction(self.fadeAndSpinAction)
            node.addChildNode(overlayNode)
            self.statusLabel.text = "Image detected: \"\(imageName)\""

            self.videoNode.geometry = SCNPlane(width: 1276.0 / 2.0, height: 712.0 / 2.0)
            self.spriteKitScene.scaleMode = .aspectFit
            self.videoSpriteKitNode?.position = CGPoint(x: self.spriteKitScene.size.width / 2.0, y: self.spriteKitScene.size.height / 2.0)
            self.videoSpriteKitNode?.size = self.spriteKitScene.size
            self.spriteKitScene.addChild(self.videoSpriteKitNode!)
            self.videoNode.geometry?.firstMaterial?.diffuse.contents = self.spriteKitScene

            var transform = SCNMatrix4MakeRotation(Float.pi, 0.0, 0.0, 1.0)
            transform = SCNMatrix4Translate(transform, 1.0, 1.0, 0)
            self.videoNode.geometry?.firstMaterial?.diffuse.contentsTransform = transform
            self.videoNode.position = SCNVector3(x: 0, y: 30, z: 7)
            node.addChildNode(self.videoNode)
            self.videoSpriteKitNode?.play()
        }
    }

    func getPlaneNode(withReferenceImage image: ARReferenceImage) -> SCNNode {
        let plane = SCNPlane(width: image.physicalSize.width,
                             height: image.physicalSize.height)
        let node = SCNNode(geometry: plane)
        return node
    }
}
Looking at your code, firstly you are setting your SCNPlane to be 638 meters wide and 356 meters tall; I'm sure that's not what you actually want ^________^.
Anyway, here is an example of playing a local video using an SKScene & SKVideoNode which works well:
//--------------------------
// MARK: - ARSCNViewDelegate
//--------------------------

extension ViewController: ARSCNViewDelegate {

    func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
        //1. Check We Have An ARImageAnchor And Have Detected Our Reference Image
        guard let imageAnchor = anchor as? ARImageAnchor else { return }
        let referenceImage = imageAnchor.referenceImage

        //2. Get The Physical Width & Height Of Our Reference Image
        let width = CGFloat(referenceImage.physicalSize.width)
        let height = CGFloat(referenceImage.physicalSize.height)

        //3. Create An SCNNode To Hold Our Video Player With The Same Size As The Image Target
        let videoHolder = SCNNode()
        let videoHolderGeometry = SCNPlane(width: width, height: height)
        videoHolder.transform = SCNMatrix4MakeRotation(-Float.pi / 2, 1, 0, 0)
        videoHolder.geometry = videoHolderGeometry

        //4. Create Our Video Player
        if let videoURL = Bundle.main.url(forResource: "BlackMirrorz", withExtension: "mp4") {
            setupVideoOnNode(videoHolder, fromURL: videoURL)
        }

        //5. Add It To The Hierarchy
        node.addChildNode(videoHolder)
    }

    /// Creates A Video Player As An SCNGeometry's Diffuse Contents
    ///
    /// - Parameters:
    ///   - node: SCNNode
    ///   - url: URL
    func setupVideoOnNode(_ node: SCNNode, fromURL url: URL) {
        //1. Create An SKVideoNode
        var videoPlayerNode: SKVideoNode!

        //2. Create An AVPlayer With Our Video URL
        let videoPlayer = AVPlayer(url: url)

        //3. Initialize The Video Node With Our Video Player
        videoPlayerNode = SKVideoNode(avPlayer: videoPlayer)
        videoPlayerNode.yScale = -1

        //4. Create A SpriteKit Scene & Position It
        let spriteKitScene = SKScene(size: CGSize(width: 600, height: 300))
        spriteKitScene.scaleMode = .aspectFit
        videoPlayerNode.position = CGPoint(x: spriteKitScene.size.width / 2, y: spriteKitScene.size.height / 2)
        videoPlayerNode.size = spriteKitScene.size
        spriteKitScene.addChild(videoPlayerNode)

        //5. Set The Node's Geometry Diffuse Contents To Our SpriteKit Scene
        node.geometry?.firstMaterial?.diffuse.contents = spriteKitScene

        //6. Play The Video
        videoPlayerNode.play()
        videoPlayer.volume = 0
    }
}
Update:
If you want to place the video above the target you can do something like the following:
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
    //1. Check We Have An ARImageAnchor And Have Detected Our Reference Image
    guard let imageAnchor = anchor as? ARImageAnchor else { return }
    let referenceImage = imageAnchor.referenceImage

    //2. Get The Physical Width & Height Of Our Reference Image
    let width = CGFloat(referenceImage.physicalSize.width)
    let height = CGFloat(referenceImage.physicalSize.height)

    //3. Create An SCNNode To Hold Our Video Player
    let videoHolder = SCNNode()
    let planeHeight = height / 2
    let videoHolderGeometry = SCNPlane(width: width, height: planeHeight)
    videoHolder.transform = SCNMatrix4MakeRotation(-Float.pi / 2, 1, 0, 0)
    videoHolder.geometry = videoHolderGeometry

    //4. Place It Above The Target
    let zPosition = height - (planeHeight / 2)
    videoHolder.position = SCNVector3(0, 0, -zPosition)

    //5. Create Our Video Player
    if let videoURL = Bundle.main.url(forResource: "BlackMirrorz", withExtension: "mp4") {
        setupVideoOnNode(videoHolder, fromURL: videoURL)
    }

    //6. Add It To The Hierarchy
    node.addChildNode(videoHolder)
}
Hope it helps...
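If the video should also loop (an addition beyond the original answer), one common approach is to observe AVPlayerItemDidPlayToEndTime and rewind, placed in setupVideoOnNode right after the AVPlayer is created:
// Loop the clip: rewind and replay whenever playback reaches the end.
NotificationCenter.default.addObserver(forName: .AVPlayerItemDidPlayToEndTime,
                                       object: videoPlayer.currentItem,
                                       queue: .main) { _ in
    videoPlayer.seek(to: .zero)
    videoPlayer.play()
}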

ARKit: Calculate the distance from a wall to the camera

I'm developing a project with ARKit. I want to calculate the distance from a wall to the camera, and have it update when I move away or move closer.
Right now I have detection of horizontal and vertical surfaces activated. When I get a surface, I calculate the distance between the camera position and the center of the surface, applying the formula for the distance between two points in 3D space (Euclidean):
https://math.stackexchange.com/questions/42640/calculate-distance-in-3d-space
Is it correct? Can you help me?
class ViewController: UIViewController, ARSCNViewDelegate, ARSessionDelegate {

    let configuration = ARWorldTrackingConfiguration()

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        configuration.planeDetection = [.horizontal, .vertical]
        sceneView.session.run(configuration)
        ......
    }

    func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
        guard let planeAnchor = anchor as? ARPlaneAnchor else { return }
        let plane = SCNPlane(width: CGFloat(planeAnchor.extent.x), height: CGFloat(planeAnchor.extent.z))
        let planeNode = SCNNode(geometry: plane)
        planeNode.simdPosition = float3(planeAnchor.center.x, 0, planeAnchor.center.z)
        planeNode.eulerAngles.x = -.pi / 2
        node.addChildNode(planeNode)

        let distance = distanceFromCamera(x: planeAnchor.center.x, y: 0, z: planeAnchor.center.z)
        let formatted = String(format: "Distance: %.2f", distance)
        print(formatted)
    }

    private func distanceFromCamera(x: Float, y: Float, z: Float) -> Float {
        let cameraPosition = self.sceneView.session.currentFrame!.camera.transform.columns.3
        print("Camera: \(cameraPosition)")
        let vector = SCNVector3Make(cameraPosition.x - x, cameraPosition.y - y, cameraPosition.z - z)
        // Scene units map to meters in ARKit.
        return sqrtf(vector.x * vector.x + vector.y * vector.y + vector.z * vector.z)
    }
}
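One caveat on the code above: planeAnchor.center is expressed in the anchor's local coordinate space, so strictly speaking it should be brought into world space before comparing it against the camera's world position. A sketch of that conversion:
// planeAnchor.center is local to the anchor; transform it into world space
// before measuring against the camera's world position.
let localCenter = simd_float4(planeAnchor.center.x, planeAnchor.center.y, planeAnchor.center.z, 1)
let worldCenter = planeAnchor.transform * localCenter
let distance = distanceFromCamera(x: worldCenter.x, y: worldCenter.y, z: worldCenter.z)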
Add the following method:
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
    guard let currentBall = self.currentBall else { return }
    DispatchQueue.main.async {
        if let centerPosition = self.hitTestCenterVector() {
            let startPositionOfBall = currentBall.position
            let distance = self.getDistanceBetween(vector1: centerPosition, vector2: startPositionOfBall)
            self.lblDistance.text = String(format: "%.1f", distance) // meters
        }
    }
}
Just replace self.currentBall in the guard statement with the SCNNode from which you want to calculate the distance.
Now these are the methods for the calculations:
func hitTestCenterVector() -> SCNVector3? {
    let results = self.sceneView.hitTest(self.sceneView.center, types: .existingPlane)
    if let firstObject = results.first {
        return SCNVector3(firstObject.worldTransform.columns.3.x,
                          firstObject.worldTransform.columns.3.y,
                          firstObject.worldTransform.columns.3.z)
    }
    return nil
}

func getDistanceBetween(vector1: SCNVector3, vector2: SCNVector3) -> CGFloat {
    return CGFloat(sqrt((vector1.x - vector2.x) * (vector1.x - vector2.x)
                      + (vector1.y - vector2.y) * (vector1.y - vector2.y)
                      + (vector1.z - vector2.z) * (vector1.z - vector2.z)))
}
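As a side note, simd already provides a distance function, so getDistanceBetween can be collapsed to a one-liner (a sketch, assuming the same SCNVector3 inputs):
import simd

func getDistanceBetween(vector1: SCNVector3, vector2: SCNVector3) -> CGFloat {
    // simd_distance returns the Euclidean distance between two points.
    return CGFloat(simd_distance(simd_float3(vector1.x, vector1.y, vector1.z),
                                 simd_float3(vector2.x, vector2.y, vector2.z)))
}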
Hope it is helpful