Displaying an "ARAnchor" in ARSCNView - swift

In ARKit we can visualize the cloud of feature points detected in an ARSession via the .showFeaturePoints type property:
self.sceneView.debugOptions = [ARSCNDebugOptions.showFeaturePoints]
Also, we can display a coordinate axis visualization indicating the position and orientation of the AR World Coordinate System:
static let showWorldOrigin: SCNDebugOptions
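Both debug options can be enabled at the same time, since debugOptions is an option set, e.g.:
self.sceneView.debugOptions = [ARSCNDebugOptions.showFeaturePoints, ARSCNDebugOptions.showWorldOrigin]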
But is it possible to show ARAnchors in ARSCNView?
And if yes, how could we do it?

Just to follow up on @sj-r's and @Rickster's comments.
The example code that @Rickster was talking about in regard to coordinateOrigin.scn can be found here: Creating Face-Based Experiences
And here is a little snippet I have used before to visualize the axes:
class BMOriginVisualizer: SCNNode {

    //----------------------
    // MARK: - Initialization
    //----------------------

    /// Creates An AxisNode To Visualize ARAnchors
    ///
    /// - Parameter scale: CGFloat
    init(scale: CGFloat = 1) {
        super.init()

        //1. Create The X Axis
        let xNode = SCNNode()
        let xNodeGeometry = SCNBox(width: 1, height: 0.01, length: 0.01, chamferRadius: 0)
        xNode.geometry = xNodeGeometry
        xNodeGeometry.firstMaterial?.diffuse.contents = UIColor.red
        xNode.position = SCNVector3(0.5, 0, 0)
        self.addChildNode(xNode)

        //2. Create The Y Axis
        let yNode = SCNNode()
        let yNodeGeometry = SCNBox(width: 0.01, height: 1, length: 0.01, chamferRadius: 0)
        yNode.geometry = yNodeGeometry
        yNode.position = SCNVector3(0, 0.5, 0)
        yNodeGeometry.firstMaterial?.diffuse.contents = UIColor.green
        self.addChildNode(yNode)

        //3. Create The Z Axis
        let zNode = SCNNode()
        let zNodeGeometry = SCNBox(width: 0.01, height: 0.01, length: 1, chamferRadius: 0)
        zNode.geometry = zNodeGeometry
        zNodeGeometry.firstMaterial?.diffuse.contents = UIColor.blue
        zNode.position = SCNVector3(0, 0, 0.5)
        self.addChildNode(zNode)

        //4. Scale Our Axes
        self.scale = SCNVector3(scale, scale, scale)
    }

    required init?(coder aDecoder: NSCoder) { fatalError("Visualizer Coder Not Implemented") }

}
Which can be initialised like so:
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
    let anchorVisualizer = BMOriginVisualizer(scale: 0.5)
    node.addChildNode(anchorVisualizer)
}
Hopefully this will prove useful as an expansion to the answer provided by @sj-r.

An ARAnchor only represents a position and orientation; the things you can see are SCNNodes.
You can attach a node to each anchor you add via a method in ARSCNViewDelegate:
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    // Create a node so you can visualize the location of the anchor.
    let sphereNode = SCNNode(geometry: SCNSphere(radius: 0.5))
    sphereNode.geometry?.firstMaterial?.diffuse.contents = UIColor.blue
    return sphereNode
}
This is called after you add an anchor (or when the system adds anchors itself, for example when you have plane detection or image/object detection turned on):
sceneView.session.add(anchor:)
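If you are adding anchors manually, a minimal sketch looks like the following (placing the anchor one metre in front of the camera is just an illustrative choice); adding the anchor is what triggers the delegate callback above:

/// Adds An ARAnchor 1m In Front Of The Camera (assumes `sceneView` is your ARSCNView and a session is running).
func addAnchorInFrontOfCamera() {
    guard let currentFrame = sceneView.session.currentFrame else { return }

    // Build a transform 1m in front of the current camera pose.
    var translation = matrix_identity_float4x4
    translation.columns.3.z = -1.0
    let transform = simd_mul(currentFrame.camera.transform, translation)

    // Adding the anchor causes renderer(_:nodeFor:) / renderer(_:didAdd:for:) to be called.
    sceneView.session.add(anchor: ARAnchor(transform: transform))
}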

Related

How can I reuse an ARAnchor created for a an ARReferenceObject so that the augmentation of the object can be started later with a button?

I am new to programming, so I fear the answer might be obvious, but I can’t find it anywhere and would be very grateful for some help!
I am trying to augment a soda can with different AR experiences that one can activate by pressing different buttons.
As long as I add only one AR experience immediately in the renderer function (func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode?), everything works fine.
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    let node = SCNNode()
    if let objectAnchor = anchor as? ARObjectAnchor {
        print("Yes, we can!")
        node.position = SCNVector3Make(objectAnchor.referenceObject.center.x, objectAnchor.referenceObject.center.y, objectAnchor.referenceObject.center.z)
        node.geometry = SCNBox(width: 0.05, height: 0.05, length: 0.05, chamferRadius: 0)
        node.geometry?.firstMaterial?.diffuse.contents = UIColor.blue
        self.sceneView.scene.rootNode.addChildNode(node)
    }
    return node
}
The box is attached to the middle of the soda can:
But I don’t want the augmentation to begin automatically when the soda can is detected, only when the button is pressed. So I tried to split the code. First the soda can is detected, and an ARAnchor is created and added to an array:
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    let node = SCNNode()
    if let objectAnchor = anchor as? ARObjectAnchor {
        print("Yes, we can!")
        node.position = SCNVector3Make(objectAnchor.referenceObject.center.x, objectAnchor.referenceObject.center.y, objectAnchor.referenceObject.center.z)
        objectAnchors.append(objectAnchor)
        self.sceneView.scene.rootNode.addChildNode(node)
    }
    return node
}
And then I try to retrieve the ARAnchor when I press the button:
@IBAction func startParkour(_ sender: Any) {
    print("Button pressed!")
    let objectAnchor = objectAnchors[0]
    let node = SCNNode()
    node.geometry = SCNBox(width: 0.05, height: 0.05, length: 0.05, chamferRadius: 0)
    node.geometry?.firstMaterial?.diffuse.contents = UIColor.green
    node.position = SCNVector3Make(objectAnchor.referenceObject.center.x, objectAnchor.referenceObject.center.y, objectAnchor.referenceObject.center.z)
    self.sceneView.scene.rootNode.addChildNode(node)
}
But the result, unfortunately, is that the object is now added at the point where I was holding my camera when I pressed the button, rather than onto the soda can:
Does anyone know what I am doing wrong?
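One common way to approach this (a minimal sketch, not a verified fix) is to keep a reference to the node ARKit associates with the detected anchor and add your content as a child of that node when the button is pressed; child positions are then relative to the can rather than to the world origin. Here sodaCanNode is an assumed new property, and objectAnchors is the array from the question:

var sodaCanNode: SCNNode?

func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    let node = SCNNode()
    if let objectAnchor = anchor as? ARObjectAnchor {
        objectAnchors.append(objectAnchor)
        // Keep the anchor's node so content can be attached to it later.
        sodaCanNode = node
    }
    return node
}

@IBAction func startParkour(_ sender: Any) {
    guard let objectAnchor = objectAnchors.first, let canNode = sodaCanNode else { return }
    let box = SCNNode(geometry: SCNBox(width: 0.05, height: 0.05, length: 0.05, chamferRadius: 0))
    box.geometry?.firstMaterial?.diffuse.contents = UIColor.green
    // The position is now relative to the detected can, not to the world origin.
    box.position = SCNVector3Make(objectAnchor.referenceObject.center.x,
                                  objectAnchor.referenceObject.center.y,
                                  objectAnchor.referenceObject.center.z)
    canNode.addChildNode(box)
}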

Which image is being used as reference in ARSceneView?

I am currently making an AR app that uses image tracking.
I want it to have 2 images in the reference images folder (Image A: ~670x210, and Image B: ~1123x794).
How do I check which image the camera is detecting?
For example, if the camera is seeing Image A, I want to make a plane twice the size of the physical object; if it's seeing Image B, I want to make the plane the same size as the physical object.
Here's a snippet of my code, if needed:
override func viewWillAppear(_ animated: Bool) {
    super.viewWillAppear(animated)

    // Create a session configuration
    let configuration = ARImageTrackingConfiguration()
    if let trackedImage = ARReferenceImage.referenceImages(inGroupNamed: "ARpaperImage", bundle: Bundle.main) {
        configuration.trackingImages = trackedImage
        configuration.maximumNumberOfTrackedImages = 1
    }

    // Run the view's session
    sceneView.session.run(configuration)
}
// MARK: - ARSCNViewDelegate
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    let node = SCNNode()
    if let imageAnchor = anchor as? ARImageAnchor {
        let plane = SCNPlane(width: imageAnchor.referenceImage.physicalSize.width,
                             height: imageAnchor.referenceImage.physicalSize.height)
        plane.firstMaterial?.diffuse.contents = UIColor(white: 1.0, alpha: 0.5)
        let planeNode = SCNNode(geometry: plane)
        planeNode.eulerAngles.x = -.pi / 2
        node.addChildNode(planeNode)
    }
    return node
}
An ARReferenceImage has a name property, which you can use to determine which reference image has been detected. The documentation describes it simply as:
A descriptive name for the image
As such, when you put your ARReferenceImages into your resource bundle, you can give them names (and probably already have), e.g.:
You can then use these names within the following delegate callback to handle your requirements:
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor)
As such, let's assume we have two ARReferenceImages called Brown and Choco. Using these names we can then create logic to display different content, e.g.:
//-------------------------
// MARK: - ARSCNViewDelegate
//-------------------------

extension ViewController: ARSCNViewDelegate {

    func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {

        //1. Check We Have Detected An ARImageAnchor
        guard let validAnchor = anchor as? ARImageAnchor,
              let referenceImageName = validAnchor.referenceImage.name else { return }

        //2. Get The Physical Size Of The Reference Image (This Is Specified When You Create Your Target In The ARResource Bundle)
        let physicalSizeOfReferenceImage = CGSize(width: validAnchor.referenceImage.physicalSize.width,
                                                  height: validAnchor.referenceImage.physicalSize.height)

        //3. Log The Data For Debugging Purposes
        print("""
            Detected ARAnchorID = \(validAnchor.identifier)
            Detected Reference Image Name = \(referenceImageName)
            Detected Reference Image Physical Size (width) = \(physicalSizeOfReferenceImage.width)
            Detected Reference Image Physical Size (height) = \(physicalSizeOfReferenceImage.height)
            """)

        //4. Perform Instantiation & Other Logic Based On The Reference Image Detected
        if referenceImageName == "Brown" {
            node.addChildNode(planeNodeOfSize(CGSize(width: physicalSizeOfReferenceImage.width * 2,
                                                     height: physicalSizeOfReferenceImage.height * 2)))
        } else if referenceImageName == "Choco" {
            node.addChildNode(planeNodeOfSize(CGSize(width: physicalSizeOfReferenceImage.width,
                                                     height: physicalSizeOfReferenceImage.height)))
        }
    }

    /// Generates An SCNNode With An SCNPlane Geometry Of A Specified Width & Height
    ///
    /// - Parameter size: CGSize
    /// - Returns: SCNNode
    func planeNodeOfSize(_ size: CGSize) -> SCNNode {

        //1. Create An SCNPlane Of Our Chosen Size
        let planeNode = SCNNode()
        let planeGeometry = SCNPlane(width: size.width, height: size.height)
        planeGeometry.firstMaterial?.diffuse.contents = UIColor.white
        planeNode.opacity = 0.25
        planeNode.geometry = planeGeometry

        //2. Rotate The PlaneNode To Horizontal
        planeNode.eulerAngles.x = -.pi / 2

        //3. Return Our Node
        return planeNode
    }
}
Hope it helps...

3D Model is shaky with ARKit in Xcode

I am using ARKit's image detection to place a 3D object when a certain image is detected. Everything works fine except for the 3D model that is being created: it shakes like crazy. I double checked, and the reference image has the right measurements.
I call addModel() when the image is detected. Here is what my code looks like.
Finding reference Image:
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    let node = SCNNode()
    if let imageAnchor = anchor as? ARImageAnchor {
        let plane = SCNPlane(width: imageAnchor.referenceImage.physicalSize.width,
                             height: imageAnchor.referenceImage.physicalSize.height)
        let planeNode = SCNNode(geometry: plane)
        addModel(addTo: planeNode)
        node.addChildNode(planeNode)
    }
    return node
}
The addModel() function looks like this:
func addModel(addTo: SCNNode) {
    let testScene = SCNScene(named: "art.scnassets/testModel.scn")
    let testNode = testScene?.rootNode.childNode(withName: "test", recursively: true)
    let testMaterial = SCNMaterial()
    testMaterial.diffuse.contents = UIImage(named: "art.scnassets/bricks")
    testNode?.geometry?.materials = [testMaterial]
    testNode!.position = SCNVector3Zero
    testNode!.position.x = -0.3
    testNode!.position.z = 0.3
    addTo.addChildNode(testNode!)
}
Did you try to delete these two lines?
testNode!.position.x = -0.3
testNode!.position.z = 0.3
Because when you write testNode!.position = SCNVector3Zero you are saying that x = 0.0, y = 0.0, z = 0.0, and right after that you assign different coordinates.
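For reference, a minimal sketch of addModel() with those offsets removed (this is just the function from the question with the two lines dropped; whether that alone stops the shaking is not guaranteed):

func addModel(addTo: SCNNode) {
    let testScene = SCNScene(named: "art.scnassets/testModel.scn")
    let testNode = testScene?.rootNode.childNode(withName: "test", recursively: true)
    let testMaterial = SCNMaterial()
    testMaterial.diffuse.contents = UIImage(named: "art.scnassets/bricks")
    testNode?.geometry?.materials = [testMaterial]
    // Leave the node at the plane's origin instead of re-positioning it afterwards.
    testNode?.position = SCNVector3Zero
    addTo.addChildNode(testNode!)
}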

How to detect touch and show new SCNPlane using ARKit?

I am now able to show different SCNPlanes when a card is detected. After the SCNPlanes are displayed, the user should be able to touch any plane to show a new SCNPlane. Right now the touch is detected properly, but the new SCNPlane is not showing.
Here is the code I've tried:
var cake_1_PlaneNode: SCNNode? = nil

func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
    guard let imageAnchor = anchor as? ARImageAnchor else { return }
    if let imageName = imageAnchor.referenceImage.name {
        print(imageName)
        if imageName == "menu" {
            // Check To See The Detected Size Of Our menu Card (Should Be 5cm * 3cm)
            let menuCardWidth = imageAnchor.referenceImage.physicalSize.width
            let menuCardHeight = imageAnchor.referenceImage.physicalSize.height
            print(
                """
                We Have Detected menu Card With Name \(imageName)
                \(imageName)'s Width Is \(menuCardWidth)
                \(imageName)'s Height Is \(menuCardHeight)
                """)

            //raspberry
            //cake 1
            let cake_1_Plane = SCNPlane(width: 0.045, height: 0.045)
            cake_1_Plane.firstMaterial?.diffuse.contents = UIImage(named: "france")
            cake_1_Plane.cornerRadius = 0.01
            let cake_1_PlaneNode = SCNNode(geometry: cake_1_Plane)
            self.cake_1_PlaneNode = cake_1_PlaneNode
            cake_1_PlaneNode.eulerAngles.x = -.pi/2
            cake_1_PlaneNode.runAction(SCNAction.moveBy(x: 0.15, y: 0, z: -0.125, duration: 0.75))
            node.addChildNode(cake_1_PlaneNode)
            self.sceneView.scene.rootNode.addChildNode(node)
        }
    }
}

override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
    let touch = touches.first as! UITouch
    if touch.view == self.sceneView {
        //print("touch working")
        let viewTouchLocation: CGPoint = touch.location(in: sceneView)
        guard let result = sceneView.hitTest(viewTouchLocation, options: nil).first else {
            return
        }
        if let planeNode = cake_1_PlaneNode, planeNode == result.node {
            print("match")
            cake_1()
        }
    }
}

func cake_1() {
    let plane = SCNPlane(width: 0.15, height: 0.15)
    plane.firstMaterial?.diffuse.contents = UIColor.black.withAlphaComponent(0.75)
    let planeNodee = SCNNode(geometry: plane)
    planeNodee.eulerAngles.x = -.pi / 2
    planeNodee.runAction(SCNAction.moveBy(x: 0.21, y: 0, z: 0, duration: 0))
} //cake_1
Follow this link: Detect touch on SCNNode in ARKit.
Looking at your code I can see several issues (not to mention the naming conventions for your variables and methods).
Firstly, you are creating a Global Variable which you have declared like so:
var cake_1_PlaneNode : SCNNode? = nil
However you use both a Local and Global Variable for your cake_1_PlaneNode in your Delegate Callback:
let cake_1_PlaneNode = SCNNode(geometry: cake_1_Plane)
self.cake_1_PlaneNode = cake_1_PlaneNode
Which should simply read like so:
self.cake_1_PlaneNode = SCNNode(geometry: cake_1_Plane)
Secondly, you are adding your cake_1_PlaneNode to the rootNode of your ARSCNView rather than to your detected ARImageAnchor, which is probably not what you want to do, since when an ARAnchor is detected:
You can provide visual content for the anchor by attaching geometry
(or other SceneKit features) to this node or by adding child nodes.
As such, this method (unless you actually want to do it like this) is unnecessary.
The remaining issues lie within your cake_1 function itself.
Firstly, you are not actually adding your planeNodee to your scene hierarchy.
Since you haven't specified whether the newly initialised planeNodee should be added directly to your ARSCNView or as a childNode of your cake_1_PlaneNode, your function should include one of the following:
self.sceneView.scene.rootNode.addChildNode(planeNodee)
self.cake_1_planeNode.addChildNode(planeNodee)
In addition there is probably also no need to rotate your planeNodee since by default an SCNPlane is rendered vertically.
Since you haven't stipulated where you will be placing your content, it could be that using -.pi / 2 is unnecessary, since this could make it virtually invisible to the naked eye.
One other issue which could also account for you not seeing your node, when you actually add it, is the Z position.
If you set 2 nodes at the same position you will likely experience an issue known as Z-fighting (which you can read more about here). As such you should probably move your added node forward slightly when adding it, e.g. SCNVector3(0, 0, 0.001), to account for this.
Based on all of these points, I have provided a fully working and commented example below:
import UIKit
import ARKit

//-------------------------
// MARK: - ARSCNViewDelegate
//-------------------------

extension ViewController: ARSCNViewDelegate {

    func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {

        //1. Check We Have An ARImageAnchor, Then Get Its Reference Image & Name
        guard let imageAnchor = anchor as? ARImageAnchor else { return }
        let detectedTarget = imageAnchor.referenceImage
        guard let detectedTargetName = detectedTarget.name else { return }

        //2. If We Have Detected Our Virtual Menu Then Add The CakeOnePlane
        if detectedTargetName == "cakeMenu" {
            let cakeOnePlaneGeometry = SCNPlane(width: 0.045, height: 0.045)
            cakeOnePlaneGeometry.firstMaterial?.diffuse.contents = UIColor.cyan
            cakeOnePlaneGeometry.cornerRadius = 0.01
            let cakeOnPlaneNode = SCNNode(geometry: cakeOnePlaneGeometry)
            cakeOnPlaneNode.eulerAngles.x = -.pi/2

            //3. To Allow Us To Easily Keep Track Of Our Currently Added Node We Will Assign It A Unique Name
            cakeOnPlaneNode.name = "Strawberry Cake"

            node.addChildNode(cakeOnPlaneNode)
            cakeOnPlaneNode.runAction(SCNAction.moveBy(x: 0.15, y: 0, z: 0, duration: 0.75))
        }
    }
}

class ViewController: UIViewController {

    @IBOutlet var augmentedRealityView: ARSCNView!
    let augmentedRealitySession = ARSession()
    let configuration = ARWorldTrackingConfiguration()

    //------------------
    // MARK: - Life Cycle
    //------------------

    override func viewDidLoad() {
        super.viewDidLoad()
        setupARSession()
    }

    //-----------------
    // MARK: - ARSession
    //-----------------

    /// Runs The ARSession
    func setupARSession() {

        //1. Load Our Detection Images
        guard let detectionImages = ARReferenceImage.referenceImages(inGroupNamed: "AR Resources", bundle: nil) else { return }

        //2. Configure & Run Our ARSession
        augmentedRealityView.session = augmentedRealitySession
        augmentedRealityView.delegate = self
        configuration.detectionImages = detectionImages
        augmentedRealitySession.run(configuration, options: [.resetTracking, .removeExistingAnchors])
    }

    //--------------------
    // MARK: - Interaction
    //--------------------

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {

        //1. Get The Current Touch Location & Perform An SCNHitTest To Check For Any Hit SCNNodes
        guard let currentTouchLocation = touches.first?.location(in: self.augmentedRealityView),
              let hitTestNode = self.augmentedRealityView.hitTest(currentTouchLocation, options: nil).first?.node else { return }

        //2. If We Have Hit Our Strawberry Cake Then We Call Our makeCakeOnNode Function
        if let cakeID = hitTestNode.name, cakeID == "Strawberry Cake" {
            makeCakeOnNode(hitTestNode)
        }
    }

    /// Adds An SCNPlane To A Detected Cake Target
    ///
    /// - Parameter node: SCNNode
    func makeCakeOnNode(_ node: SCNNode) {
        let planeGeometry = SCNPlane(width: 0.15, height: 0.15)
        planeGeometry.firstMaterial?.diffuse.contents = UIColor.black.withAlphaComponent(0.75)
        let planeNode = SCNNode(geometry: planeGeometry)
        planeNode.position = SCNVector3(0, 0, 0.001)
        planeNode.runAction(SCNAction.moveBy(x: 0.21, y: 0, z: 0, duration: 0))
        node.addChildNode(planeNode)
    }
}
Which yields the following on my device:
For your information, this seems to show that your calculations for placing your content are off (unless of course this is the desired result).
As you can see, all of the content rendered correctly; however, the spacing between the elements was quite large, so you will likely need to pan your device somewhat to see it all when testing and developing further.
Hope it helps...
Please use descriptive and clear names for your variables and functions; it is very hard to read and understand your code. You can read more about Swift style guidelines here: https://github.com/raywenderlich/swift-style-guide#naming
You are creating a new plane when the user touches the screen, but you are not adding that plane to the scene; your cake_1() function therefore only creates a new plane that is never displayed.
When ARKit detects an image, it automatically creates an empty node and adds it to our scene, at the center of the detected image. We must first keep a reference to the node ARKit has added for us when the image is detected.
Add this variable to the top of your class:
var detectedImageNode: SCNNode?
Then, in func renderer(_:didAdd:for:), add the following line:
detectedImageNode = node
Now that we have a reference to the node, we can easily add and remove other nodes.
Add the following at the end of cake_1():
if let detectedImageNode = detectedImageNode {
    cake_1_PlaneNode?.removeFromParentNode()
    detectedImageNode.addChildNode(planeNodee)
}
Your final code should look like this:
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
    guard let imageAnchor = anchor as? ARImageAnchor else { return }
    if let imageName = imageAnchor.referenceImage.name {
        print(imageName)
        if imageName == "menu" {
            let cake_1_Plane = SCNPlane(width: 0.045, height: 0.045)
            cake_1_Plane.firstMaterial?.diffuse.contents = UIImage(named: "france")
            cake_1_Plane.cornerRadius = 0.01
            let cake_1_PlaneNode = SCNNode(geometry: cake_1_Plane)
            self.cake_1_PlaneNode = cake_1_PlaneNode
            cake_1_PlaneNode.eulerAngles.x = -.pi/2
            cake_1_PlaneNode.runAction(SCNAction.moveBy(x: 0.15, y: 0, z: -0.125, duration: 0.75))
            node.addChildNode(cake_1_PlaneNode)
            // No need to add the following line. The node is already added to the scene.
            //self.sceneView.scene.rootNode.addChildNode(node)
            detectedImageNode = node
        }
    }
}

func cake_1() {
    let plane = SCNPlane(width: 0.15, height: 0.15)
    plane.firstMaterial?.diffuse.contents = UIColor.black.withAlphaComponent(0.75)
    let planeNodee = SCNNode(geometry: plane)
    planeNodee.eulerAngles.x = -.pi / 2
    if let detectedImageNode = detectedImageNode {
        cake_1_PlaneNode?.removeFromParentNode()
        detectedImageNode.addChildNode(planeNodee)
    }
}
Alternative solution
If you are just trying to change the image of the plane then an easier way to approach this is to just change the texture of the plane.
Replace the contents of cake_1() with:
if let planeGeometry = cake_1_PlaneNode?.geometry {
    planeGeometry.firstMaterial?.diffuse.contents = UIImage(named: "newImage")
}

Scaling SCNNode and its children

I am making a simple measuring app. Currently I place spheres as SCNNodes around the scene, and between each pair of nodes a label appears that displays the length of the line from node 1 to node 2.
This is how the labels are created:
func addLabel() {
    let plane = SCNPlane(width: 0.07, height: 0.02)
    plane.cornerRadius = plane.height / 10

    let sks = SKScene(size: CGSize(width: plane.width * 10e3, height: plane.height * 10e3))
    sks.backgroundColor = UIColor(red: 0.1, green: 0.1, blue: 0.1, alpha: 0.7)

    currentLbl = SKLabelNode(text: "")
    currentLbl.fontSize = 110
    currentLbl.fontName = "Helvetica"
    currentLbl.verticalAlignmentMode = .center
    currentLbl.position = CGPoint(x: sks.frame.midX, y: sks.frame.midY)
    currentLbl.fontColor = .white
    sks.addChild(currentLbl)

    let material = SCNMaterial()
    material.isDoubleSided = true
    material.diffuse.contents = sks
    material.diffuse.contentsTransform = SCNMatrix4Translate(SCNMatrix4MakeScale(1, -1, 1), 0, 1, 0)
    plane.materials = [material]

    let node = SCNNode(geometry: plane)
    node.constraints = [SCNBillboardConstraint()]
    node.position = SCNVector3Make(0, 0, 0)

    let (minBound, maxBound) = node.boundingBox
    node.pivot = SCNMatrix4MakeTranslation((maxBound.x + minBound.x) / 2, minBound.y, 0.02 / 2)

    lblNodes.append(node)
    currentLblNode = node
    sceneView.scene.rootNode.addChildNode(node)
}
I would like to apply a mathematical equation to the scale of these label nodes (in my update function) to maintain readability from a couple of metres.
var myNodes: [SCNNode] = []
let s = getMagicScalingNumber()
Say I obtained my scale factor as above and I have an array of SCNNodes: how can I scale all the nodes and their respective children so they stay visually proportional?
If SCNTransformConstraint() is an option for this, I would appreciate an example of how to implement it.
Edit: Just to clarify, I have tried
currentLblNode.scale = SCNVector3Make(s, s, s)
which does not seem to work.
I know this is very late, however I have put together an example which should point you in the right direction.
In your example you are essentially creating a holder SCNNode which contains your labels etc.
You can store these into an array of [SCNNode] and then transform the scale of these like so:
/// Updates The Contents Of Each Node Added To Our NodesAdded Array
///
/// - Parameters:
///   - nodes: [SCNNode]
///   - pointOfView: SCNNode
func updateScaleFromCameraForNodes(_ nodes: [SCNNode], fromPointOfView pointOfView: SCNNode) {
    nodes.forEach { (node) in

        //1. Get The Current Position Of The Node
        let positionOfNode = SCNVector3ToGLKVector3(node.worldPosition)

        //2. Get The Current Position Of The Camera
        let positionOfCamera = SCNVector3ToGLKVector3(pointOfView.worldPosition)

        //3. Calculate The Distance From The Node To The Camera
        let distanceBetweenNodeAndCamera = GLKVector3Distance(positionOfNode, positionOfCamera)

        //4. Animate Their Scaling & Set Their Scale Based On Their Distance From The Camera
        SCNTransaction.begin()
        SCNTransaction.animationDuration = 0.5
        switch distanceBetweenNodeAndCamera {
        case 0 ... 0.5:
            node.simdScale = simd_float3(0.25, 0.25, 0.25)
        case 0.5 ... 1:
            node.simdScale = simd_float3(0.5, 0.5, 0.5)
        case 1 ... 1.5:
            node.simdScale = simd_float3(1, 1, 1)
        case 1.5 ... 2:
            node.simdScale = simd_float3(1.5, 1.5, 1.5)
        case 2 ... 2.5:
            node.simdScale = simd_float3(2, 2, 2)
        case 2.5 ... 3:
            node.simdScale = simd_float3(2.5, 2.5, 2.5)
        default:
            print("Default")
        }
        SCNTransaction.commit()
    }
}
Here I am just setting 'random' values to illustrate the concept of scale depending on the distance from the camera.
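If you would rather use a continuous equation than discrete buckets (closer to the "magic scaling number" idea in the question), the switch statement could be replaced with something like the sketch below; scaling linearly with distance and the clamping range are assumptions you would tune:

//4. Scale The Node Linearly With Its Distance From The Camera, Clamped To A Sensible Range
SCNTransaction.begin()
SCNTransaction.animationDuration = 0.5
node.simdScale = simd_float3(repeating: simd_clamp(distanceBetweenNodeAndCamera, 0.25, 3))
SCNTransaction.commit()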
To put this better into context here is a little demo I have put together:
//--------------------------
// MARK: - ARSCNViewDelegate
//--------------------------

extension ViewController: ARSCNViewDelegate {

    func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
        if !nodesAdded.isEmpty, let currentCameraPosition = self.sceneView.pointOfView {
            updateScaleFromCameraForNodes(nodesAdded, fromPointOfView: currentCameraPosition)
        }
    }
}

class ViewController: UIViewController {

    @IBOutlet var sceneView: ARSCNView!
    var nodesAdded = [SCNNode]()

    //-----------------------
    // MARK: - View LifeCycle
    //-----------------------

    override func viewDidLoad() {
        super.viewDidLoad()

        //1. Generate Our Three Box Nodes
        generateBoxNodes()

        //2. Run The Session
        let configuration = ARWorldTrackingConfiguration()
        sceneView.session.run(configuration)
        sceneView.delegate = self
    }

    /// Generates Three SCNNodes With An SCNBox Geometry & Places Them In A Holder Node
    func generateBoxNodes() {

        //1. Create An SCNNode To Hold All Of Our Content
        let holderNode = SCNNode()

        //2. Create An Array Of Colours For Each Face
        let colours: [UIColor] = [.red, .green, .blue, .purple, .cyan, .black]

        //3. Create An SCNNode With An SCNBox Geometry
        let boxNode = SCNNode()
        let boxGeometry = SCNBox(width: 0.1, height: 0.1, length: 0.1, chamferRadius: 0.01)
        boxNode.geometry = boxGeometry

        //4. Create A Different Material For Each Face
        var materials = [SCNMaterial]()
        for i in 0..<colours.count {
            let faceMaterial = SCNMaterial()
            faceMaterial.diffuse.contents = colours[i]
            materials.append(faceMaterial)
        }

        //5. Set The Geometry's Materials
        boxNode.geometry?.materials = materials

        //6. Create Two More Nodes By Cloning The First One
        let secondBox = boxNode.flattenedClone()
        let thirdBox = boxNode.flattenedClone()

        //7. Position Them In A Line & Add To The Scene
        boxNode.position = SCNVector3(-0.2, 0, 0)
        secondBox.position = SCNVector3(0, 0, 0)
        thirdBox.position = SCNVector3(0.2, 0, 0)
        holderNode.addChildNode(boxNode)
        holderNode.addChildNode(secondBox)
        holderNode.addChildNode(thirdBox)
        holderNode.position = SCNVector3(0, 0, -1)
        self.sceneView.scene.rootNode.addChildNode(holderNode)
        nodesAdded.append(holderNode)
    }

    /// Updates The Contents Of Each Node Added To Our NodesAdded Array
    ///
    /// - Parameters:
    ///   - nodes: [SCNNode]
    ///   - pointOfView: SCNNode
    func updateScaleFromCameraForNodes(_ nodes: [SCNNode], fromPointOfView pointOfView: SCNNode) {
        nodes.forEach { (node) in

            //1. Get The Current Position Of The Node
            let positionOfNode = SCNVector3ToGLKVector3(node.worldPosition)

            //2. Get The Current Position Of The Camera
            let positionOfCamera = SCNVector3ToGLKVector3(pointOfView.worldPosition)

            //3. Calculate The Distance From The Node To The Camera
            let distanceBetweenNodeAndCamera = GLKVector3Distance(positionOfNode, positionOfCamera)

            //4. Animate Their Scaling & Set Their Scale Based On Their Distance From The Camera
            SCNTransaction.begin()
            SCNTransaction.animationDuration = 0.5
            switch distanceBetweenNodeAndCamera {
            case 0 ... 0.5:
                node.simdScale = simd_float3(0.25, 0.25, 0.25)
            case 0.5 ... 1:
                node.simdScale = simd_float3(0.5, 0.5, 0.5)
            case 1 ... 1.5:
                node.simdScale = simd_float3(1, 1, 1)
            case 1.5 ... 2:
                node.simdScale = simd_float3(1.5, 1.5, 1.5)
            case 2 ... 2.5:
                node.simdScale = simd_float3(2, 2, 2)
            case 2.5 ... 3:
                node.simdScale = simd_float3(2.5, 2.5, 2.5)
            default:
                print("Default")
            }
            SCNTransaction.commit()
        }
    }

    override func viewWillAppear(_ animated: Bool) { super.viewWillAppear(animated) }
    override func viewWillDisappear(_ animated: Bool) { super.viewWillDisappear(animated) }
    override func didReceiveMemoryWarning() { super.didReceiveMemoryWarning() }
}
Hopefully it helps...