ARKit Image Detection and Add Image From Assets.xcassets - swift

I am playing around with the code I downloaded from the Apple Developer site on AR image detection. I am trying to modify it to show a specific image from the AR Resources folder in Resources/Assets.xcassets once an image is detected. I see a similar question was posted two years ago, and I tried its one and only answer, but with no success. Can anyone help? Thank you!
import ARKit
import SceneKit
import UIKit
class ViewController: UIViewController, ARSCNViewDelegate, ARSessionDelegate {
@IBOutlet var sceneView: ARSCNView!
@IBOutlet weak var blurView: UIVisualEffectView!
/// The view controller that displays the status and "restart experience" UI.
lazy var statusViewController: StatusViewController = {
return children.lazy.compactMap({ $0 as? StatusViewController }).first!
}()
/// A serial queue for thread safety when modifying the SceneKit node graph.
let updateQueue = DispatchQueue(label: Bundle.main.bundleIdentifier! +
".serialSceneKitQueue")
/// Convenience accessor for the session owned by ARSCNView.
var session: ARSession {
return sceneView.session
}
// MARK: - View Controller Life Cycle
override func viewDidLoad() {
super.viewDidLoad()
sceneView.delegate = self
sceneView.session.delegate = self
// Hook up status view controller callback(s).
statusViewController.restartExperienceHandler = { [unowned self] in
self.restartExperience()
}
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
// Prevent the screen from being dimmed to avoid interrupting the AR experience.
UIApplication.shared.isIdleTimerDisabled = true
// Start the AR experience
resetTracking()
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
session.pause()
}
// MARK: - Session management (Image detection setup)
/// Prevents restarting the session while a restart is in progress.
var isRestartAvailable = true
/// Creates a new AR configuration to run on the `session`.
/// - Tag: ARReferenceImage-Loading
func resetTracking() {
guard let referenceImages = ARReferenceImage.referenceImages(inGroupNamed: "AR Resources", bundle: nil) else {
fatalError("Missing expected asset catalog resources.")
}
let configuration = ARWorldTrackingConfiguration()
configuration.detectionImages = referenceImages
session.run(configuration, options: [.resetTracking, .removeExistingAnchors])
statusViewController.scheduleMessage("Look around to detect images", inSeconds: 7.5, messageType: .contentPlacement)
}
// MARK: - ARSCNViewDelegate (Image detection results)
/// - Tag: ARImageAnchor-Visualizing
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
guard let imageAnchor = anchor as? ARImageAnchor else { return }
let referenceImage = imageAnchor.referenceImage
updateQueue.async {
// Create a plane to visualize the initial position of the detected image.
let plane = SCNPlane(width: referenceImage.physicalSize.width,
height: referenceImage.physicalSize.height)
let planeNode = SCNNode(geometry: plane)
planeNode.opacity = 0.25
/*
`SCNPlane` is vertically oriented in its local coordinate space, but
`ARImageAnchor` assumes the image is horizontal in its local space, so
rotate the plane to match.
*/
planeNode.eulerAngles.x = -.pi / 2
/*
Image anchors are not tracked after initial detection, so create an
animation that limits the duration for which the plane visualization appears.
*/
planeNode.runAction(self.imageHighlightAction)
plane.materials = [SCNMaterial()]
plane.materials[0].diffuse.contents = UIImage(named: "Macbook 12-inch")
// Add the plane visualization to the scene.
node.addChildNode(planeNode)
DispatchQueue.main.async {
let imageName = referenceImage.name ?? ""
self.statusViewController.cancelAllScheduledMessages()
self.statusViewController.showMessage("Detected image “\(imageName)”")
}
}
}
var imageHighlightAction: SCNAction {
return .sequence([
.wait(duration: 0.25),
.fadeOpacity(to: 0.85, duration: 0.25),
.fadeOpacity(to: 0.15, duration: 0.25),
.fadeOpacity(to: 0.85, duration: 0.25),
.fadeOut(duration: 0.5),
.removeFromParentNode()
])
}
}

In Xcode's Assets.xcassets, click the + button and create a group for reference images (use .png or .jpg formats). In Xcode's directories this folder gets the .arresourcegroup extension.
// AR and Textures –> AR Resource Group
You can rename this folder. There's no need to put hi-res images inside it; a resolution of around 400x400 is appropriate for each image. Don't put more than 100 images in a group. That's all.
Your code may look like this:
guard let images = ARReferenceImage.referenceImages(
inGroupNamed: "AR Resources",
bundle: nil)
else { return }
let config = ARWorldTrackingConfiguration()
config.detectionImages = images
config.maximumNumberOfTrackedImages = 3
arView.session.run(config, options: [])
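To then display a specific image from Assets.xcassets on top of the detected image (which is what the question asks for), you can texture the plane you create in the detection callback with a UIImage from the catalog. A minimal sketch, based on the renderer(_:didAdd:for:) code from the question; "MyOverlay" is a placeholder name for an image set in Assets.xcassets:
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
    guard let imageAnchor = anchor as? ARImageAnchor else { return }
    let referenceImage = imageAnchor.referenceImage
    // Plane matching the physical size of the detected image.
    let plane = SCNPlane(width: referenceImage.physicalSize.width,
                         height: referenceImage.physicalSize.height)
    // "MyOverlay" is a placeholder for an image set in Assets.xcassets.
    plane.firstMaterial?.diffuse.contents = UIImage(named: "MyOverlay")
    let planeNode = SCNNode(geometry: plane)
    // SCNPlane is vertical in its local space; the image anchor is horizontal.
    planeNode.eulerAngles.x = -.pi / 2
    node.addChildNode(planeNode)
}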

Related

ARKit – How to know if 3d object is in the center of a screen?

I place a 3D object in world space and then move the camera around. Once I know the object has come inside the frustum (via the isNode(_:insideFrustumOf:) method), I need to determine whether the object is at the center, top, or bottom of the camera's view.
For a solution that's not a hack you can use the projectPoint(_:) API.
It's probably better to work in pixel coordinates, because this method uses the actual camera settings to determine where the object appears on screen.
let screenCenter = CGPoint(x: sceneView.bounds.midX, y: sceneView.bounds.midY) // center of the view in screen points
let radius: CGFloat = 50 // detection radius "R" in screen points
let projectedPoint = sceneView.projectPoint(self.sphereNode.worldPosition)
let xOffset = CGFloat(projectedPoint.x) - screenCenter.x
let yOffset = CGFloat(projectedPoint.y) - screenCenter.y
if xOffset * xOffset + yOffset * yOffset < radius * radius {
// inside a disc of radius `radius` at the center of the screen
}
Solution
To achieve this you can use a trick. Create a new SCNCamera, make it a child of the default pointOfView camera, and set its field of view to approximately 10 degrees.
Then, inside the renderer(_:updateAtTime:) instance method, use the isNode(_:insideFrustumOf:) method.
Here's working code:
import ARKit
class ViewController: UIViewController,
ARSCNViewDelegate,
SCNSceneRendererDelegate {
@IBOutlet var sceneView: ARSCNView!
@IBOutlet var label: UILabel!
let cameraNode = SCNNode()
let sphereNode = SCNNode()
let config = ARWorldTrackingConfiguration()
public func renderer(_ renderer: SCNSceneRenderer,
updateAtTime time: TimeInterval) {
DispatchQueue.main.async {
if self.sceneView.isNode(self.sphereNode,
insideFrustumOf: self.cameraNode) {
self.label.text = "In the center..."
} else {
self.label.text = "Out OF CENTER"
}
}
}
override func viewDidLoad() {
super.viewDidLoad()
sceneView.delegate = self
sceneView.allowsCameraControl = true
let scene = SCNScene()
sceneView.scene = scene
cameraNode.camera = SCNCamera()
cameraNode.camera?.fieldOfView = 10
DispatchQueue.main.asyncAfter(deadline: .now() + 0.5) {
self.sceneView.pointOfView!.addChildNode(self.cameraNode)
}
sphereNode.geometry = SCNSphere(radius: 0.05)
sphereNode.geometry?.firstMaterial?.diffuse.contents = UIColor.red
sphereNode.position.z = -1.0
sceneView.scene.rootNode.addChildNode(sphereNode)
sceneView.session.run(config)
}
}
Also, in this solution you can turn on an orthographic projection for the child camera instead of a perspective one. It helps when the model is far from the camera.
cameraNode.camera?.usesOrthographicProjection = true
Here's how your screen might look:
Next steps
In the same way you can add two additional SCNCameras, place them above and below the central SCNCamera, and test your object with two extra isNode(_:insideFrustumOf:) calls, as in the sketch below.
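A minimal sketch of that idea (the node names and the 0.2 m offsets are my own, not from the answer; the nodes would be attached to sceneView.pointOfView the same way as cameraNode above):
let centerCameraNode = SCNNode()
let topCameraNode = SCNNode()
let bottomCameraNode = SCNNode()

func setUpRegionCameras(attachedTo pointOfView: SCNNode) {
    // Three narrow-FoV child cameras used only as frustums for isNode(_:insideFrustumOf:).
    let cameras = [centerCameraNode, topCameraNode, bottomCameraNode]
    let yOffsets: [Float] = [0, 0.2, -0.2]   // assumed offsets; tune for your scene
    for (node, yOffset) in zip(cameras, yOffsets) {
        node.camera = SCNCamera()
        node.camera?.fieldOfView = 10
        node.position.y = yOffset
        pointOfView.addChildNode(node)
    }
}

func screenRegion(of node: SCNNode, in sceneView: ARSCNView) -> String {
    if sceneView.isNode(node, insideFrustumOf: topCameraNode) { return "top" }
    if sceneView.isNode(node, insideFrustumOf: bottomCameraNode) { return "bottom" }
    if sceneView.isNode(node, insideFrustumOf: centerCameraNode) { return "center" }
    return "off center"
}
Whichever narrow frustum contains the node tells you roughly which screen region it occupies.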
I solved the problem another way:
let results = self.sceneView.hitTest(screenCenter!, options: [SCNHitTestOption.rootNode: parentnode])
where parentnode is the parent of the target node, because I have multiple nodes.
func nodeInCenter() -> SCNNode? {
// Compare the projected screen position of the camera with that of the node.
let center = sceneView.projectPoint(sceneView.pointOfView!.worldPosition)
let projected = sceneView.projectPoint(sphereNode.worldPosition)
let dx = center.x - projected.x
let dy = center.y - projected.y
// Square the offsets and require each to be within a 3-point tolerance.
if dx * dx < 9, dy * dy < 9 {
return sphereNode
}
return nil
}

Can I do ARKit "Continuous Image Tracking" in a World Tracking Configuration with RealityKit?

UPDATE: My premise that "continuous image tracking" is not possible out of the box with RealityKit ARViews was incorrect. All I needed to do was correctly create the AnchorEntity for the continuously tracked reference image.
The anchor entity needs to be created using the init(anchor: ARAnchor) initializer. (The init(world: SIMD3<Float>) initializer is correct for anchors stuck to the real world, but not ones that should track the reference image.)
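In code, the difference is just which initializer is used (variable names here are mine; the same two lines appear in the corrected code further down):
// Anchors content to a fixed world-space transform (does not follow the image):
let fixedAnchor = AnchorEntity(world: imageAnchor.transform)
// Anchors content to the ARImageAnchor itself (follows the tracked image):
let trackedAnchor = AnchorEntity(anchor: imageAnchor)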
Using ARKit and RealityKit with an ARWorldTrackingConfiguration, I am trying to do "continuous image tracking" (where the reference image is tracked each frame, and virtual objects can be anchored to it, appearing to be attached to and move with the reference image). Because reference images are only recognized once in world tracking (as opposed to ARImageTrackingConfiguration, where reference images are continuously tracked as long as they are in frame), this is not possible out of the box.
To get the same results in a world tracking configuration, I am anchoring virtual objects to the reference image in the session(_:didAdd:) delegate method, and using the session(_:didUpdate:) delegate method as an opportunity to remove the ARImageAnchor after each time it is identified. This causes the reference image to be re-recognized over and over, allowing virtual objects to be anchored to the image and appear to track it frame-to-frame.
In the example below, I am placing two ball markers to track the position of the reference image. The first marker is placed only once, at the location where the reference image is initially detected. The other marker is re-positioned each time the reference image is re-detected, appearing to follow it.
This works. Virtual content tracks the reference image in the ARWorldTrackingConfiguration the same way it would in an image tracking config. But while the "animation" in ARImageTrackingConfiguration is very smooth, the animation in world tracking is much less smooth, more jumpy, as if it was running at 10 or 15 frames per second. (Actual FPS as reported by .showStatistics stays near 60 FPS in both configurations.)
I assume the difference in smoothness results from the time it takes ARKit to do the work of repeatedly re-recognizing and removing the reference image anchor on each didAdd/didUpdate cycle.
I would like to know if there is a better technique to get "continuous image tracking" in an ARWorldTrackingConfiguration, and/or whether there is any way I can improve the code in the delegate methods to achieve this effect.
import ARKit
import RealityKit
class ViewController: UIViewController, ARSessionDelegate {
@IBOutlet var arView: ARView!
// originalImageAnchor is used to visualize the first-detected location of reference image
// currentImageAnchor should be continuously updated to match current position of ref image
var originalImageAnchor: AnchorEntity!
var currentImageAnchor: AnchorEntity!
let ballRadius: Float = 0.02
override func viewDidLoad() {
super.viewDidLoad()
guard let referenceImages = ARReferenceImage.referenceImages(inGroupNamed: "AR Resources",
bundle: nil) else { fatalError("Missing expected asset catalog resources.") }
arView.session.delegate = self
arView.automaticallyConfigureSession = false
arView.debugOptions = [.showStatistics]
arView.renderOptions = [.disableCameraGrain, .disableHDR, .disableMotionBlur,
.disableDepthOfField, .disableFaceOcclusions, .disablePersonOcclusion,
.disableGroundingShadows, .disableAREnvironmentLighting]
let configuration = ARWorldTrackingConfiguration()
configuration.detectionImages = referenceImages
configuration.maximumNumberOfTrackedImages = 1 // there is one ref image named "coaster_rb"
arView.session.run(configuration)
}
func session(_ session: ARSession, didAdd anchors: [ARAnchor]) {
guard let imageAnchor = anchors[0] as? ARImageAnchor else { return }
// Reference image detected. This will happen multiple times because
// we delete ARImageAnchor in session(_:didUpdate:)
if let imageName = imageAnchor.name, imageName == "coaster_rb" {
// If originalImageAnchor is nil, create an anchor and
// add a marker at initial position of reference image.
if originalImageAnchor == nil {
originalImageAnchor = AnchorEntity(world: imageAnchor.transform)
let originalImageMarker = generateBallMarker(radius: ballRadius, color: .systemPink)
originalImageMarker.position.y = ballRadius + (ballRadius * 2)
originalImageAnchor.addChild(originalImageMarker)
arView.scene.addAnchor(originalImageAnchor)
}
// If currentImageAnchor is nil, add an anchor and marker at reference image position
// If currentImageAnchor has already been added, adjust its position to match ref image
if currentImageAnchor == nil {
currentImageAnchor = AnchorEntity(world: imageAnchor.transform)
let currentImageMarker = generateBallMarker(radius: ballRadius, color: .systemTeal)
currentImageMarker.position.y = ballRadius
currentImageAnchor.addChild(currentImageMarker)
arView.scene.addAnchor(currentImageAnchor)
} else {
currentImageAnchor.setTransformMatrix(imageAnchor.transform, relativeTo: nil)
}
}
}
func session(_ session: ARSession, didUpdate anchors: [ARAnchor]) {
guard let imageAnchor = anchors[0] as? ARImageAnchor else { return }
// Delete reference image anchor to allow for ongoing tracking as it moves
if let imageName = imageAnchor.name, imageName == "coaster_rb" {
arView.session.remove(anchor: anchors[0])
}
}
func generateBallMarker(radius: Float, color: UIColor) -> ModelEntity {
let ball = ModelEntity(mesh: .generateSphere(radius: radius),
materials: [SimpleMaterial(color: color, isMetallic: false)])
return ball
}
}
Continuous image tracking does work out of the box with RealityKit ARViews in world tracking configurations. A mistake in my original code led me to think otherwise.
Incorrect anchor entity initialization (for what I was trying to accomplish):
currentImageAnchor = AnchorEntity(world: imageAnchor.transform)
Since I wanted to track the ARImageAnchor assigned to the matched reference image, I should have done it like this:
currentImageAnchor = AnchorEntity(anchor: imageAnchor)
The corrected example below places one virtual marker that is fixed to the reference image's initial position, and another that smoothly tracks the reference image in a world tracking configuration:
import ARKit
import RealityKit
class ViewController: UIViewController, ARSessionDelegate {
@IBOutlet var arView: ARView!
let ballRadius: Float = 0.02
override func viewDidLoad() {
super.viewDidLoad()
guard let referenceImages = ARReferenceImage.referenceImages(
inGroupNamed: "AR Resources", bundle: nil) else {
fatalError("Missing expected asset catalog resources.")
}
arView.session.delegate = self
arView.automaticallyConfigureSession = false
arView.debugOptions = [.showStatistics]
arView.renderOptions = [.disableCameraGrain, .disableHDR,
.disableMotionBlur, .disableDepthOfField,
.disableFaceOcclusions, .disablePersonOcclusion,
.disableGroundingShadows, .disableAREnvironmentLighting]
let configuration = ARWorldTrackingConfiguration()
configuration.detectionImages = referenceImages
configuration.maximumNumberOfTrackedImages = 1
arView.session.run(configuration)
}
func session(_ session: ARSession, didAdd anchors: [ARAnchor]) {
guard let imageAnchor = anchors[0] as? ARImageAnchor else { return }
if let imageName = imageAnchor.name, imageName == "target_image" {
// AnchorEntity(world: imageAnchor.transform) results in anchoring
// virtual content to the real world. Content anchored like this
// will remain in position even if the reference image moves.
let originalImageAnchor = AnchorEntity(world: imageAnchor.transform)
let originalImageMarker = makeBall(radius: ballRadius, color: .systemPink)
originalImageMarker.position.y = ballRadius + (ballRadius * 2)
originalImageAnchor.addChild(originalImageMarker)
arView.scene.addAnchor(originalImageAnchor)
// AnchorEntity(anchor: imageAnchor) results in anchoring
// virtual content to the ARImageAnchor that is attached to the
// reference image. Content anchored like this will appear
// stuck to the reference image.
let currentImageAnchor = AnchorEntity(anchor: imageAnchor)
let currentImageMarker = makeBall(radius: ballRadius, color: .systemTeal)
currentImageMarker.position.y = ballRadius
currentImageAnchor.addChild(currentImageMarker)
arView.scene.addAnchor(currentImageAnchor)
}
}
func makeBall(radius: Float, color: UIColor) -> ModelEntity {
let ball = ModelEntity(mesh: .generateSphere(radius: radius),
materials: [SimpleMaterial(color: color, isMetallic: false)])
return ball
}
}

Reality Composer - Custom Collision Between Entities of Different Scenes

I'm pretty new to RealityKit and ARKit. I have two scenes in Reality Composer, one with a book image anchor and one with a horizontal plane anchor. The first scene, with the image anchor, has a cube attached to the top of it, and the second scene, built on a horizontal plane, has two rings. All objects have a fixed collision. I'd like to run an animation when the rings and the cube touch. I couldn't find a way to do this in Reality Composer, so I made two attempts in code, to no avail. (I'm printing "collision started" just to test the collision code without the animation.) Would appreciate help on this.
Attempt #1:
func makeUIView(context: Context) -> ARView {
let arView = ARView(frame: .zero)
let componentBreakdownAnchor = try! CC.loadComponentBreakdown()
arView.scene.anchors.append(componentBreakdownAnchor)
let bookAnchor = try! CC.loadBook()
arView.scene.anchors.append(bookAnchor)
let ringsAnchor = try! CC.loadRings()
arView.scene.anchors.append(ringsAnchor)
let _ = ringsAnchor.scene?.subscribe(
to: CollisionEvents.Began.self,
on: bookAnchor
) { event in
print("collision started")
}
return arView
}
Attempt #2
func makeUIView(context: Context) -> ARView {
let arView = ARView(frame: .zero)
let componentBreakdownAnchor = try! CC.loadComponentBreakdown()
arView.scene.anchors.append(componentBreakdownAnchor)
let bookAnchor = try! CC.loadBook()
arView.scene.anchors.append(bookAnchor)
let ringsAnchor = try! CC.loadRings()
arView.scene.anchors.append(ringsAnchor)
arView.scene.subscribe(
to: CollisionEvents.Began.self,
on: bookAnchor
) { event in
print("collision started")
}
return arView
}
RealityKit scene
If you want to use collisions between models built from scratch in a RealityKit scene, you first need to adopt the HasCollision protocol.
Let's see what the developer documentation says about it:
HasCollision is an interface used for ray casting and collision detection.
Here's how your implementation might look if you generate models in RealityKit:
import Cocoa
import RealityKit
class CustomCollision: Entity, HasModel, HasCollision {
let color: NSColor = .gray
let collider: ShapeResource = .generateSphere(radius: 0.5)
let sphere: MeshResource = .generateSphere(radius: 0.5)
required init() {
super.init()
let material = SimpleMaterial(color: color,
isMetallic: true)
self.components[ModelComponent.self] = ModelComponent(mesh: sphere,
materials: [material])
self.components[CollisionComponent.self] = CollisionComponent(shapes: [collider],
mode: .trigger,
filter: .default)
}
}
Reality Composer scene
And here's how your code might look if you use models from Reality Composer:
import UIKit
import RealityKit
import Combine
class ViewController: UIViewController {
@IBOutlet var arView: ARView!
var subscriptions: [Cancellable] = []
override func viewDidLoad() {
super.viewDidLoad()
let groundSphere = try! Experience.loadStaticSphere()
let upperSphere = try! Experience.loadDynamicSphere()
let gsEntity = groundSphere.children[0].children[0].children[0]
let usEntity = upperSphere.children[0].children[0].children[0]
// A CollisionComponent exists only if you turn on the
// "Participates" property in the Reality Composer app
print(gsEntity)
var gsComp: CollisionComponent = gsEntity.components[CollisionComponent.self]!
var usComp: CollisionComponent = usEntity.components[CollisionComponent.self]!
gsComp.shapes = [.generateBox(size: [0.05, 0.07, 0.05])]
usComp.shapes = [.generateBox(size: [0.05, 0.05, 0.05])]
gsEntity.components.set(gsComp)
usEntity.components.set(usComp)
let subscription = self.arView.scene.subscribe(to: CollisionEvents.Began.self,
on: gsEntity) { event in
print("Balls' collision occured!")
}
self.subscriptions.append(subscription)
arView.scene.anchors.append(upperSphere)
arView.scene.anchors.append(groundSphere)
}
}
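The subscription above only prints when the collision begins. To actually run an animation, as asked, one option (not part of the original answer, so treat it as a sketch) is to author the animation as a behaviour with a Notification trigger in Reality Composer and post that notification from the collision handler. Assuming the behaviour's trigger identifier is ringsAnimation:
let subscription = self.arView.scene.subscribe(to: CollisionEvents.Began.self,
                                               on: gsEntity) { _ in
    // "ringsAnimation" is a hypothetical Notification trigger defined in Reality Composer.
    // Posting it starts the action sequence authored in the behaviour.
    groundSphere.notifications.ringsAnimation.post()
}
self.subscriptions.append(subscription)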

Place image from Gallery on a Wall using ARKit

I have a list of images coming from a server and stored in the gallery. I want to pick any image and place it on a live wall using ARKit, and convert the images into 3D objects to perform operations like zooming and moving the image.
Can anybody please guide me on how to create a custom object in AR?
To detect vertical surfaces (e.g. walls) in ARKit you first need to set up an ARWorldTrackingConfiguration and then enable planeDetection within your app.
So under your Class Declaration you would create the following variables:
@IBOutlet var augmentedRealityView: ARSCNView!
let augmentedRealitySession = ARSession()
let configuration = ARWorldTrackingConfiguration()
Then initialise your ARSession, for example in viewDidLoad:
override func viewDidLoad() {
super.viewDidLoad()
//1. Set Up Our ARSession
augmentedRealityView.session = augmentedRealitySession
//2. Assign The ARSCNViewDelegate
augmentedRealityView.delegate = self
//3. Set Up Plane Detection
configuration.planeDetection = .vertical
//4. Run Our Configuration
augmentedRealitySession.run(configuration, options: [.resetTracking, .removeExistingAnchors])
}
Now that you are all set to detect vertical planes, you need to hook into the following ARSCNViewDelegate method:
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) { }
Which simply:
Tells the delegate that a SceneKit node corresponding to a new AR
anchor has been added to the scene.
In this method we are going to explicitly look for any ARPlaneAnchors which have been detected which provide us with:
Information about the position and orientation of a real-world flat
surface detected in a world-tracking AR session.
As such, placing an SCNPlane onto a detected vertical plane is as simple as this:
//-------------------------
//MARK: - ARSCNViewDelegate
//-------------------------
extension ViewController: ARSCNViewDelegate{
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
//1. Check We Have Detected An ARPlaneAnchor
guard let planeAnchor = anchor as? ARPlaneAnchor else { return }
//2. Get The Size Of The ARPlaneAnchor
let width = CGFloat(planeAnchor.extent.x)
let height = CGFloat(planeAnchor.extent.z)
//3. Create An SCNPlane Which Matches The Size Of The ARPlaneAnchor
let imageHolder = SCNNode(geometry: SCNPlane(width: width, height: height))
//4. Rotate It
imageHolder.eulerAngles.x = -.pi/2
//5. Set Its Colour To Red
imageHolder.geometry?.firstMaterial?.diffuse.contents = UIColor.red
//6. Add It To Our Node & Thus The Hierarchy
node.addChildNode(imageHolder)
}
}
Applying This To Your Case:
In your case we need to do some additional work, as you want to allow the user to apply an image to the vertical plane.
As such, your best bet is to make the node you have just added a variable, e.g.:
class ViewController: UIViewController {
@IBOutlet var augmentedRealityView: ARSCNView!
let augmentedRealitySession = ARSession()
let configuration = ARWorldTrackingConfiguration()
var nodeWeCanChange: SCNNode?
}
As such your Delegate Callback might look like so:
//-------------------------
//MARK: - ARSCNViewDelegate
//-------------------------
extension ViewController: ARSCNViewDelegate{
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
//1. If We Haven't Created Our Interactive Node Then Proceed
if nodeWeCanChange == nil{
//a. Check We Have Detected An ARPlaneAnchor
guard let planeAnchor = anchor as? ARPlaneAnchor else { return }
//b. Get The Size Of The ARPlaneAnchor
let width = CGFloat(planeAnchor.extent.x)
let height = CGFloat(planeAnchor.extent.z)
//c. Create An SCNPlane Which Matches The Size Of The ARPlaneAnchor
nodeWeCanChange = SCNNode(geometry: SCNPlane(width: width, height: height))
//d. Rotate It
nodeWeCanChange?.eulerAngles.x = -.pi/2
//e. Set Its Colour To Red
nodeWeCanChange?.geometry?.firstMaterial?.diffuse.contents = UIColor.red
//f. Add It To Our Node & Thus The Hierarchy
node.addChildNode(nodeWeCanChange!)
}
}
}
Now that you have a reference to nodeWeCanChange, setting its image at any time is simple!
Each SCNGeometry has a set of Materials which are a:
A set of shading attributes that define the appearance of a geometry's
surface when rendered.
In our case we are looking for the material's diffuse property, which is:
An object that manages the material’s diffuse response to lighting.
And then its contents property, which is:
The visual contents of the material property—a color, image, or source
of animated content.
Obviously you need to handle the full logistics of this, however a very basic example might look like so, assuming you stored your images in an array of UIImage, e.g.:
let imageGallery = [UIImage(named: "StackOverflow"), UIImage(named: "GitHub")]
I have created an IBAction which will change the image of our SCNNode's geometry based on the tag of the UIButton pressed, e.g.:
/// Changes The Material Of Our SCNNode's Geometry To The Image Selected By The User
///
/// - Parameter sender: UIButton
@IBAction func changeNodesImage(_ sender: UIButton){
guard let imageToApply = imageGallery[sender.tag], let node = nodeWeCanChange else { return }
node.geometry?.firstMaterial?.diffuse.contents = imageToApply
}
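For the zooming and moving mentioned in the question, one common approach (not part of the original answer, so only a sketch) is to attach gesture recognizers to the ARSCNView and adjust the node from their callbacks. The method names here are illustrative and assume they live in the same view controller as nodeWeCanChange:
func addGestures() {
    let pinch = UIPinchGestureRecognizer(target: self, action: #selector(scaleNode(_:)))
    let pan = UIPanGestureRecognizer(target: self, action: #selector(moveNode(_:)))
    augmentedRealityView.addGestureRecognizer(pinch)
    augmentedRealityView.addGestureRecognizer(pan)
}

@objc func scaleNode(_ gesture: UIPinchGestureRecognizer) {
    // Pinch to "zoom" the placed image by scaling its node incrementally.
    guard let node = nodeWeCanChange, gesture.state == .changed else { return }
    let scale = Float(gesture.scale)
    node.scale = SCNVector3(node.scale.x * scale, node.scale.y * scale, node.scale.z * scale)
    gesture.scale = 1
}

@objc func moveNode(_ gesture: UIPanGestureRecognizer) {
    // Drag to move the node along a detected plane using an ARKit hit test.
    guard let node = nodeWeCanChange else { return }
    let location = gesture.location(in: augmentedRealityView)
    if let result = augmentedRealityView.hitTest(location, types: .existingPlaneUsingExtent).first {
        node.worldPosition = SCNVector3(result.worldTransform.columns.3.x,
                                        result.worldTransform.columns.3.y,
                                        result.worldTransform.columns.3.z)
    }
}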
This is more than enough to point you in the right direction... Hope it helps...

ARKit detecting intersection between planes

I am using ARKit (with SceneKit) and am trying to find a way to get the intersection between an ARReferenceImage and a horizontal ARPlaneAnchor, so I can display a 3D character on the surface directly in front of the detected image, e.g. spawn inside the red circle (see image below).
At the moment I am able to get the character to spawn in front of the detected image, however, the character is floating in the air instead of standing on the surface.
let realWorldPosition = SCNVector3Make(anchor.transform.columns.3.x, anchor.transform.columns.3.y, anchor.transform.columns.3.z)
let hitTest = self.sceneView.scene.rootNode.hitTestWithSegment(from: self.sceneView.scene.rootNode.worldPosition, to: realWorldPosition, options: nil)
overlayNode.position = SCNVector3Make((hitTest.first?.worldCoordinates.x)!, 0, (hitTest.first?.worldCoordinates.z)!)
self.sceneView.scene.rootNode.addChildNode(overlayNode)
Any help on this would be greatly appreciated, thanks!
Example project
I think you were on the right lines using the hitTestWithSegment function to detect an intersection between the ARImageAnchor and the ARPlaneAnchor.
Rather than trying to explain each step of my attempt at an answer, I have provided code which is fully commented, so it should be fairly self explanatory.
My example works fairly well (although it's certainly not perfect) and will definitely need some tweaking.
For example, you will need to look at determining more accurately the distance from the ARReferenceImage to the ARPlaneAnchor etc.
I can get the model (a Pokemon) to place at the correct level and fairly close to the front of the ARReferenceImage, although it will need tweaking.
Having said this, I think this will be a fairly good base for you to start refining the code and getting more accurate results.
Of note, however, is that I have only enabled one ARPlaneAnchor to be detected (just for simplicity's sake) and have assumed that you will be detecting a plane in front of your image marker.
I haven't taken into account rotation or anything like that. And of course, based on your proposed scenario; it also assumes your image would be on a desk or some other flat surface.
Anyway, here is my answer (hopefully it should be fairly self explanatory):
import UIKit
import ARKit
//-----------------------
//MARK: ARSCNViewDelegate
//-----------------------
extension ViewController: ARSCNViewDelegate{
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
//1. If We Have Detected Our ImageTarget Then Create A Plane To Visualize It
if let currentImageAnchor = anchor as? ARImageAnchor {
createReferenceImagePlaneForNode(currentImageAnchor, node: node)
allowTracking = true
}
//2. If We Have Detected A Horizontal Plane Then Create One
if let currentPlaneAnchor = anchor as? ARPlaneAnchor{
if planeNode == nil && !createdModel{ createReferencePlaneForNode(currentPlaneAnchor, node: node) }
}
}
func renderer(_ renderer: SCNSceneRenderer, didUpdate node: SCNNode, for anchor: ARAnchor) {
//1. Check To See Whether An ARPlaneAnchor Has Been Updated
guard let anchor = anchor as? ARPlaneAnchor,
//2. Check It Is Our PlaneNode
let existingPlane = planeNode,
//3. Get The Geometry Of The PlaneNode
let planeGeometry = existingPlane.geometry as? SCNPlane else { return }
//4. Adjust It's Size & Positions
planeGeometry.width = CGFloat(anchor.extent.x)
planeGeometry.height = CGFloat(anchor.extent.z)
planeNode?.position = SCNVector3Make(anchor.center.x, 0.01, anchor.center.z)
}
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
//1. Detect The Intersection Of The ARPlaneAnchor & ARImageAnchor
if allowTracking { detectIntersectionOfImageTarget() }
}
}
//---------------------------------------
//MARK: Model Generation & Identification
//---------------------------------------
extension ViewController {
/// Detects If We Have Intersected A Valid Image Target
func detectIntersectionOfImageTarget(){
//If We Haven't Created Our Model Then Check To See If We Have Detected An Existing Plane
if !createdModel{
//a. Perform A HitTest On The Center Of The Screen For Any Existing Planes
guard let planeHitTest = self.augmentedRealityView.hitTest(screenCenter, types: .existingPlaneUsingExtent).first,
let planeAnchor = planeHitTest.anchor as? ARPlaneAnchor else { return }
//b. Get The Transform Of The ARPlane Anchor
let x = planeAnchor.transform.columns.3.x
let y = planeAnchor.transform.columns.3.y
let z = planeAnchor.transform.columns.3.z
//c. Create The Anchor's Vector
let anchorVector = SCNVector3(x, y, z)
//d. Perform Another HitTest From The ImageAnchor Vector To The Anchor's Vector
if let _ = self.augmentedRealityView.scene.rootNode.hitTestWithSegment(from: imageAnchorVector, to: anchorVector, options: nil).first?.node {
//e. If We Haven't Created The Model Then Place It As Soon As An Intersection Occurs
if createdModel == false{
//f. Load The Model
loadModelAtVector(SCNVector3(imageAnchorVector.x, y, imageAnchorVector.z))
createdModel = true
planeNode?.removeFromParentNode()
}
}
}
}
}
class ViewController: UIViewController {
//1. Reference To Our ImageTarget Bundle
let AR_BUNDLE = "AR Resources"
//2. Vector To Store The Position Of Our Detected Image
var imageAnchorVector: SCNVector3!
//3. Variables To Allow Tracking & To Determine Whether Our Model Has Been Placed
var allowTracking = false
var createdModel = false
//4. Create A Reference To Our ARSCNView In Our Storyboard Which Displays The Camera Feed
@IBOutlet weak var augmentedRealityView: ARSCNView!
//5. Create Our ARWorld Tracking Configuration
let configuration = ARWorldTrackingConfiguration()
//6. Create Our Session
let augmentedRealitySession = ARSession()
//7. ARReference Images
lazy var staticReferenceImages: Set<ARReferenceImage> = {
let images = ARReferenceImage.referenceImages(inGroupNamed: AR_BUNDLE, bundle: nil)
return images!
}()
//8. Screen Center Reference
var screenCenter: CGPoint!
//9. PlaneNode
var planeNode: SCNNode?
//--------------------
//MARK: View LifeCycle
//--------------------
override func viewDidLoad() {
super.viewDidLoad()
//1. Get Reference To The Center Of The Screen For RayCasting
DispatchQueue.main.async { self.screenCenter = CGPoint(x: self.view.bounds.width/2, y: self.view.bounds.height/2) }
//2. Setup Our ARSession
setupARSessionWithStaticImages()
}
override func didReceiveMemoryWarning() { super.didReceiveMemoryWarning() }
//---------------------------------
//MARK: ARImageAnchor Vizualization
//---------------------------------
/// Creates An SCNPlane For Visualizing The Detected ARImageAnchor
///
/// - Parameters:
/// - imageAnchor: ARImageAnchor
/// - node: SCNNode
func createReferenceImagePlaneForNode(_ imageAnchor: ARImageAnchor, node: SCNNode){
//1. Get The Targets Width & Height
let width = imageAnchor.referenceImage.physicalSize.width
let height = imageAnchor.referenceImage.physicalSize.height
//2. Create A Plane Geometry To Cover The ARImageAnchor
let planeNode = SCNNode()
let planeGeometry = SCNPlane(width: width, height: height)
planeGeometry.firstMaterial?.diffuse.contents = UIColor.white
planeNode.opacity = 0.5
planeNode.geometry = planeGeometry
//3. Rotate The PlaneNode To Horizontal
planeNode.eulerAngles.x = -.pi/2
//4. The Node Is Centered In The Anchor (0,0,0)
node.addChildNode(planeNode)
//5. Store The Vector Of The ARImageAnchor
imageAnchorVector = SCNVector3(imageAnchor.transform.columns.3.x, imageAnchor.transform.columns.3.y, imageAnchor.transform.columns.3.z)
let fadeOutAction = SCNAction.fadeOut(duration: 5)
planeNode.runAction(fadeOutAction)
}
//-------------------------
//MARK: Plane Visualization
//-------------------------
/// Creates An SCNPlane For Visualizing The Detected ARAnchor
///
/// - Parameters:
/// - imageAnchor: ARAnchor
/// - node: SCNNode
func createReferencePlaneForNode(_ anchor: ARPlaneAnchor, node: SCNNode){
//1. Get The Anchors Width & Height
let width = CGFloat(anchor.extent.x)
let height = CGFloat(anchor.extent.z)
//2. Create A Plane Geometry To Cover The ARImageAnchor
planeNode = SCNNode()
let planeGeometry = SCNPlane(width: width, height: height)
planeGeometry.firstMaterial?.diffuse.contents = UIColor.white
planeNode?.opacity = 0.5
planeNode?.geometry = planeGeometry
//3. Rotate The PlaneNode To Horizontal
planeNode?.eulerAngles.x = -.pi/2
//4. The Node Is Centered In The Anchor (0,0,0)
node.addChildNode(planeNode!)
}
//-------------------
//MARK: Model Loading
//-------------------
/// Loads Our Model Based On The Resulting Vector Of Our ARAnchor
///
/// - Parameter worldVector: SCNVector3
func loadModelAtVector(_ worldVector: SCNVector3) {
let modelPath = "ARModels.scnassets/Scatterbug.scn"
//1. Get The Reference To Our SCNScene & Get The Model Root Node
guard let model = SCNScene(named: modelPath),
let pokemonModel = model.rootNode.childNode(withName: "RootNode", recursively: false) else { return }
//2. Scale & Position The Scatterbug
pokemonModel.scale = SCNVector3(0.003, 0.003, 0.003)
pokemonModel.position = worldVector
//3. Add It To Our SCNView
augmentedRealityView.scene.rootNode.addChildNode(pokemonModel)
}
//---------------
//MARK: ARSession
//---------------
/// Sets Up The AR Session With Static Or Dynamic ARImages
func setupARSessionWithStaticImages(){
//1. Set Our Configuration
configuration.detectionImages = staticReferenceImages
configuration.planeDetection = .horizontal
//2. Run The Configuration
augmentedRealitySession.run(configuration, options: [.resetTracking, .removeExistingAnchors])
//3. Set The Session & Delegate
augmentedRealityView?.session = augmentedRealitySession
self.augmentedRealityView?.delegate = self
}
}
Hope it points you in the right direction...