How to create AR labels for ModelEntity in RealityKit? - swift

I have a very simple app which places down a scene from an .rcproject file.
import ARKit
import RealityKit

class ViewController: UIViewController {

    private var marLent: Bool = false

    private lazy var arView: ARView = {
        let arview = ARView()
        arview.translatesAutoresizingMaskIntoConstraints = false
        arview.isUserInteractionEnabled = true
        return arview
    }()

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view.
        let scene = try! Experience.loadScene()
        arView.scene.anchors.append(scene)
        configureUI()
        setupARView()
    }

    private func configureUI() {
        view.addSubview(arView)
        arView.translatesAutoresizingMaskIntoConstraints = false
        NSLayoutConstraint.activate([
            arView.topAnchor.constraint(equalTo: view.topAnchor),
            arView.leadingAnchor.constraint(equalTo: view.leadingAnchor),
            arView.bottomAnchor.constraint(equalTo: view.bottomAnchor),
            arView.trailingAnchor.constraint(equalTo: view.trailingAnchor),
        ])
    }

    private func setupARView() {
        arView.automaticallyConfigureSession = false
        let configuration = ARWorldTrackingConfiguration()
        configuration.planeDetection = [.horizontal]
        configuration.environmentTexturing = .automatic
        arView.session.run(configuration)
    }
}
How could I create a label for the placed entity that looks something like these examples? So basically, have a text that points at the entity, where the text is the entity's name, for example.

There are 4 ways to create info dots with text plates for AR scenes:
the first way – using Autodesk Maya with the pre-installed USD plugin (the most preferable way, because you can apply both animation and Python scripting techniques);
the second one – using Reality Composer (quite a fast way, but you won't be able to exactly replicate the info dots' animation seen in Apple's .reality sample files);
the third one – programmatically in RealityKit (a minimal sketch follows this list);
the fourth way – programmatically, using the Pythonic USD Schema.
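For the third way, here's a minimal RealityKit sketch, assuming the placed entity already exists and has a name; the pointer line from dot to plate is omitted. It generates a 3D text mesh from the entity's name and hovers it above the model's bounds:

import UIKit
import RealityKit

// Adds a floating text label above an entity, using the entity's name.
// A sketch only – call it after the model has been placed in the scene.
func addLabel(to entity: Entity) {
    let textMesh = MeshResource.generateText(
        entity.name,
        extrusionDepth: 0.002,           // thin 3D text
        font: .systemFont(ofSize: 0.05), // font size maps to scene units (meters)
        containerFrame: .zero,
        alignment: .center,
        lineBreakMode: .byWordWrapping
    )
    let material = SimpleMaterial(color: .white, isMetallic: false)
    let labelEntity = ModelEntity(mesh: textMesh, materials: [material])

    // Hover the label slightly above the model's bounding box.
    let bounds = entity.visualBounds(relativeTo: entity)
    labelEntity.position = [0, bounds.extents.y + 0.05, 0]
    entity.addChild(labelEntity)
}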
Nonetheless, for brevity, let's see how we can do it in the Reality Composer app.
Reality Composer's behaviors
In Reality Composer's scene, drag and drop .png files with transparency (8-bit RGBA) to create an info dot and an info plate – each file will be turned into a plane carrying its corresponding image. After that, you can apply Reality Composer's behaviors to any separate part of your model.
Create the first custom behavior with a Scene Start trigger, then add LookAtCamera and Hide actions (when the scene starts, both the cylinder primitive and the info plate must be hidden).
Create the second behavior with a Tap trigger, then add LookAtCamera, Show, Wait and Hide actions (the three actions must be merged together). If you tap an info dot, both hidden objects will be shown with a fade in/out animation.
Final step: save the scene as a .reality file.
Hope now you have an idea of how it's done.
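To use the result at runtime, here's a minimal loading sketch (the "InfoDots" file name and the arView are placeholders):

import RealityKit

// Load the saved .reality file from the app bundle and add it to the scene.
if let url = Bundle.main.url(forResource: "InfoDots", withExtension: "reality"),
   let anchor = try? Entity.loadAnchor(contentsOf: url) {
    arView.scene.addAnchor(anchor)
}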

Related

RealityKit .nonAR installGestures is missing translation and rotation is y axis only

I'm trying to reverse engineer the 3d Scanner App using RealityKit and am having real trouble getting just a basic model working with all gestures. When I run the code below, I get a cube with scale and rotation (about the y axis only), but no translation interaction. I'm trying to figure out how to get rotation about an arbitrary axis as well as translation, like in the 3d Scanner App above. I'm relatively new to iOS and read that one should use RealityKit, as Apple isn't really supporting SceneKit anymore, but am now wondering whether SceneKit would be the way to go, since RealityKit is still young. Or whether anyone knows of an extension to RealityKit's ModelEntity objects to give them better interaction capabilities.
I've got my app taking a scan with the LiDAR sensor and saving it to disk as a .usda mesh, per this tutorial, but when I load the mesh as a ModelEntity and attach gestures to it, I don't get any interaction at all.
The example code below recreates the limited gestures with a box ModelEntity, and I have some commented lines showing where I would load my .usda model from disk; but again, while it renders, it gets no gesture interaction.
Any help appreciated!
// ViewController.swift
import UIKit
import RealityKit

class ViewController: UIViewController {

    var arView: ARView!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view.
        arView = ARView(frame: view.frame, cameraMode: .nonAR, automaticallyConfigureSession: false)
        view.addSubview(arView)

        // create point light
        let pointLight = PointLight()
        pointLight.light.intensity = 10000

        // create light anchor
        let lightAnchor = AnchorEntity(world: [0, 0, 0])
        lightAnchor.addChild(pointLight)
        arView.scene.addAnchor(lightAnchor)

        // eventually want to load my model from disk and give it gestures.
        // guard let scanEntity = try? Entity.loadModel(contentsOf: urlOBJ) else {
        //     print("couldn't load scan in this format")
        //     return
        // }

        // entity to add gestures to
        let cubeMaterial = SimpleMaterial(color: .blue, isMetallic: true)
        let myEntity = ModelEntity(mesh: .generateBox(width: 0.1, height: 0.2, depth: 0.3, cornerRadius: 0.01, splitFaces: false), materials: [cubeMaterial])
        myEntity.generateCollisionShapes(recursive: false)

        let myAnchor = AnchorEntity(world: .zero)
        myAnchor.addChild(myEntity)

        // add collision and interaction
        let scanEntityBounds = myEntity.visualBounds(relativeTo: myAnchor)
        myEntity.collision = CollisionComponent(shapes: [.generateBox(size: scanEntityBounds.extents).offsetBy(translation: scanEntityBounds.center)])

        arView.installGestures(for: myEntity).forEach { gestureRecognizer in
            gestureRecognizer.addTarget(self, action: #selector(handleGesture(_:)))
        }
        arView.scene.addAnchor(myAnchor)

        // without this, get no gestures at all
        let camera = PerspectiveCamera()
        let cameraAnchor = AnchorEntity(world: [0, 0, 0.2])
        cameraAnchor.addChild(camera)
        arView.scene.addAnchor(cameraAnchor)
    }

    @objc private func handleGesture(_ recognizer: UIGestureRecognizer) {
        if recognizer is EntityTranslationGestureRecognizer {
            print("translation!")
        } else if recognizer is EntityScaleGestureRecognizer {
            print("scale!")
        } else if recognizer is EntityRotationGestureRecognizer {
            print("rotation!")
        }
    }
}
To extend ModelEntity's gesture interaction capabilities, set up your own 2D gestures. There are 8 screen gestures in UIKit, and in SwiftUI you have 5 principal gestures, plus the Sequence, Simultaneous and Exclusive variations.
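As one sketch of that idea (hedged: this lives in your view controller, arView is from the question's code, modelEntity stands in for the entity you installed gestures on, and the plane math is just one possible mapping), a UIKit pan gesture can drive translation by unprojecting the touch onto a horizontal plane through the entity:

import UIKit
import RealityKit
import simd

// Attach once, e.g. in viewDidLoad():
// arView.addGestureRecognizer(UIPanGestureRecognizer(target: self, action: #selector(handlePan)))
@objc func handlePan(_ sender: UIPanGestureRecognizer) {
    let point = sender.location(in: arView)
    // A plane through the entity's current world position; with an identity
    // rotation its +Y axis is the plane normal, i.e. a horizontal plane.
    var planeTransform = matrix_identity_float4x4
    planeTransform.columns.3 = SIMD4<Float>(modelEntity.position(relativeTo: nil), 1)
    // Unproject the 2D touch onto that plane and move the entity there.
    if let worldPosition = arView.unproject(point, ontoPlane: planeTransform) {
        modelEntity.setPosition(worldPosition, relativeTo: nil)
    }
}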
From what I have understood, the gestures are working for the box but not for your .usdz file/model. If this is the case, then the issue is that the model does not have a collision mesh (HasCollision). If you are using Reality Composer to edit your models, you could do the following:
click on the model
under the Physics dropdown, click Participate
under collision shape select automatic
Overall, make sure that the model has collision, and cast it in code as having collision:
let myEntity = try? Entity.loadModel(named: "fileName") as! HasCollision
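Alternatively, a sketch that avoids the force cast by generating the collision shapes in code ("fileName" stays a placeholder):

// Generate collision shapes so installGestures(for:) has something to hit-test.
if let scanEntity = try? Entity.loadModel(named: "fileName") {
    scanEntity.generateCollisionShapes(recursive: true)
    arView.installGestures([.translation, .rotation, .scale], for: scanEntity)
}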

Where is the .camera AnchorEntity located?

When adding a child to my AnchorEntity(.camera), it appears as if the child is spawning behind my camera (meaning I can only see the child when I turn around). I have also tried to add a mesh to my anchor directly, but unfortunately ARKit / RealityKit does not render the mesh when you are inside of it (which, because it's centered around the camera, is theoretically always the case; however, it could also be that it's always located behind the screen [where the user is] and I'm never able to see it).
Also, oddly enough the child entity does not move with the camera AnchorEntity despite setting the translation transform to (0,0,0).
My two questions are:
Is the .camera anchor actually located right where the physical iPad / camera is located or is it located further back (perhaps where the user would normally hold the iPad)?
How do you get a child entity of the AnchorEntity(.camera) to move as the iPad / camera moves in real space?
Answer to the first question
In the RealityKit and ARKit frameworks, ARCamera has a pivot point like other entities (nodes) have, and it's located at the point where the lens is attached to the camera body (at bayonet level). This pivot can tether an AnchorEntity(.camera). In other words, the virtual camera and the real-world camera have that pivot point at approximately the same place.
So, if you attach RealityKit's AnchorEntity to the camera's pivot, you place it at the coordinates where the camera's bayonet is located. And this AnchorEntity(.camera) will be tracked automatically, without any need to implement the session(_:didUpdate:) method.
However, if you attach ARKit's ARAnchor to the camera's pivot, you have to implement the session(_:didUpdate:) method to constantly update the position and orientation of that anchor for every ARFrame.
Answer to the second question
If you want to constantly update the model's position in RealityKit at 60 fps (as the ARCamera moves and rotates), use the following approach:
import ARKit
import RealityKit

class ViewController: UIViewController {

    @IBOutlet var arView: ARView!

    override func viewDidLoad() {
        super.viewDidLoad()
        let box = MeshResource.generateBox(size: 0.25)
        let material = SimpleMaterial(color: .systemPink, isMetallic: true)
        let boxEntity = ModelEntity(mesh: box, materials: [material])

        let cameraAnchor = AnchorEntity(.camera)      // ARCamera anchor
        cameraAnchor.addChild(boxEntity)
        arView.scene.addAnchor(cameraAnchor)

        boxEntity.transform.translation = [0, 0, -0.5]  // box offset 0.5 m in front of the camera
    }
}
Or you can use ARKit's good old .currentFrame instance property in the session(_:didUpdate:) delegate method:
extension ViewController: ARSessionDelegate {

    func session(_ session: ARSession, didUpdate anchors: [ARAnchor]) {
        guard let transform = arView.session.currentFrame?.camera.transform
        else { return }
        // Runs on every update, so a fresh anchor is created each time.
        let arkitAnchor = ARAnchor(transform: transform)
        arView.session.add(anchor: arkitAnchor)        // add to session
        let anchor = AnchorEntity(anchor: arkitAnchor)
        anchor.addChild(boxEntity)
        arView.scene.addAnchor(anchor)                 // add to scene
    }
}

class ViewController: UIViewController {

    @IBOutlet var arView: ARView!
    var boxEntity = ModelEntity(...)

    override func viewDidLoad() {
        super.viewDidLoad()
        arView.session.delegate = self                 // set the session's delegate
    }
}
To find out how to save the ARCamera Pose over time, read the following post.

Multi-face detection in RealityKit

I have added content to the face anchor in Reality Composer. Later on, after loading the Experience that I created in Reality Composer, I create a face tracking session like this:
guard ARFaceTrackingConfiguration.isSupported else { return }
let configuration = ARFaceTrackingConfiguration()
configuration.maximumNumberOfTrackedFaces = ARFaceTrackingConfiguration.supportedNumberOfTrackedFaces
configuration.isLightEstimationEnabled = true
arView.session.delegate = self
arView.session.run(configuration, options: [.resetTracking, .removeExistingAnchors])
It is not adding the content to all the faces it detects, and I know it is detecting more than one face because the other faces occlude the content stuck to the first face. Is this a limitation of RealityKit, or am I missing something in the Composer? Actually, it's pretty hard to miss something, since it is so basic and simple.
Thanks.
You can't succeed in multi-face tracking in RealityKit if you use models with an embedded Face Anchor, i.e. models that came from Reality Composer's Face Tracking preset (you can use just one model with a .face anchor, not three). Or you MAY use such models, but you need to delete those embedded AnchorEntity(.face) anchors. There's a better approach, though – simply load the models in .usdz format.
Let's see what Apple documentation says about embedded anchors:
You can manually load and anchor Reality Composer scenes using code, like you do with other ARKit content. When you anchor a scene in code, RealityKit ignores the scene's anchoring information.
Reality Composer supports 5 anchor types: Horizontal, Vertical, Image, Face & Object. It displays a different set of guides for each anchor type to help you place your content. You can change the anchor type later if you choose the wrong option or change your mind about how to anchor your scene.
There are two options:
In new Reality Composer project, deselect the Create with default content checkbox at the bottom left of the action sheet you see at startup.
In RealityKit code, delete the existing Face Anchor and assign a new one. The latter option is not great, because you need to recreate the objects' positions from scratch:
boxAnchor.removeFromParent()
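Continuing that line, a hedged sketch of the re-anchoring (boxAnchor is the scene loaded from Reality Composer; the replacement anchor could equally be built with AnchorEntity(anchor:) inside session(_:didUpdate:), as in the code below):

// Strip the embedded face anchor and re-parent its content
// under a face anchor you control.
boxAnchor.removeFromParent()
let replacementAnchor = AnchorEntity(.face)
for child in boxAnchor.children {
    replacementAnchor.addChild(child.clone(recursive: true))  // positions may still need re-tuning
}
arView.scene.addAnchor(replacementAnchor)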
Nevertheless, I've achieved multi-face tracking using AnchorEntity() with the ARAnchor initializer inside the session(_:didUpdate:) instance method (much like SceneKit's renderer() instance method).
Here's my code:
import ARKit
import RealityKit

extension ViewController: ARSessionDelegate {

    func session(_ session: ARSession, didUpdate anchors: [ARAnchor]) {
        guard let faceAnchor = anchors.first as? ARFaceAnchor
        else { return }
        let anchor1 = AnchorEntity(anchor: faceAnchor)
        let anchor2 = AnchorEntity(anchor: faceAnchor)
        anchor1.addChild(model01)
        anchor2.addChild(model02)
        arView.scene.anchors.append(anchor1)
        arView.scene.anchors.append(anchor2)
    }
}

class ViewController: UIViewController {

    @IBOutlet var arView: ARView!
    let model01 = try! Entity.load(named: "angryFace")       // USDZ file
    let model02 = try! FacialExpression.loadSmilingFace()    // Reality Composer scene

    override func viewDidLoad() {
        super.viewDidLoad()
        arView.session.delegate = self
        guard ARFaceTrackingConfiguration.isSupported else {
            fatalError("Alas, Face Tracking isn't supported")
        }
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        let config = ARFaceTrackingConfiguration()
        config.maximumNumberOfTrackedFaces = 2
        arView.session.run(config)
    }
}

Assigning SCNScene to SCNView - found nil while unwrapping Optional value

Recently, I decided to apply my previous knowledge of C++ and Python to learning Swift, after which I decided to see what I could do with the SceneKit framework. After hours of checking through the documentation and consulting a tutorial, I have to wonder what's going wrong with my code:
class GameViewController: UIViewController {

    var gameView: SCNView!
    var gameScene: SCNScene!
    var cameraNode: SCNNode!

    override func viewDidLoad() {
        super.viewDidLoad()
        initScene()
        initView()
        initCamera()
    }

    func initView() {
        // initialize the game view - this view holds everything else in the game!
        gameView = self.view as! SCNView
        // allow the camera to move to gestures - mainly for testing purposes
        gameView.allowsCameraControl = true
        // use default lighting while still practicing
        gameView.autoenablesDefaultLighting = true
    }

    func initScene() {
        // initialize the scene
        gameScene = SCNScene()
        // set the scene in the gameView object to the scene created by this function
        gameView.scene = gameScene
    }

    func initCamera() {
        // create a node that will become the camera
        cameraNode = SCNNode()
        // since a node can be any object in the scene, this needs to be set up as a camera
        cameraNode.camera = SCNCamera()
        cameraNode.position = SCNVector3(x: 0, y: 5, z: 15)
    }
}
After more checking through the documentation, and making sure that I was now copying from the tutorial directly to get it to work, I still have no luck with this. According to a lot of the other questions I found here on Stack Overflow, it looks like it has something to do with the forced unwrapping (the exclamation points), but I'm not exactly sure why that is.
I've probably been staring the answer in the face combing through this documentation, but I'm not quite seeing what the problem is.
Also, apologies if my comments are a bit long and/or distracting.
You have the following problems:
1) You should re-order the initializations in your viewDidLoad, like so:
initView()   // must run first, so the view exists
initScene()  // you were crashing here on `gameView.scene`, because gameView was nil
initCamera()
2) cameraNode is not attached to the rootNode, so add the following line at the end of initCamera:
gameScene.rootNode.addChildNode(cameraNode)
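Putting both fixes together, a sketch of the corrected flow (only the changed parts shown):

override func viewDidLoad() {
    super.viewDidLoad()
    initView()   // the view must exist before initScene() assigns gameView.scene
    initScene()
    initCamera()
}

func initCamera() {
    cameraNode = SCNNode()
    cameraNode.camera = SCNCamera()
    cameraNode.position = SCNVector3(x: 0, y: 5, z: 15)
    gameScene.rootNode.addChildNode(cameraNode)  // attach the camera to the scene graph
}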

Stereo ARSCNview to make VR and AR mix

I want to make a mix of virtual reality and augmented reality.
The goal is I have a stereo camera (for each eyes).
I tried to put two ARSCNViews in a view controller, but it seems ARKit enables only one ARWorldTrackingSessionConfiguration at a time. How can I do that?
I looked for a way to copy the graphical representation of one view and paste it into another view, but couldn't find one. Please help me find a solution.
I found this link, maybe can it illumine us:
ARKit with multiple users
Here's a sample of my issue:
https://www.youtube.com/watch?v=d6LOqNnYm5s
PS: before you downvote my post, comment why!
The following code is basically what Hal said. I previously wrote a few lines on GitHub that might help you get started. (Simple code, no barrel distortion, no adjustment for the narrow FOV – yet.)
Essentially, we connect the same scene to the second ARSCNView (so both ARSCNViews see the same scene), which means there's no need to get ARWorldTrackingSessionConfiguration working with two ARSCNViews. Then we offset its pointOfView so it's positioned as the second eye.
https://github.com/hanleyweng/iOS-Stereoscopic-ARKit-Template
The ARSession documentation says that ARSession is a shared object.
Every AR experience built with ARKit requires a single ARSession object. If you use an ARSCNView or ARSKView object to easily build the visual part of your AR experience, the view object includes an ARSession instance. If you build your own renderer for AR content, you'll need to instantiate and maintain an ARSession object yourself.
So there's a clue in that last sentence. Instead of two ARSCNView instances, use SCNView and share the single ARSession between them.
I expect this is a common use case, so it's worth filing a Radar to request stereo support.
How to do it right now?
The (singleton) session has only one delegate. You need two different delegate instances, one for each view. You could solve that with an object that sends the delegate messages to each view; solvable but a bit of extra work.
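Here's a sketch of that fan-out object (the class name is made up, and only one callback is shown; a real version would forward every delegate method it cares about, and would probably hold its listeners weakly):

import ARKit

// Receives ARSession callbacks once and forwards them to several listeners.
final class ARSessionDelegateMultiplexer: NSObject, ARSessionDelegate {

    private let delegates: [ARSessionDelegate]

    init(delegates: [ARSessionDelegate]) {
        self.delegates = delegates
    }

    func session(_ session: ARSession, didUpdate frame: ARFrame) {
        // session(_:didUpdate:) is optional on the @objc protocol, hence the `?`.
        delegates.forEach { $0.session?(session, didUpdate: frame) }
    }
}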
There's also the problem of needing two slightly different camera locations, one for each eye, for stereo vision. ARKit uses one camera, placed at the iOS device's location, so you'll have to fuzz that.
Then you have to deal with the different barrel distortions for each eye.
This, for me, adds up to writing my own custom object to intercept ARKit delegate messages, convert the coordinates to what I'd see from two different cameras, and manage the two distinct SCNViews (not ARSCNViews). Or perhaps using one ARSCNView (one eye), intercepting its frame updates, and passing those frames on to an SCNView (the other eye).
File the Radar, post the number, and I'll dupe it.
To accomplish this, please use the following code:
import UIKit
import SceneKit
import ARKit

class ViewController: UIViewController, ARSCNViewDelegate {

    @IBOutlet weak var sceneView: ARSCNView!
    @IBOutlet weak var sceneView2: ARSCNView!

    override func viewDidLoad() {
        super.viewDidLoad()
        sceneView.delegate = self
        sceneView.showsStatistics = true
        let scene = SCNScene(named: "art.scnassets/ship.scn")!
        sceneView.scene = scene
        sceneView.isPlaying = true

        // SceneView2 setup
        sceneView2.scene = scene
        sceneView2.showsStatistics = sceneView.showsStatistics
        // Now sceneView2 starts receiving updates
        sceneView2.isPlaying = true
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        let configuration = ARWorldTrackingConfiguration()
        sceneView.session.run(configuration)
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        sceneView.session.pause()
    }
}
And don't forget to activate .isPlaying instance properties for both ARSCNViews.
Objective-C version of Han's GitHub code, with the sceneViews created programmatically and the y + z positions not updated – all credit to Han:
- (void)setup {
    // left
    leftSceneView = [ARSCNView new];
    leftSceneView.frame = CGRectMake(0, 0, w, h / 2);
    leftSceneView.delegate = self;
    leftSceneView.autoenablesDefaultLighting = true;
    [self.view addSubview:leftSceneView];

    // right
    rightSceneView = [ARSCNView new];
    rightSceneView.frame = CGRectMake(0, h / 2, w, h / 2);
    rightSceneView.playing = true;
    rightSceneView.autoenablesDefaultLighting = true;
    [self.view addSubview:rightSceneView];

    // scene
    SCNScene *scene = [SCNScene new];
    leftSceneView.scene = scene;
    rightSceneView.scene = scene;

    // tracking
    ARWorldTrackingConfiguration *configuration = [ARWorldTrackingConfiguration new];
    configuration.planeDetection = ARPlaneDetectionHorizontal;
    [leftSceneView.session runWithConfiguration:configuration];
}

- (void)renderer:(id<SCNSceneRenderer>)renderer updateAtTime:(NSTimeInterval)time {
    dispatch_async(dispatch_get_main_queue(), ^{
        // update right eye: clone the left point of view and shift it
        // sideways along the camera's local x axis.
        SCNNode *pov = self->leftSceneView.pointOfView.clone;
        SCNQuaternion orientation = pov.orientation;
        GLKQuaternion orientationQuaternion = GLKQuaternionMake(orientation.x, orientation.y, orientation.z, orientation.w);
        GLKVector3 eyePosition = GLKVector3Make(1, 0, 0);
        GLKVector3 rotatedEyePosition = GLKQuaternionRotateVector3(orientationQuaternion, eyePosition);
        SCNVector3 rotatedEyePositionSCNV = SCNVector3Make(rotatedEyePosition.x, rotatedEyePosition.y, rotatedEyePosition.z);
        float mag = 0.066f; // approximate interpupillary distance, in meters
        float rotatedX = pov.position.x + rotatedEyePositionSCNV.x * mag;
        float rotatedY = pov.position.y; // + rotatedEyePositionSCNV.y * mag;
        float rotatedZ = pov.position.z; // + rotatedEyePositionSCNV.z * mag;
        [pov setPosition:SCNVector3Make(rotatedX, rotatedY, rotatedZ)];
        self->rightSceneView.pointOfView = pov;
    });
}