I'm adding a scene to my ARView that contains one entity. I was wondering how I would be able to get the distance between the entity's anchor and the camera. I know how to get the current position of the camera, but the position I'm getting for arScene must be wrong, since subtracting the two positions doesn't yield the right result. Here is some code below.
import RealityKit
class ViewController: UIViewController, ARSessionDelegate {
let arScene = try! TestProj.loadScene()
@IBOutlet var sceneView: ARView!
override func viewDidLoad() {
...
sceneView.scene.addAnchor(arScene)
...
}
func session(_ session: ARSession, didUpdate frame: ARFrame) {
// Calculate distance here
let cameraPos = frame.camera.transform.columns.3
let entityPos = // Position of entity's anchor
let distance = // Find distance here
}
}
You need to perform a convex ray cast against all the geometry in RealityKit's scene for a ray between two end points:
import UIKit
import RealityKit
class ViewController: UIViewController {
@IBOutlet var arView: ARView!
override func viewDidLoad() {
super.viewDidLoad()
let entity = ModelEntity(mesh: .generateBox(size: 0.4))
entity.name = "Cube"
let anchor = AnchorEntity(world: [0,0,0])
anchor.addChild(entity)
arView.scene.anchors.append(anchor)
// For every entity that could be hit,
// we must generate a collision shape.
entity.generateCollisionShapes(recursive: true)
}
@IBAction func onTap(_ sender: UITapGestureRecognizer) {
let query: CollisionCastQueryType = .nearest
let mask: CollisionGroup = .default
guard let camera = arView.session.currentFrame?.camera else { return }
let cameraPosition: SIMD3<Float> = [camera.transform.columns.3.x,
camera.transform.columns.3.y,
camera.transform.columns.3.z]
let raycasts: [CollisionCastHit] = arView.scene.raycast(
from: cameraPosition,
to: [0, 0, 0],
query: query,
mask: mask,
relativeTo: nil)
guard let raycast: CollisionCastHit = raycasts.first
else { return }
print(raycast.distance) // Distance from the ray origin to the hit
print(raycast.entity.name) // The entity that was hit
print(raycast.position) // The position of the hit
}
}
Also, as @maxxfrazer suggested, you can access camera transforms more easily:
let translate = arView.cameraTransform.translation
let x = translate.x
let y = translate.y
let z = translate.z
let transform: SIMD3<Float> = [x, y, z]
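If you only need the straight-line distance rather than a hit test, you can also compute it directly. A minimal sketch, assuming you keep a reference to the entity (for instance the "Cube" model created above):
// Not the ray-cast approach above: simd_distance measures the straight
// line between the camera and the entity's position in world space.
let cameraPosition = arView.cameraTransform.translation
let entityPosition = entity.position(relativeTo: nil)    // world-space position
let distanceToEntity = simd_distance(cameraPosition, entityPosition)
print(distanceToEntity)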
I have set a virtual object in the rear camera view. I want to move that object using facial expressions relative to the world origin, and measure the displacement angles of the virtual object.
Is that possible using ARKit or RealityKit?
Use the following solution. First, set up a configuration:
import RealityKit
import ARKit
class ViewController: UIViewController, ARSessionDelegate {
@IBOutlet var arView: ARView!
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
arView.session.delegate = self
arView.automaticallyConfigureSession = false
let config = ARFaceTrackingConfiguration()
config.isWorldTrackingEnabled = true // Simultaneous tracking
arView.session.run(config)
}
}
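Note that isWorldTrackingEnabled requires a device where ARFaceTrackingConfiguration.supportsWorldTracking returns true, so it's worth guarding on that capability first. A minimal sketch (an addition, not part of the original answer):
// Check device capabilities before enabling simultaneous tracking.
guard ARFaceTrackingConfiguration.isSupported else { return }
let config = ARFaceTrackingConfiguration()
if ARFaceTrackingConfiguration.supportsWorldTracking {
    config.isWorldTrackingEnabled = true   // front-camera face tracking + world tracking
}
arView.session.run(config)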
Run your transform animation when a defined facial expression occurs:
func facialExpression(anchor: ARFaceAnchor) {
let eyeUpLeft = anchor.blendShapes[.eyeLookUpLeft]
let eyeUpRight = anchor.blendShapes[.eyeLookUpRight]
if ((eyeUpLeft?.decimalValue ?? 0.0) +
(eyeUpRight?.decimalValue ?? 0.0)) > 0.75 {
// ModelEntity's animation goes here
}
}
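As for the "ModelEntity's animation goes here" comment, here is a minimal sketch of a transform animation, assuming model is a ModelEntity you have already anchored in the scene (the angle and the offset are just example values):
// Rotate the entity 90 degrees around Y and raise it 10 cm when the user looks up.
var transform = model.transform
transform.rotation = transform.rotation * simd_quatf(angle: .pi / 2, axis: [0, 1, 0])
transform.translation.y += 0.1
model.move(to: transform, relativeTo: model.parent, duration: 0.5)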
Delegate's method (running at 60 fps):
func session(_ session: ARSession, didUpdate anchors: [ARAnchor]) {
guard let faceAnchor = anchors.first as? ARFaceAnchor else { return }
self.facialExpression(anchor: faceAnchor)
}
You can see the answer to your second question HERE.
I’m trying to render a face mesh with RealityKit, no success yet. So when ARKit detected a human face, then ARSession generates an ARFaceAnchor which has a face geometry mesh in it.
But it cannot being generated as a model entity.
Could anyone help on this?
Canonical Face Mesh in RealityKit
To programmatically generate and render ARKit's canonical face mesh (an ARFaceGeometry object consisting of 1220 vertices) in RealityKit 2.0, use the following code:
import ARKit
import RealityKit
class ControllerView: UIViewController {
@IBOutlet var arView: ARView!
var anchor = AnchorEntity()
var model = ModelEntity()
override func viewDidLoad() {
super.viewDidLoad()
arView.automaticallyConfigureSession = false
arView.session.delegate = self
guard ARFaceTrackingConfiguration.isSupported
else {
fatalError("We can't run face tracking config")
}
let config = ARFaceTrackingConfiguration()
config.maximumNumberOfTrackedFaces = 1
arView.session.run(config)
}
}
Then create a method for converting the face anchor's sub-properties. Note that I used a for-in loop to convert the indices from [Int16] to [UInt32] (a plain type cast doesn't help here).
extension ControllerView {
private func nutsAndBoltsOf(_ anchor: ARFaceAnchor) -> MeshDescriptor {
let vertices: [simd_float3] = anchor.geometry.vertices
var triangleIndices: [UInt32] = []
let texCoords: [simd_float2] = anchor.geometry.textureCoordinates
for index in anchor.geometry.triangleIndices { // [Int16]
triangleIndices.append(UInt32(index))
}
print(vertices.count) // 1220 vertices
var descriptor = MeshDescriptor(name: "canonical_face_mesh")
descriptor.positions = MeshBuffers.Positions(vertices)
descriptor.primitives = .triangles(triangleIndices)
descriptor.textureCoordinates = MeshBuffers.TextureCoordinates(texCoords)
return descriptor
}
}
And, finally, let's run a delegate method to feed the mesh resource:
extension ControllerView: ARSessionDelegate {
func session(_ session: ARSession, didAdd anchors: [ARAnchor]) {
guard let faceAnchor = anchors.first as? ARFaceAnchor else { return }
arView.session.add(anchor: faceAnchor)
self.anchor = AnchorEntity(anchor: faceAnchor)
self.anchor.scale *= 1.2
let mesh: MeshResource = try! .generate(from: [nutsAndBoltsOf(faceAnchor)])
let material = SimpleMaterial(color: .magenta, isMetallic: true)
self.model = ModelEntity(mesh: mesh, materials: [material])
self.anchor.addChild(self.model)
arView.scene.anchors.append(self.anchor)
}
}
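If you also want the mesh to follow the user's facial expressions over time, the same descriptor can be regenerated in session(_:didUpdate:) and written back into the model component. A sketch along the same lines (not part of the original answer; note that rebuilding a 1220-vertex mesh every frame has a measurable cost, so you may want to throttle it):
extension ControllerView {
    func session(_ session: ARSession, didUpdate anchors: [ARAnchor]) {
        guard let faceAnchor = anchors.first as? ARFaceAnchor else { return }
        // Rebuild the mesh from the updated ARFaceGeometry and swap it in.
        guard let mesh = try? MeshResource.generate(from: [nutsAndBoltsOf(faceAnchor)])
        else { return }
        self.model.model?.mesh = mesh
    }
}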
Result (tested on iPad Pro 4th gen in iPadOS 16.2).
I also recommend you take a look at the post about visualizing detected planes in RealityKit 2.0.
Merry Christmas!
I want to achieve the billboard effect in RealityKit (the plane always looks at the camera). I used the Entity.look() method, but the result is weird; I can't even see the plane. The script I used is below. So, what is the problem?
struct ARViewContainer: UIViewRepresentable {
func makeUIView(context: Context) -> ARView {
let arView = ARView(frame: .zero)
let config = ARWorldTrackingConfiguration()
config.planeDetection = .horizontal
arView.session.run(config, options:[ ])
arView.session.delegate = arView
arView.createPlane()
return arView
}
func updateUIView(_ uiView: ARView, context: Context) { }
}
var planeMesh = MeshResource.generatePlane(width: 0.15, height: 0.15)
var planeMaterial = SimpleMaterial(color: .white, isMetallic: false)
var planeEntity = ModelEntity(mesh: planeMesh, materials: [planeMaterial])
var arCameraPostion : SIMD3<Float>!
var isPlaced = false
extension ARView : ARSessionDelegate{
func createPlane(){
let planeAnchor = AnchorEntity(plane:.horizontal)
planeAnchor.addChild(planeEntity)
self.scene.addAnchor(planeAnchor)
//planeAnchor.transform.rotation = simd_quatf(angle: .pi, axis: [0,1,0])
}
public func session(_ session: ARSession, didUpdate frame: ARFrame){
guard let arCamera = session.currentFrame?.camera else { return }
if isPlaced {
arCameraPostion = SIMD3(arCamera.transform.columns.3.x,0,arCamera.transform.columns.3.z)
planeEntity.look(at: arCameraPostion, from: planeEntity.position, upVector: [0, 1, 0],relativeTo: nil)
}
}
public func session(_ session: ARSession, didAdd anchors: [ARAnchor]) {
isPlaced = true
}
}
session(_:didUpdate:) method
Try the following logic to implement a "billboard" behavior for the RealityKit camera. You can use this code as a starting point; it generates a rotation of the model around its local Y axis, based on the camera position.
import RealityKit
import ARKit
class ViewController: UIViewController {
@IBOutlet var arView: ARView!
var model = Entity()
override func viewDidLoad() {
super.viewDidLoad()
arView.session.delegate = self
let config = ARWorldTrackingConfiguration()
arView.session.run(config)
self.model = try! ModelEntity.load(named: "drummer")
let anchor = AnchorEntity(world: [0, 0, 0])
anchor.addChild(self.model)
arView.scene.anchors.append(anchor)
}
}
The model's pivot point must be at its center (not at some distance from the model); see the pivot workaround sketch after the delegate code below.
extension ViewController: ARSessionDelegate {
func session(_ session: ARSession, didUpdate frame: ARFrame) {
let camTransform: float4x4 = arView.cameraTransform.matrix
let alongXZPlane: simd_float4 = camTransform.columns.3
let yaw: Float = atan2(alongXZPlane.x - model.position.x,
alongXZPlane.z - model.position.z)
print(yaw)
// Identity matrix 4x4
var positionAndScale = matrix_identity_float4x4
// position
positionAndScale.columns.3.z = -0.25
// scale
positionAndScale.columns.0.x = 0.01
positionAndScale.columns.1.y = 0.01
positionAndScale.columns.2.z = 0.01
// orientation matrix
let orientation = Transform(pitch: 0, yaw: yaw, roll: 0).matrix
// matrices multiplication
let transform = simd_mul(positionAndScale, orientation)
self.model.transform.matrix = transform
}
}
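Regarding the pivot note above: if your asset's pivot is offset, one common workaround (a sketch, not part of the original answer; the offset value is hypothetical and must be measured from your model) is to wrap the loaded model in an intermediate entity in viewDidLoad and drive that parent's transform from the delegate method instead:
// In viewDidLoad -- wrap the loaded model in a pivot entity.
let pivot = Entity()
pivot.addChild(self.model)
anchor.addChild(pivot)
// Hypothetical offset: shift the model so its visual center sits at the
// pivot's origin, then apply the billboard transform to `pivot` rather
// than to `model`.
self.model.position = [0, -0.05, 0]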
subscribe(to:on:_:) method
Alternatively, you can implement a subscription to the event stream.
import RealityKit
import Combine
class ViewController: UIViewController {
@IBOutlet var arView: ARView!
var model = Entity()
var subs: [AnyCancellable] = []
override func viewDidLoad() {
super.viewDidLoad()
self.model = try! ModelEntity.load(named: "drummer")
let anchor = AnchorEntity(world: [0, 0, 0])
anchor.addChild(self.model)
arView.scene.anchors.append(anchor)
arView.scene.subscribe(to: SceneEvents.Update.self) { _ in
let camTransform: float4x4 = self.arView.cameraTransform.matrix
let alongXZPlane: simd_float4 = camTransform.columns.3
let yaw: Float = atan2(alongXZPlane.x - self.model.position.x,
alongXZPlane.z - self.model.position.z)
var positionAndScale = matrix_identity_float4x4
positionAndScale.columns.3.z = -0.25
positionAndScale.columns.0.x = 0.01
positionAndScale.columns.1.y = 0.01
positionAndScale.columns.2.z = 0.01
let orientation = Transform(pitch: 0, yaw: yaw, roll: 0).matrix
let transform = simd_mul(positionAndScale, orientation)
self.model.transform.matrix = transform
}.store(in: &subs)
}
}
I added a purchased 3D zombie model, and I want to add it multiple times so there is a group of zombies chasing the player (his camera). The DAE file (zombie) I have has a couple of animations (attack, walk, run and so on). I added a couple of these zombie models and they all appear at the positions I wanted them to appear (previously determined coordinates stored in the zombies array).
When they appear on the screen they play all the animations as they are stored in the DAE file.
And now my question: is it possible to add this DAE zombie model but play only a specified frame range, for example frames 2-122? My code so far (I deleted what was unnecessary):
import UIKit
import CoreLocation
import SceneKit
import ARKit
@available(iOS 11.0, *)
class VRViewController: UIViewController, CLLocationManagerDelegate
{
/* VARIABLES */
var number:Int = 10
var speed:Int = 4
var zombies = [[AnyObject]]()
var timer = Timer()
var nodes = [SCNNode]()
let configuration = ARWorldTrackingConfiguration()
@IBOutlet weak var sceneView: ARSCNView!
override func viewWillAppear(_ animated: Bool)
{
super.viewWillAppear(animated)
// Run the view's session
sceneView.session.run(configuration)
}
override func viewDidLoad()
{
super.viewDidLoad()
// Set the view's delegate
sceneView.delegate = self as? ARSCNViewDelegate
// Create a new scene
let scene = SCNScene()
// Set the scene to the view
sceneView.scene = scene
sceneView.autoenablesDefaultLighting = true
/* sceneView.debugOptions = [.showConstraints, .showLightExtents, ARSCNDebugOptions.showFeaturePoints, ARSCNDebugOptions.showWorldOrigin] */
}
override func viewDidAppear(_ animated: Bool)
{
setZombies()
timer = Timer.scheduledTimer(timeInterval: 1, target: self, selector: (#selector(VRViewController.restartSession)), userInfo: nil, repeats: true)
}
override func didReceiveMemoryWarning()
{
super.didReceiveMemoryWarning()
}
/* MAIN FUNCTIONS */
func setZombies()
{
for i in 0..<number
{
let zombiePlane = zombies[i][0] as! CGPoint
let thetaPlane = zombies[i][1] as! Double
let xPlane = abs(zombiePlane.x - center.x)
let yPlane = abs(zombiePlane.y - center.y)
let distance = sqrt((xPlane*xPlane) + (yPlane*yPlane))
var theta3D:Double = thetaPlane * (180/Double.pi) - diff2D - 90 /* degrees */
theta3D = theta3D * (Double.pi/180) /* radians */
let x3D = Float(distance) * Float(cos(theta3D))
let z3D = Float(distance) * Float(sin(theta3D))
addZombies(i:i, x: x3D, y: -1.5, z: z3D)
}
}
func addZombies(i:Int, x: Float, y: Float, z: Float) {
guard let zombieScene = SCNScene(named: "art.scnassets/zombie/StrongZombie.DAE") else { return }
let zombieNode = SCNNode()
let zombieSceneChildNodes = zombieScene.rootNode.childNodes
for childNode in zombieSceneChildNodes {
zombieNode.addChildNode(childNode)
}
zombieNode.position = SCNVector3(x, y, z)
zombieNode.scale = SCNVector3(0.1, 0.1, 0.1)
sceneView.scene.rootNode.addChildNode(zombieNode)
nodes.append(zombieNode)
}
@objc func restartSession()
{
/* self.sceneView.session.run(configuration, options: [.resetTracking]) */
for i in 0..<number
{
let theta3D = atan2(nodes[i].position.z, nodes[i].position.x)
let movement = (Float(speed)/5)
print(speed)
let distance = sqrt((nodes[i].position.x)*(nodes[i].position.x) + (nodes[i].position.z)*(nodes[i].position.z)) - movement
let x3D = Float(distance) * Float(cos(theta3D))
let z3D = Float(distance) * Float(sin(theta3D))
nodes[i].position = SCNVector3(x:x3D, y:-10, z:z3D)
}
}
/* SESSION FUNCTIONS */
func session(_ session: ARSession, didFailWithError error: Error)
{
// Present an error message to the user
}
func sessionWasInterrupted(_ session: ARSession)
{
// Inform the user that the session has been interrupted, for example, by presenting an overlay
}
func sessionInterruptionEnded(_ session: ARSession)
{
// Reset tracking and/or remove existing anchors if consistent tracking is required
}
}
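One possible way to play only a frame range, not taken from the original post: load the CAAnimation from the DAE with SCNSceneSource and trim it with timeOffset plus an enclosing group's duration. The animation identifier "animation-1" and the 30 fps rate below are assumptions you would need to verify against your own DAE file:
func loadClip(fromFrame start: Int, toFrame end: Int, fps: Double = 30) -> CAAnimation? {
    guard let url = Bundle.main.url(forResource: "art.scnassets/zombie/StrongZombie", withExtension: "DAE"),
          let source = SCNSceneSource(url: url, options: nil),
          let full = source.entryWithIdentifier("animation-1", withClass: CAAnimation.self) // assumed identifier
    else { return nil }
    // Start playback `start/fps` seconds into the full animation and cut it off
    // after (end - start) frames by limiting the wrapping group's duration.
    full.timeOffset = Double(start) / fps
    let clip = CAAnimationGroup()
    clip.animations = [full]
    clip.duration = Double(end - start) / fps
    clip.repeatCount = .greatestFiniteMagnitude
    return clip
}
Then, in addZombies(i:x:y:z:), remove the animations that come baked into the imported child nodes (childNode.removeAllAnimations()) and attach the trimmed clip instead, e.g. if let clip = loadClip(fromFrame: 2, toFrame: 122) { zombieNode.addAnimation(clip, forKey: "walk") }.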
I have two ARSCNView sessions side by side for a stereo view. I am trying to create a box in the stereo view and then make it spin.
Everything works fine until I move the parentNode using self.parentNode.runAction.
The movement only occurs in the right-side view (SceneView2); no movement occurs in the left-side view. The views are also offset. I need the movement to be synchronized in both the left and right views.
Thanks.
Here is the code:
import UIKit
import ARKit
import SceneKit
import CoreLocation
import GLKit
class ViewController1: UIViewController, ARSCNViewDelegate {
@IBOutlet weak var sceneView: ARSCNView!
@IBOutlet weak var SceneView2: ARSCNView!
@IBOutlet weak var Label: UILabel!
var parentNode: SCNNode!
override func viewDidLoad() {
super.viewDidLoad()
addBox()
// Set the view's delegate
sceneView.delegate = self
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
let configuration = ARWorldTrackingConfiguration()
configuration.worldAlignment = .gravityAndHeading
configuration.planeDetection = .horizontal
sceneView.session.run(configuration)
movebox()
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
sceneView.session.pause()
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Release any cached data, images, etc that aren't in use.
}
func session(_ session: ARSession, didFailWithError error: Error) {
// Present an error message to the user
}
func sessionWasInterrupted(_ session: ARSession) {
// Inform the user that the session has been interrupted, for example, by presenting an overlay
}
func sessionInterruptionEnded(_ session: ARSession) {
// Reset tracking and/or remove existing anchors if consistent tracking is required
}
// UPDATE EVERY FRAME:
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
DispatchQueue.main.async {
self.updateFrame()
//self.movebox()
}
}
func updateFrame() {
// Clone pointOfView for Second View
let pointOfView : SCNNode = (sceneView.pointOfView?.clone())!
// Determine Adjusted Position for Right Eye
let orientation : SCNQuaternion = pointOfView.orientation
let orientationQuaternion : GLKQuaternion = GLKQuaternionMake(orientation.x, orientation.y, orientation.z, orientation.w)
let eyePos : GLKVector3 = GLKVector3Make(1.0, 0.0, 0.0)
let rotatedEyePos : GLKVector3 = GLKQuaternionRotateVector3(orientationQuaternion, eyePos)
let rotatedEyePosSCNV : SCNVector3 = SCNVector3Make(rotatedEyePos.x, rotatedEyePos.y, rotatedEyePos.z)
let mag : Float = 0.066 // This is the value for the distance between two pupils (in metres). The Interpupilary Distance (IPD).
pointOfView.position.x += rotatedEyePosSCNV.x * mag
pointOfView.position.y += rotatedEyePosSCNV.y * mag
pointOfView.position.z += rotatedEyePosSCNV.z * mag
// Set PointOfView for SecondView
SceneView2.pointOfView = pointOfView
}
func addBox() {
let sideMaterial = SCNMaterial()
sideMaterial.diffuse.contents = UIColor.orange
sideMaterial.locksAmbientWithDiffuse = true;
let box = SCNBox(width: 0.3, height: 0.6, length: 0.1, chamferRadius: 0.005)
box.materials = [sideMaterial, sideMaterial, sideMaterial, sideMaterial, sideMaterial, sideMaterial]
let boxNode = SCNNode()
boxNode.geometry = box
boxNode.position = SCNVector3(0, 0, -0.2)
let scene = SCNScene()
scene.rootNode.addChildNode(boxNode)
parentNode = scene.rootNode
parentNode.position = SCNVector3(0, 0, -1.0)
sceneView.scene = scene
// Set up SceneView2 (Right Eye)
SceneView2.scene = scene
SceneView2.showsStatistics = sceneView.showsStatistics
SceneView2.isPlaying = true // Turn on isPlaying to ensure this ARSCNView receives updates.
}
func movebox() {
DispatchQueue.main.async {
let rotate = SCNAction.rotateBy(x: 0, y: 5, z: 0, duration: 20)
let moveSequence = SCNAction.sequence([rotate])
let moveLoop = SCNAction.repeatForever(moveSequence)
self.parentNode.runAction(moveLoop)
}
}
}
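One thing worth checking (a sketch of a possible cause rather than a confirmed fix): parentNode is set to the scene's rootNode, and animating or repositioning the root node is a frequent source of trouble in SceneKit. Running the action on a dedicated container node, shared through the one scene graph that both views render, keeps the animation identical in both views. Inside addBox(), something like:
let scene = SCNScene()
let container = SCNNode()                     // animate this node, not the root node
container.position = SCNVector3(0, 0, -1.0)
container.addChildNode(boxNode)
scene.rootNode.addChildNode(container)
parentNode = container                        // movebox() now rotates a regular child node
sceneView.scene = scene
SceneView2.scene = scene                      // both views render the same scene graph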