How to add a new scene to an existing anchor and remove the previous scene? - swift

I have an rcproject file with about 12 scenes (500 MB or so). To lessen the load on iOS devices, I tried breaking it apart into separate rcproject files and changing the scene using notification triggers. However, when I do this and add the new scene as a child of the main anchor, the new scene renders in a new spot, breaking the AR experience. There must be a way to add the new scenes at the exact same anchor/position. Alternatively, is there a better way to reduce RAM usage than separating the rcproject?
Here is my ARView:
struct ARViewContainer: UIViewRepresentable {

    func makeUIView(context: Context) -> ARView {
        let arView = ARView(frame: .zero)

        // The experience consists of a "Base" (it acts as a permanent platform
        // for all the scenes to be rendered on)
        let baseAnchor = try! Base.loadIntro()
        let introAnchor = try! IntroSceneOM.loadIntro()
        introAnchor.actions.changeStoriesWithTrigger.onAction = loadStories

        arView.scene.anchors.append(baseAnchor)
        arView.scene.anchors.append(introAnchor)

        func loadStories(_ entity: Entity?) -> Void {
            arView.scene.anchors.remove(introAnchor)
            let storiesAnchor = try! StoriesSceneOM.loadStoriesScene()
            baseAnchor.addChild(storiesAnchor)
        }
        return arView
    }

    func updateUIView(_ uiView: ARView, context: Context) { }
}
EDIT:
Recreated the project using an implementation of Andy Jazz's code.
func makeUIView(context: Context) -> ARView {
    let arView = ARView(frame: .zero)
    var anchor = AnchorEntity()

    let scene01 = try! Experience.loadBoxScene()
    let scene02 = try! Experience.loadBallScene()

    // Base Scene
    let scene03 = try! Experience.loadFloppyScene()

    anchor = AnchorEntity(.plane(.horizontal, classification: .any,
                                  minimumBounds: [0.1, 0.1]))

    scene01.actions.boxTapped.onAction = loadScene02
    scene02.actions.ballTapped.onAction = loadScene01

    anchor.addChild(scene01)
    anchor.addChild(scene03)
    arView.scene.anchors.append(anchor)

    func loadScene02(_ entity: Entity?) -> Void {
        scene01.removeFromParent()
        anchor.addChild(scene02)
    }
    func loadScene01(_ entity: Entity?) -> Void {
        scene02.removeFromParent()
        anchor.addChild(scene01)
    }
    return arView
}
However, I still get the same issue: the anchor moves each time a new scene is added.

The code is quite simple, but the issue of loading scenes with a large number of polygons remains unresolved. At most, the current scene should contain no more than 100K polygons, but ideally it should stay within 50K...70K. Texture resolution should not exceed 2K.
import RealityKit

class ViewController: UIViewController {

    @IBOutlet var arView: ARView!
    @IBOutlet var label: UILabel!

    var anchor = AnchorEntity()
    let scene01 = try! Experience.loadBox()
    let scene02 = try! Experience.loadBall()
    var cube = ModelEntity()
    var sphere = ModelEntity()

    override func viewDidLoad() {
        super.viewDidLoad()

        self.cube = scene01.steelBox?.children[0] as! ModelEntity
        self.sphere = scene02.ball?.children[0] as! ModelEntity

        self.anchor = AnchorEntity(.plane(.horizontal, classification: .any,
                                           minimumBounds: [0.1, 0.1]))
        self.anchor.addChild(cube)
        arView.scene.anchors.append(anchor)

        DispatchQueue.main.asyncAfter(deadline: .now() + 5.0) {
            self.label.text = String(describing: self.anchor.id)
        }
    }

    @IBAction func pressed(_ sender: UIButton) {
        self.cube.removeFromParent()
        self.anchor.addChild(sphere)
        self.label.text = String(describing: self.anchor.id)
    }
}
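As a further way to lessen the load on RAM, the heavy scenes could be loaded on demand rather than up front. Below is a minimal sketch (my addition, not part of the answer above) that uses RealityKit's asynchronous Entity.loadAsync(named:) with Combine; the SceneSwapper type and the "StoriesScene" asset name are hypothetical, and the asset is assumed to be a .usdz or .reality file.

import RealityKit
import Combine

final class SceneSwapper {

    private var cancellable: AnyCancellable?
    private var currentScene: Entity?

    // Loads a heavy asset only when it is needed and parents it to the
    // already-placed anchor, so the anchor itself never moves.
    func swapIn(named assetName: String, on anchor: AnchorEntity) {
        cancellable = Entity.loadAsync(named: assetName)
            .sink(receiveCompletion: { completion in
                if case .failure(let error) = completion {
                    print("Failed to load \(assetName): \(error)")
                }
            }, receiveValue: { [weak self] newScene in
                self?.currentScene?.removeFromParent()   // drop the previous scene
                anchor.addChild(newScene)                // reuse the same anchor
                self?.currentScene = newScene
            })
    }
}

Calling swapIn(named: "StoriesScene", on: baseAnchor) from a notification-trigger handler would then replace the current scene without ever touching baseAnchor, so the placement stays fixed.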

Related

How to record ARView Camera Position and Rotation over time and save it to a file

I have been trying for over two days now to create an ARView that can record the position of the camera in space over time and then save it to a keyframe file. Basically, I want to create an app that lets you record virtual camera movements that can then be used in 3D applications like Autodesk Maya or Cinema 4D to drive a camera. The preferred file output would be anything that can hold a camera object and animate it over time (alternatively, an object that moves over time, which I can then parent my camera to).
Here is the code I have; sorry for it being a bit chaotic, I have tried a LOT of different things... Basically, I try to record the device position and rotation and then save it to an MDL object, but somehow it doesn't animate. I have also tried multiple different file types (some of those didn't support keyframe animation, so that didn't help, but from what I understand, Alembic does).
import SwiftUI
import ARKit
import RealityKit
import ModelIO

struct ARViewContainer: UIViewRepresentable {

    let session = ARSession()
    let delegate = MySessionDelegate()

    func makeUIView(context: Context) -> ARView {
        // Set up the ARView with session
        let arView = ARView(frame: .zero)
        let boxAnchor = try! Experience.loadBox()
        arView.scene.anchors.append(boxAnchor)
        arView.session.delegate = delegate // assign delegate to the session
        return arView
    }

    func updateUIView(_ uiView: ARView, context: Context) {
        // Update the ARView if needed
    }

    func startARSession() {
        // Start the ARSession
        let configuration = ARWorldTrackingConfiguration()
        configuration.planeDetection = [.horizontal, .vertical]
        session.run(configuration, options: [])
    }

    func stopARSession() {
        // Stop the ARSession
        session.pause()
    }
}

class MySessionDelegate: NSObject, ARSessionDelegate {

    var object: MDLMesh?
    let asset = MDLAsset()
    let cameraTransform = MDLTransform()
    var documentsDirectory = FileManager.default.urls(for: .documentDirectory,
                                                       in: .userDomainMask).first!

    func session(_ session: ARSession, didUpdate frame: ARFrame) {
        // Get the camera position and orientation for the current frame
        let transform = frame.camera.transform
        let rotation = frame.camera.eulerAngles
        let position = transform.columns.3
        let elapsedTime = frame.timestamp

        cameraTransform.setTranslation(position[SIMD3(0,1,2)], forTime: elapsedTime)
        cameraTransform.setRotation(rotation, forTime: elapsedTime)
        print("Camera Transform: \(cameraTransform.matrix)")
    }
}

struct Camera: View {
    var body: some View {
        VStack {
            ARViewContainer().onAppear(perform: ARViewContainer().startARSession)
                             .onDisappear(perform: ARViewContainer().stopARSession)

            Button("Export Recording") {
                // Create an MDLAsset with a box representing the camera transform
                let object = MDLMesh(boxWithExtent: .init(0.1, 0.1, 0.1),
                                     segments: .init(10, 10, 10),
                                     inwardNormals: false,
                                     geometryType: .triangles,
                                     allocator: nil)
                object.name = "Camera Transform"
                object.transform = MySessionDelegate().cameraTransform

                let asset = MDLAsset()
                asset.add(object)

                // Export the MDLAsset to a file
                let documentsDirectory = FileManager.default.urls(for: .documentDirectory,
                                                                   in: .userDomainMask).first!
                let fileURL = documentsDirectory.appendingPathComponent("recording.abc")
                try! asset.export(to: fileURL)
            }
        }
    }
}
If there is a completely different way of doing it, please also share. Thanks to everybody in advance for any help!
Recording sixty 4x4 Transform Matrices per second
To write the data to a text file, I encoded the 4x4 transformation matrices (i.e. a complex transform) as JSON. After tapping the Record Transform Values button, the data immediately begins to be written to the toMaya.txt file. For convenience, I put all 16 values of the matrix on the screen (I tested this on an iPad, so use a device with a bigger screen).
The nested lists in the toMaya.txt file can easily be read with a regular Python or MEL script. Take a look at what the nesting looks like: each of the 16 matrix values is of type Float.
[x0,y0,z0,w0] is the first matrix column, [x1,y1,z1,w1] is the second matrix column, etc.
Here's the code:
import SwiftUI
import RealityKit
import Combine

struct ARViewContainer : UIViewRepresentable {

    @Binding var arView: ARView

    func makeUIView(context: Context) -> ARView { return arView }
    func updateUIView(_ view: ARView, context: Context) { }
}

struct ContentView : View {

    @State private var arView = ARView(frame: .zero)
    @State private var subs: [AnyCancellable] = []
    @State private var array: [[[Float]]] = [[ [1,0,0,0], [0,1,0,0],
                                               [0,0,1,0], [0,0,0,1] ]]

    let url = FileManager.default.urls(for: .documentDirectory,
                                        in: .userDomainMask)[0]
                                       .appendingPathComponent("toMaya.txt")

    var body: some View {
        ZStack {
            ARViewContainer(arView: $arView).ignoresSafeArea()

            HStack {
                VStack {
                    Text("\(array.last![0][0])").foregroundColor(.white)
                    Text("\(array.last![0][1])").foregroundColor(.white)
                    Text("\(array.last![0][2])").foregroundColor(.white)
                    Text("\(array.last![0][3])").foregroundColor(.white)
                }
                VStack {
                    Text("\(array.last![1][0])").foregroundColor(.white)
                    Text("\(array.last![1][1])").foregroundColor(.white)
                    Text("\(array.last![1][2])").foregroundColor(.white)
                    Text("\(array.last![1][3])").foregroundColor(.white)
                }
                VStack {
                    Text("\(array.last![2][0])").foregroundColor(.white)
                    Text("\(array.last![2][1])").foregroundColor(.white)
                    Text("\(array.last![2][2])").foregroundColor(.white)
                    Text("\(array.last![2][3])").foregroundColor(.white)
                }
                VStack {
                    Text("\(array.last![3][0])").foregroundColor(.white)
                    Text("\(array.last![3][1])").foregroundColor(.white)
                    Text("\(array.last![3][2])").foregroundColor(.white)
                    Text("\(array.last![3][3])").foregroundColor(.white)
                }
            }
            VStack {
                Button("Record Transform Values") {
                    DispatchQueue.main.async {
                        arView.scene.subscribe(to: SceneEvents.Update.self) { _ in
                            let col = arView.cameraTransform.matrix.columns
                            let mtx: [[Float]] = [
                                [col.0.x, col.0.y, col.0.z, col.0.w],
                                [col.1.x, col.1.y, col.1.z, col.1.w],
                                [col.2.x, col.2.y, col.2.z, col.2.w],
                                [col.3.x, col.3.y, col.3.z, col.3.w]
                            ]
                            array.append(mtx)

                            if let data = try? JSONEncoder().encode(self.array) {
                                guard let str = String(data: data, encoding: .ascii)
                                else { return }
                                do {
                                    try str.write(to: url,
                                                  atomically: true,
                                                  encoding: .ascii)
                                    print(url)
                                } catch {
                                    print(error.localizedDescription)
                                }
                            }
                        }.store(in: &subs)
                    }
                }
                Spacer()
            }
            VStack {
                Spacer()
                Text("\(array.count)").foregroundColor(.white)
            }
        }
    }
}
My toMaya.txt file is waiting for me in the following debugging directory:
file:///var/mobile/Containers/Data/Application/7C675F52-C78B-4252-98B5-3EBD37A3F832/Documents/toMaya.txt
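For completeness, the same nested structure can also be read back on the Swift side with JSONDecoder. Here is a minimal sketch (my addition, assuming the file was written by the recorder above):

import Foundation

// Reads the [[[Float]]] structure written above: an array of 4x4 matrices,
// each stored as four column arrays [x, y, z, w].
let url = FileManager.default.urls(for: .documentDirectory,
                                    in: .userDomainMask)[0]
                                   .appendingPathComponent("toMaya.txt")

if let data = try? Data(contentsOf: url),
   let matrices = try? JSONDecoder().decode([[[Float]]].self, from: data) {
    print("Recorded \(matrices.count) matrices")
    if let last = matrices.last {
        let position = last[3]    // fourth column holds the translation
        print("Last camera position:", position[0], position[1], position[2])
    }
}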

No exact matches in call to initializer for AnchorEntity

Hi, I'm trying to make an AR face tracking project, but I get an error when assigning a face anchor to an AnchorEntity. The error message is "No exact matches in call to initializer". I tried different ways but it didn't work at all. I'm a new Swift learner, could anyone help me with this? Thanks.
func makeCoordinator() -> Coordinator {
    Coordinator(self)
}

class Coordinator: NSObject, ARSessionDelegate {

    var parent: ARViewContainer
    var faceAnchorEntity: AnchorEntity
    var arView: ARView

    init(_ parent: ARViewContainer) {
        self.parent = parent
        self.faceAnchorEntity = AnchorEntity()
        self.arView = ARView()
    }

    func session(_ session: ARSession, didAdd anchors: [ARAnchor]) {
        guard let faceAnhcor = anchors[0] as? ARFaceAnchor else { return }
        parent.viewModel.vertices = faceAnhcor.geometry.vertices
        faceAnchorEntity = AnchorEntity(anchor: faceAnhcor)
        arView.scene.addAnchor(faceAnhcor)
    }
}
You can use RealityKit's native .face target, which is much easier to implement.
import SwiftUI
import RealityKit
import ARKit

func makeUIView(context: Context) -> ARView {
    let arView = ARView(frame: .zero)
    let ball = ModelEntity(mesh: .generateSphere(radius: 0.07))
    arView.session.run(ARFaceTrackingConfiguration())

    let anchor = AnchorEntity(.face)
    anchor.position.y += 0.04
    anchor.addChild(ball)

    arView.scene.addAnchor(anchor)
    return arView
}

How to render a Canonical Face Mesh with RealityKit?

I'm trying to render a face mesh with RealityKit, with no success yet. When ARKit detects a human face, the ARSession generates an ARFaceAnchor, which has a face geometry mesh in it.
But I can't generate a model entity from it.
Could anyone help with this?
Canonical Face Mesh in RealityKit
To programmatically generate and render ARKit's canonical face mesh (an ARFaceGeometry object consisting of 1220 vertices) in RealityKit 2.0, use the following code:
import ARKit
import RealityKit

class ControllerView: UIViewController {

    @IBOutlet var arView: ARView!
    var anchor = AnchorEntity()
    var model = ModelEntity()

    override func viewDidLoad() {
        super.viewDidLoad()
        arView.automaticallyConfigureSession = false
        arView.session.delegate = self

        guard ARFaceTrackingConfiguration.isSupported
        else {
            fatalError("We can't run face tracking config")
        }
        let config = ARFaceTrackingConfiguration()
        config.maximumNumberOfTrackedFaces = 1
        arView.session.run(config)
    }
}
Then create a method that converts the face anchor's sub-properties. Note that I used a for-in loop to convert the indices from [Int16] to [UInt32] (type casting doesn't help here).
extension ControllerView {

    private func nutsAndBoltsOf(_ anchor: ARFaceAnchor) -> MeshDescriptor {

        let vertices: [simd_float3] = anchor.geometry.vertices
        var triangleIndices: [UInt32] = []
        let texCoords: [simd_float2] = anchor.geometry.textureCoordinates

        for index in anchor.geometry.triangleIndices {    // [Int16]
            triangleIndices.append(UInt32(index))
        }
        print(vertices.count)                             // 1220 vertices

        var descriptor = MeshDescriptor(name: "canonical_face_mesh")
        descriptor.positions = MeshBuffers.Positions(vertices)
        descriptor.primitives = .triangles(triangleIndices)
        descriptor.textureCoordinates = MeshBuffers.TextureCoordinates(texCoords)

        return descriptor
    }
}
Finally, implement a delegate method to feed the mesh resource:
extension ControllerView: ARSessionDelegate {

    func session(_ session: ARSession, didAdd anchors: [ARAnchor]) {

        guard let faceAnchor = anchors[0] as? ARFaceAnchor else { return }

        arView.session.add(anchor: faceAnchor)
        self.anchor = AnchorEntity(anchor: faceAnchor)
        self.anchor.scale *= 1.2

        let mesh: MeshResource = try! .generate(from: [nutsAndBoltsOf(faceAnchor)])
        let material = SimpleMaterial(color: .magenta, isMetallic: true)

        self.model = ModelEntity(mesh: mesh, materials: [material])
        self.anchor.addChild(self.model)
        arView.scene.anchors.append(self.anchor)
    }
}
Result (tested on iPad Pro 4th gen in iPadOS 16.2).
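The mesh built in didAdd is a static snapshot of the face. If it should follow facial expressions, one possible extension (my assumption, not part of the answer above, and it must live in the same file as nutsAndBoltsOf(_:)) is to regenerate the mesh every frame in the didUpdate delegate method:

extension ControllerView {

    func session(_ session: ARSession, didUpdate anchors: [ARAnchor]) {

        guard let faceAnchor = anchors.compactMap({ $0 as? ARFaceAnchor }).first
        else { return }

        // Rebuild the descriptor from the updated geometry and swap the new
        // MeshResource into the existing model component.
        if let mesh = try? MeshResource.generate(from: [nutsAndBoltsOf(faceAnchor)]) {
            self.model.model?.mesh = mesh
        }
    }
}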
I also recommend you take a look at the post about visualizing detected planes in RealityKit 2.0.
Merry Christmas!

Problem loading .usdz into a custom Entity class

Is there any way to load a usdz model into a custom entity class?
I tried to cast the returned ModelEntity to my custom class, but it didn't work.
let entity: CustomEntity = try! CustomEntity.load(named: name) as! CustomEntity
UIKit version
import UIKit
import RealityKit

class CustomClass: Entity, HasModel {

    let modelName: String? = "gramophone"
    let myAnchor = AnchorEntity()

    func loader() -> AnchorEntity {
        if let name = self.modelName {
            let modelEntity = try! CustomClass.loadModel(named: name)
            myAnchor.addChild(modelEntity)
        }
        return myAnchor
    }
}

class ViewController: UIViewController {

    @IBOutlet var arView: ARView!
    let modelName: String? = "gramophone"

    override func viewDidLoad() {
        super.viewDidLoad()
        let usdz = CustomClass().loader()
        arView.scene.anchors.append(usdz)
    }
}
SwiftUI version:
import SwiftUI
import RealityKit

class CustomClass: Entity, HasModel {
    func printer() {
        print("I'm inside CustomClass...")
    }
}

struct ARViewContainer: UIViewRepresentable {

    let modelName: String? = "gramophone"

    func makeUIView(context: Context) -> ARView {
        let arView = ARView(frame: .zero)

        typealias CustomEntity = ModelEntity
        var modelEntity = CustomEntity()

        if let name = self.modelName {
            modelEntity = try! CustomClass.loadModel(named: name)
            let anchor = AnchorEntity()
            anchor.addChild(modelEntity)
            arView.scene.anchors.append(anchor)
        }
        return arView
    }

    func updateUIView(_ uiView: ARView, context: Context) {
        CustomClass().printer()
    }
}
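The load methods return plain Entity/ModelEntity instances, never instances of your subclass, so a downcast will always fail. If the goal is to attach custom behavior, a workable pattern (a sketch of my own; GramophoneEntity and spin() are hypothetical, the "gramophone" model name is reused from above) is to wrap the loaded model inside the custom subclass instead of casting it:

import RealityKit
import simd

// A custom Entity that owns a loaded .usdz model as a child,
// instead of trying to downcast the loaded ModelEntity.
class GramophoneEntity: Entity {

    required init() {
        super.init()
    }

    convenience init(named name: String) throws {
        self.init()
        let model = try ModelEntity.loadModel(named: name)   // returns a ModelEntity
        self.addChild(model)                                  // wrap it, don't cast it
    }

    func spin() {
        // Custom behavior lives on the wrapper, not on the loaded model.
        self.orientation = simd_quatf(angle: .pi / 8, axis: [0, 1, 0]) * self.orientation
    }
}

// Usage:
// let gramophone = try GramophoneEntity(named: "gramophone")
// anchor.addChild(gramophone)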

Pass value from SwiftUI to ARKit dynamically

I'm currently working on a personal project using ARKit with Swift.
In this app, a skeleton appears on a detected body and tracks the body's motion.
What I want to know is how to change the position of the skeleton dynamically by touching a slider.
The position of the skeleton is defined in the ARKit class ARDelegateHandler as self.characterOffset = [1, 0, 0], which means 1 m to the right of the detected body.
I want to change characterOffset's x-axis value with a slider in SwiftUI (something like self.characterOffset = [x, 0, 0]).
Here is my code.
ARViewContainer.swift
import SwiftUI
import RealityKit
import ARKit
import Combine

struct ARViewContainer: UIViewRepresentable {

    let characterAnchor = AnchorEntity()
    @Binding var offsetValue: Float
    @ObservedObject var offsetInstance = Offset()

    func makeUIView(context: Context) -> ARView {
        let arView = ARView(frame: .zero)
        arView.session.delegate = context.coordinator
        return arView
    }

    func updateUIView(_ uiView: ARView, context: Context) {
        let configuration = ARBodyTrackingConfiguration()
        uiView.session.run(configuration)
        uiView.scene.addAnchor(characterAnchor)
        print(offsetValue)
    }

    func makeCoordinator() -> ARDelegateHandler {
        ARDelegateHandler(self, anchor: characterAnchor)
    }

    class ARDelegateHandler: NSObject, ARSessionDelegate {

        var arVC: ARViewContainer
        let characterAnchor: AnchorEntity
        var character: BodyTrackedEntity?
        var characterOffset: SIMD3<Float>

        init(_ control: ARViewContainer, anchor: AnchorEntity) {
            self.arVC = control
            self.characterAnchor = anchor
            self.characterOffset = [1, 0, 0]
            super.init()
            setSkeleton()
        }

        func setSkeleton() {
            var cancellable: AnyCancellable? = nil
            cancellable = Entity.loadBodyTrackedAsync(named: "character/robot").sink(
                receiveCompletion: { completion in
                    if case let .failure(error) = completion {
                        print("Error: Unable to load model: \(error.localizedDescription)")
                    }
                    cancellable?.cancel()
                }, receiveValue: { (character: Entity) in
                    if let character = character as? BodyTrackedEntity {
                        character.scale = [1, 1, 1]
                        self.character = character
                        cancellable?.cancel()
                    } else {
                        print("Error: Unable to load model as BodyTrackedEntity")
                    }
                })
        }

        func session(_ session: ARSession, didUpdate anchors: [ARAnchor]) {
            for anchor in anchors {
                guard let bodyAnchor = anchor as? ARBodyAnchor else { continue }

                let bodyPosition = simd_make_float3(bodyAnchor.transform.columns.3)
                characterAnchor.position = bodyPosition + characterOffset
                characterAnchor.orientation = Transform(matrix: bodyAnchor.transform).rotation

                if let character = character, character.parent == nil {
                    characterAnchor.addChild(character)
                }
            }
        }
    }
}
ContentView.swift (the user is expected to move the slider, which changes offsetValue; SlideView is already implemented):
import SwiftUI
import RealityKit
import ARKit
import Combine

struct ContentView : View {

    @State var offsetValue: Float = 0.0
    @ObservedObject var offsetInstance = Offset()

    var body: some View {
        VStack {
            Button(action: { self.offsetInstance.setOffset(offset: 1.0) },
                   label: { Text("check") })
            ZStack(alignment: .bottom) {
                ARViewContainer(offsetValue: $offsetValue)
                SlideView(offSetValue: $offsetValue)
            }
        }
    }
}
Thank you very much for your help!
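One way this could work (a sketch, not an accepted answer): updateUIView(_:context:) is called whenever the bound offsetValue changes, so the slider value can be pushed into the coordinator there and picked up by the next body-anchor update.

// Sketch, assuming ARViewContainer / ARDelegateHandler as defined above.
func updateUIView(_ uiView: ARView, context: Context) {
    let configuration = ARBodyTrackingConfiguration()
    uiView.session.run(configuration)
    uiView.scene.addAnchor(characterAnchor)

    // Forward the slider value; the next session(_:didUpdate:) call
    // repositions the skeleton with the new x-offset.
    context.coordinator.characterOffset = [offsetValue, 0, 0]
}

Starting the session could also be moved into makeUIView so it is not restarted on every update, but that is a separate concern.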