Collide RealityKit 3D model with LiDAR mesh - swift

I have spent many days trying to understand and follow examples without success.
My goal is to place a virtual AR object in the real world previously scanned with the LiDAR sensor. With showSceneUnderstanding enabled I can see the real-time mesh being created, and that works fine.
With a tap gesture I can insert a usdz file, and that works fine too.
Because I set toyModel.physicsBody?.mode = .kinematic and call self.arView.installGestures(for: toyRobot), I can move and scale the model.
Now I want to be able to move the model AND have it collide with the mesh generated by the LiDAR, so that, for example, when I move the model into a scanned wall it is stopped by the mesh.
Here is my complete code:
import UIKit
import RealityKit
import ARKit
class ViewController: UIViewController, ARSessionDelegate {
@IBOutlet var arView: ARView!
var tapRecognizer = UITapGestureRecognizer()
override func viewDidLoad() {
super.viewDidLoad()
self.arView.session.delegate = self
//Scene Understanding options
self.arView.environment.sceneUnderstanding.options.insert([.physics, .collision, .occlusion])
//Only for dev
self.arView.debugOptions.insert(.showSceneUnderstanding)
self.tapRecognizer = UITapGestureRecognizer(target: self, action: #selector(placeObject(_:)))
self.arView.addGestureRecognizer(self.tapRecognizer)
}
@objc func placeObject(_ sender: UITapGestureRecognizer) {
// Perform a ray cast against the mesh (sceneUnderstanding)
// Note: Ray-cast option ".estimatedPlane" with alignment ".any" also takes the mesh into account.
let tapLocation = sender.location(in: arView)
if let result = arView.raycast(from: tapLocation, allowing: .estimatedPlane, alignment: .any).first {
// Load the "Toy robot"
let toyRobot = try! ModelEntity.loadModel(named: "toy_robot_vintage.usdz")
// Add gestures to the toy (only available if physicsBody mode == .kinematic)
self.arView.installGestures(for: toyRobot)
// Toy Anchor to place the toy on surface
let toyAnchor = AnchorEntity(world: result.worldTransform)
toyAnchor.addChild(toyRobot)
// Create a "Physics" model of the toy in order to add physics mode
guard let toyModel = toyAnchor.children.first as? HasPhysics else {
return
}
// Because toyModel is a fresh new model we need to init physics
toyModel.generateCollisionShapes(recursive: true)
toyModel.physicsBody = .init()
// Add the physics body mode
toyModel.physicsBody?.mode = .kinematic
let test = ShapeResource.generateConvex(from: toyRobot.model!.mesh)
toyModel.components[CollisionComponent.self] = CollisionComponent(shapes: [test], mode: .default, filter: .default)
// Finally add the toy anchor to the scene
self.arView.scene.addAnchor(toyAnchor)
}
}
}
Does anyone know if it's possible to achieve this? Many thanks in advance!

Following the previous discussion with @AndyFedoroff, I added a convex raycast so that the placed 3D object always collides with the mesh created by the LiDAR. Here is my full code. I don't know if I'm doing this well... In any case, it still doesn't work.
import UIKit
import RealityKit
import ARKit
class ViewController: UIViewController, ARSessionDelegate {
@IBOutlet var arView: ARView!
var tapRecognizer = UITapGestureRecognizer()
var panGesture = UIPanGestureRecognizer()
var panGestureEntity: Entity? = nil
override func viewDidLoad() {
super.viewDidLoad()
self.arView.session.delegate = self
//Scene Understanding options
self.arView.environment.sceneUnderstanding.options.insert([.physics, .collision, .occlusion])
//Only for dev
self.arView.debugOptions.insert(.showSceneUnderstanding)
self.tapRecognizer = UITapGestureRecognizer(target: self, action: #selector(self.placeObject))
self.arView.addGestureRecognizer(self.tapRecognizer)
self.panGesture = UIPanGestureRecognizer(target: self, action: #selector(self.didPan))
self.arView.addGestureRecognizer(self.panGesture)
}
@objc func placeObject(_ sender: UITapGestureRecognizer) {
// Perform a ray cast against the mesh (sceneUnderstanding)
// Note: Ray-cast option ".estimatedPlane" with alignment ".any" also takes the mesh into account.
let tapLocation = sender.location(in: arView)
if let result = self.arView.raycast(from: tapLocation, allowing: .estimatedPlane, alignment: .any).first {
// Load the "Toy robot"
let toyRobot = try! ModelEntity.loadModel(named: "toy_robot_vintage.usdz")
// Add gestures to the toy (only available if physicsBody mode == .kinematic)
self.arView.installGestures(for: toyRobot)
// Toy Anchor to place the toy on surface
let toyAnchor = AnchorEntity(world: result.worldTransform)
toyAnchor.addChild(toyRobot)
// Create a "Physics" model of the toy in order to add physics mode
guard let model = toyAnchor.children.first as? (Entity & HasPhysics)
else { return }
model.generateCollisionShapes(recursive: true)
model.physicsBody = PhysicsBodyComponent(shapes: [.generateBox(size: .one)],
mass: 1.0,
material: .default,
mode: .kinematic)
// Finally add the toy anchor to the scene
self.arView.scene.addAnchor(toyAnchor)
}
}
@objc public func didPan(_ sender: UIPanGestureRecognizer) {
print(sender.state)
let point = sender.location(in: self.arView)
switch sender.state {
case .began:
if let entity = self.arView.hitTest(point).first?.entity {
self.panGestureEntity = entity
} else {
self.panGestureEntity = nil
}
case .changed:
if let entity = self.panGestureEntity {
if let raycast = arView
.scene
.raycast(origin: .zero, direction: .one, length: 1, query: .all, mask: .all, relativeTo: entity)
.first
{
print("hit", raycast.entity, raycast.distance)
}
}
case .ended:
self.panGestureEntity = nil
default: break;
}
}
}

Try downcasting to Entity & HasPhysics:
guard let model = anchor.children.first as? (Entity & HasPhysics)
else { return }
model.generateCollisionShapes(recursive: true)
model.physicsBody = PhysicsBodyComponent(shapes: [.generateBox(size: .one)],
mass: 1.0,
material: .default,
mode: .kinematic)
Official documentation says:
For non-model entities, generateCollisionShapes(recursive:) method has no effect. Nevertheless, the method is defined for all entities so that you can call it on any entity, and have the calculation propagate recursively to all that entity’s descendants.
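Building on that, here is a minimal, unverified sketch of how the pieces could fit together: the scene-understanding options give the LiDAR mesh a static collision/physics representation, the placed ModelEntity gets generated collision shapes plus a kinematic body, and a CollisionEvents.Began subscription reports contacts with the mesh. Keep in mind that a kinematic body reports contacts but is not stopped by them, so halting the drag at a scanned wall would still need to be handled in your own code.
import RealityKit
import ARKit
import Combine

// Sketch only: place a model so that it can report collisions against the
// LiDAR scene-understanding mesh. The caller must keep the returned token alive.
func place(_ model: ModelEntity, at transform: simd_float4x4, in arView: ARView) -> Cancellable {
    // The reconstructed mesh only takes part in collision/physics with these options set.
    arView.environment.sceneUnderstanding.options.insert([.physics, .collision])

    model.generateCollisionShapes(recursive: true)        // collision shapes from the visual bounds
    model.physicsBody = PhysicsBodyComponent(massProperties: .default,
                                             material: .default,
                                             mode: .kinematic)
    arView.installGestures(for: model)                    // ModelEntity conforms to HasCollision

    let anchor = AnchorEntity(world: transform)
    anchor.addChild(model)
    arView.scene.addAnchor(anchor)

    // Fires when the kinematic body starts touching another collider, including the scene mesh.
    return arView.scene.subscribe(to: CollisionEvents.Began.self, on: model) { event in
        print("collision with:", event.entityB)
    }
}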

Errors: 'Argument passed to call that takes no arguments' & 'Type of expression is ambiguous without more context'

I am trying to render a 2D image on the plane instead of the white/colored plane I currently have. The following code renders the plane as a solid color. I tried the code below to make the 2D image appear over the colored plane instead. The image I am using is in the Assets folder and is a PNG image (966 KB). I have looked through and researched the Apple docs and Stack Overflow searching for how to do this.
import UIKit
import SceneKit
import ARKit
import RealityKit
class ViewController: UIViewController, ARSCNViewDelegate {
@IBOutlet var sceneView: ARSCNView!
override func viewDidLoad() {
super.viewDidLoad()
// Set the view's delegate
sceneView.delegate = self
// Show statistics such as fps and timing information
sceneView.showsStatistics = true
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
// Create a session configuration
let configuration = ARImageTrackingConfiguration()
if let imageToTrack = ARReferenceImage.referenceImages(inGroupNamed: "BdayCardImages", bundle: Bundle.main) {
configuration.trackingImages = imageToTrack
configuration.maximumNumberOfTrackedImages = 1
}
// Run the view's session
sceneView.session.run(configuration)
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
// Pause the view's session
sceneView.session.pause()
}
// MARK: - ARSCNViewDelegate
//this method finds and triggers image plane
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode?
{
let node = SCNNode()
if let imageAnchor = anchor as? ARImageAnchor
{
let plane = SCNPlane(
width: imageAnchor.referenceImage.physicalSize.width,
height: imageAnchor.referenceImage.physicalSize.height
)
plane.firstMaterial?.diffuse.contents =
UIColor(red: 1, green: 1.3, blue: 0.5, alpha: 0.8)
let planeNode = SCNNode(geometry: plane)
if imageAnchor.referenceImage.name == "TamBDYCArd3"
{
if let bdayImage = ARImageAnchor(named: "assets/TamBDYCArd3") //Error here: 'Argument passed to call that takes no arguments'
{
if let imageNode = bdayImage.rootNode.childNode.first //Error here: Type of expression is ambiguous without more context'
{
imageNode.update(with: imageNode)
planeNode.addChildNode(imageNode)
if imageAnchor.referenceImage.name == "TamBDYCArd3" //the png image
{
if let bdayImage = ARImageAnchor(named: "assets/TamBDYCArd3")
{
if let imageNode = bdayImage.rootNode.childNode.first
{
imageNode.update(with: imageNode)
planeNode.addChildNode(imageNode)
planeNode.addChildNode(bdayImage)
}
}
}
}
return node
}
}
Any help is appreciated. I am a newb so thanks for your consideration and time.
I was expecting to render the 2D image over the plane. Thanks
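A possible direction, offered only as an unverified sketch: ARImageAnchor has no init(named:) initializer, which is why the compiler complains above. Instead, the PNG can be used directly as the diffuse contents of the SCNPlane's material inside the standard renderer(_:nodeFor:) delegate method (this assumes the asset in the catalog is also named "TamBDYCArd3"):
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    let node = SCNNode()
    guard let imageAnchor = anchor as? ARImageAnchor,
          imageAnchor.referenceImage.name == "TamBDYCArd3" else { return node }
    let plane = SCNPlane(width: imageAnchor.referenceImage.physicalSize.width,
                         height: imageAnchor.referenceImage.physicalSize.height)
    plane.firstMaterial?.diffuse.contents = UIImage(named: "TamBDYCArd3")  // the PNG from Assets
    let planeNode = SCNNode(geometry: plane)
    planeNode.eulerAngles.x = -.pi / 2   // SCNPlane is vertical by default; lay it flat over the detected card
    node.addChildNode(planeNode)
    return node
}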

Switch video source Agora.io

My Agora app uses an ARView as a custom video source, which I stream using ARVideoKit. How can I implement switching to the front camera?
My initial idea was just to set the local video, but it does nothing:
@objc private func switchCamera() {
captureType = captureType == .ar ? .camera : .ar
setCaptureType(to: captureType)
}
private func stopScene(){
arRecorder.rest()
sceneView.session.pause()
}
private func startScene() {
sceneView.session.run(configuration)
arRecorder.prepare(configuration)
}
private func setCaptureType(to type: CaptureType) {
switch type {
case .ar:
startScene()
agoraKit.disableVideo()
agoraKit.setVideoSource(arVideoSource)
case .camera:
stopScene()
agoraKit.enableVideo()
agoraKit.setVideoSource(nil)
let videoCanvas = AgoraRtcVideoCanvas()
videoCanvas.uid = 0
videoCanvas.renderMode = .hidden
videoCanvas.view = localVideoView
agoraKit.setupLocalVideo(videoCanvas)
}}
Basically, I need to stop the ARSession, probably remove the custom video source, and set the local video as input.
To set the ARView as a video source I followed this tutorial.
You don't need to switch the camera source for Agora; instead, you should update the ARKit configuration to use the front camera:
class ViewController: UIViewController, ARSCNViewDelegate, RenderARDelegate, RecordARDelegate {
weak var cameraFlipBtn : UIButton!
enum cameraFacing {
case front
case back
}
var activeCam: cameraFacing = .back
override func viewDidLoad() {
super.viewDidLoad()
// Configure ARKit Session
let configuration = ARWorldTrackingConfiguration()
configuration.planeDetection = [.horizontal, .vertical]
self.activeCam = .back // set the active camera
// Reverse camera button
if ARFaceTrackingConfiguration.isSupported {
// add reverse camera button
let reverseCameraBtn = UIButton()
reverseCameraBtn.frame = CGRect(x: self.view.bounds.maxX-75, y: 25, width: 50, height: 50)
if let imageReverseCameraBtn = UIImage(named: "cameraFlip") {
reverseCameraBtn.setImage(imageReverseCameraBtn, for: .normal)
}
self.view.insertSubview(reverseCameraBtn, at: 3)
self.cameraFlipBtn = reverseCameraBtn
}
self.cameraFlipBtn.addTarget(self, action: #selector(switchCamera), for: .touchDown)
}
@objc private func switchCamera() {
if self.activeCam == .back {
// switch to front config
let configuration = ARFaceTrackingConfiguration()
configuration.isLightEstimationEnabled = true
// run the config to swap the camera
self.sceneView.session.run(configuration, options: [.resetTracking, .removeExistingAnchors])
self.activeCam = .front
} else {
// switch to back cam config
let configuration = ARWorldTrackingConfiguration()
configuration.planeDetection = [.horizontal, .vertical]
// run the config to swap the camera
self.sceneView.session.run(configuration, options: [.resetTracking, .removeExistingAnchors])
self.activeCam = .back
}
}
}
Also, instead of enableVideo()/disableVideo(), try:
self.agoraKit.enableLocalVideo(true/false)
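For context, here is a literal (untested) application of that suggestion to the question's setCaptureType(to:), keeping the question's own names (arVideoSource, localVideoView, CaptureType):
private func setCaptureType(to type: CaptureType) {
    switch type {
    case .ar:
        startScene()
        agoraKit.enableLocalVideo(false)          // stop Agora's default local capturer
        agoraKit.setVideoSource(arVideoSource)    // push frames from ARVideoKit instead
    case .camera:
        stopScene()
        agoraKit.setVideoSource(nil)              // drop the custom source
        agoraKit.enableLocalVideo(true)           // back to the default capturer
        let videoCanvas = AgoraRtcVideoCanvas()
        videoCanvas.uid = 0
        videoCanvas.renderMode = .hidden
        videoCanvas.view = localVideoView
        agoraKit.setupLocalVideo(videoCanvas)
    }
}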

Reality Composer default Anchor

When you create a scene with Reality Composer, you first have to choose the anchor type (floor, wall, face, or object). This means that when you load the scene, it automatically anchors itself to the specified anchor type.
My question is: is there any way to set the anchor manually from code, so that I could, for example, do a hit test and then anchor the scene to a specific point?
Thanks.
The official documentation has no reference to changing the default anchor at runtime, but from your description it sounds like you could try "Select Object Anchoring to Place a Scene Near Detected Objects", as described here:
https://developer.apple.com/documentation/realitykit/creating_3d_content_with_reality_composer/selecting_an_anchor_for_a_reality_composer_scene
You can easily apply another type of anchor (when implementing hit-testing or ray-casting) using the following code (the default anchor in Reality Composer is .horizontal):
import ARKit
import RealityKit
@IBAction func onTap(_ sender: UITapGestureRecognizer) {
let estimatedPlane: ARRaycastQuery.Target = .estimatedPlane
let alignment: ARRaycastQuery.TargetAlignment = .vertical
let tapLocation: CGPoint = sender.location(in: arView)
let result: [ARRaycastResult] = arView.raycast(from: tapLocation,
allowing: estimatedPlane,
alignment: alignment)
guard let rayCast: ARRaycastResult = result.first
else { return }
let anchor = AnchorEntity(world: rayCast.worldTransform)
anchor.addChild(myScene)
arView.scene.anchors.append(anchor)
}
Or you can place anchors automatically (for example, an ARFaceAnchor for a detected face):
extension ViewController: ARSessionDelegate {
func session(_ session: ARSession, didUpdate anchors: [ARAnchor]) {
guard let faceAnchor = anchors.first as? ARFaceAnchor
else { return }
let anchor = AnchorEntity(anchor: faceAnchor)
// RealityKit's Facial analog
// AnchorEntity(.face).self
anchor.addChild(glassModel)
arView.scene.anchors.append(anchor)
}
}
...or you can place an ARImageAnchor the same way:
extension ViewController: ARSessionDelegate {
func session(_ session: ARSession, didUpdate anchors: [ARAnchor]) {
guard let imageAnchor = anchors.first as? ARImageAnchor,
let _ = imageAnchor.referenceImage.name
else { return }
let anchor = AnchorEntity(anchor: imageAnchor)
// RealityKit's image anchor analog
// AnchorEntity(.image(group: "Group", name: "model")).self
anchor.addChild(imageModel)
arView.scene.anchors.append(anchor)
}
}

Is it possible to detect an object using a CoreML model and find the measurements of that object?

I want to detect object categories like doors and windows using CoreML and ARKit, and I want to find the measurements (height, width, and area) of a door.
How can I detect objects and add an overlay shape on a detected object so that I can find its real-world position and measurements?
Use ARKit's built-in object detection for that task. It's simple and powerful.
With ARKit's object detection you can detect your door (previously scanned on a device).
The following code helps you detect real world objects (like door) and place 3D object or 3D text at ARObjectAnchor position:
import ARKit
extension ViewController: ARSCNViewDelegate {
func renderer(_ renderer: SCNSceneRenderer,
didAdd node: SCNNode,
for anchor: ARAnchor) {
if let _ = anchor as? ARObjectAnchor {
let text = SCNText(string: "SIZE OF THIS OBJECT IS...",
extrusionDepth: 0.05)
text.flatness = 0.5
text.font = UIFont.boldSystemFont(ofSize: 10)
let textNode = SCNNode(geometry: text)
textNode.geometry?.firstMaterial?.diffuse.contents = UIColor.white
textNode.scale = SCNVector3(0.01, 0.01, 0.01)
node.addChildNode(textNode)
}
}
}
And supply the Xcode Resources group with scanned reference objects (.arobject files) of your real-life objects.
class ViewController: UIViewController {
@IBOutlet var sceneView: ARSCNView!
let configuration = ARWorldTrackingConfiguration()
override func viewDidLoad() {
super.viewDidLoad()
sceneView.debugOptions = .showFeaturePoints
sceneView.delegate = self
guard let dObj = ARReferenceObject.referenceObjects(inGroupNamed: "Resources",
bundle: nil)
else {
fatalError("There are no reference objects")
}
configuration.detectionObjects = dObj
sceneView.session.run(configuration)
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
sceneView.session.pause()
}
}
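If measurements are also needed: ARObjectAnchor exposes the scanned reference object's bounding box, so the delegate method above could report an approximate size. A sketch only, assuming the door was scanned at real-world scale as an .arobject:
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
    guard let objectAnchor = anchor as? ARObjectAnchor else { return }
    let extent = objectAnchor.referenceObject.extent     // metres: (width, height, depth)
    let area = extent.x * extent.y                       // rough frontal area of the door
    let text = SCNText(string: String(format: "%.2f m x %.2f m, area %.2f m2", extent.x, extent.y, area),
                       extrusionDepth: 0.05)
    text.font = UIFont.boldSystemFont(ofSize: 10)
    let textNode = SCNNode(geometry: text)
    textNode.scale = SCNVector3(0.01, 0.01, 0.01)
    node.addChildNode(textNode)
}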

Trouble with object recognition in UITapGestureRecognizer

I am trying to call the handleTap() function shown below, and have let result: AnyObject find objects in another class that subclasses SCNScene.
override func viewDidLoad()
{
super.viewDidLoad()
...
// add a tap gesture recognizer
let tapGesture = UITapGestureRecognizer(target: self, action: #selector(handleTap(_:)))
self.sceneView.addGestureRecognizer(tapGesture)
}
//(HANDLES A TAP BY THE USER) NOT WORKING
func handleTap(_ gestureRecognize: UIGestureRecognizer)
{
print("tap")
//retrieve the SCNView
let scnView = self.view as! SCNView
//check what nodes are tapped
let p = gestureRecognize.location(in: scnView)
let hitResults = scnView.hitTest(p, options: [:])
//check that we clicked on at least one object
if hitResults.count > 0
{
//retrieved the first clicked object
let result: AnyObject = hitResults[0]
//get its material
let material = result.node!.geometry!.firstMaterial!
//highlight it
SCNTransaction.begin()
SCNTransaction.animationDuration = 0.2
}
The function runs; however, it is supposed to highlight a node that is located in another class, AppScene, which subclasses SCNScene:
class AppScene: SCNScene
{
//objects are located in here
}
I am trying to find a way to make the handleTap() function search for objects that are children of AppScene, as it doesn't work right now.
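For what it's worth, here is a hedged sketch of one way this could work: if sceneView.scene is set to an instance of AppScene (for example sceneView.scene = AppScene() in viewDidLoad), then hitTest(_:options:) already searches that scene's node tree, so the handler mainly needs @objc for the selector and a completed highlight transaction. Assumptions: sceneView is the SCNView shown on screen, and the tapped node has a geometry with a material.
@objc func handleTap(_ gestureRecognize: UIGestureRecognizer) {
    let p = gestureRecognize.location(in: sceneView)
    guard let result = sceneView.hitTest(p, options: [:]).first,
          let material = result.node.geometry?.firstMaterial else { return }
    SCNTransaction.begin()
    SCNTransaction.animationDuration = 0.2
    SCNTransaction.completionBlock = {
        SCNTransaction.begin()
        SCNTransaction.animationDuration = 0.2
        material.emission.contents = UIColor.black   // fade the highlight back out
        SCNTransaction.commit()
    }
    material.emission.contents = UIColor.red         // highlight the tapped node
    SCNTransaction.commit()
}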