How to detect 2D images using ARKit and RealityKit - Swift

I want to detect 2D images using ARKit and RealityKit. I don't want to use SceneKit, because many of my implementations are based on RealityKit, and I couldn't find any examples of detecting images with RealityKit. I referred to Apple's sample code at https://developer.apple.com/documentation/arkit/detecting_images_in_an_ar_experience, but it uses SceneKit and ARSCNViewDelegate.
let arConfiguration = ARWorldTrackingConfiguration()
arConfiguration.planeDetection = [.vertical, .horizontal]
arConfiguration.isLightEstimationEnabled = true
arConfiguration.environmentTexturing = .automatic

if let referenceImages = ARReferenceImage.referenceImages(inGroupNamed: "sanitzer", bundle: nil) {
    arConfiguration.maximumNumberOfTrackedImages = 1
    arConfiguration.detectionImages = referenceImages
}
self.session.run(arConfiguration, options: [.resetTracking, .removeExistingAnchors])
I have implemented ARSessionDelegate, but I'm not able to detect the image:
func session(_ session: ARSession, didAdd anchors: [ARAnchor]) {
    // How to capture the image anchor?
}

func session(_ session: ARSession, didUpdate anchors: [ARAnchor]) {
    // How to capture the image anchor?
}
Apple's sample uses ARSCNViewDelegate to capture the detected images. What is the equivalent of ARSCNViewDelegate in RealityKit? How do I detect an ARImageAnchor?

In an ARKit/RealityKit project, use the following code for the session() instance methods:

import ARKit
import RealityKit

class ViewController: UIViewController, ARSessionDelegate {

    func session(_ session: ARSession, didUpdate anchors: [ARAnchor]) {
        guard let imageAnchor = anchors.first as? ARImageAnchor,
              let _ = imageAnchor.referenceImage.name
        else { return }

        let anchor = AnchorEntity(anchor: imageAnchor)
        // Add a previously created Model Entity to the anchor
        anchor.addChild(model)
        arView.scene.anchors.append(anchor)
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        arView.session.delegate = self
        resetTrackingConfig()
    }

    func resetTrackingConfig() {
        guard let refImg = ARReferenceImage.referenceImages(inGroupNamed: "Sub",
                                                            bundle: nil)
        else { return }

        let config = ARWorldTrackingConfiguration()
        config.detectionImages = refImg
        config.maximumNumberOfTrackedImages = 1

        let options = [ARSession.RunOptions.removeExistingAnchors,
                       ARSession.RunOptions.resetTracking]
        arView.session.run(config, options: ARSession.RunOptions(options))
    }
}
And take into consideration: the folder holding the reference images (in .png or .jpg format) must be an AR Resource Group, which has the extension .arresourcegroup.
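
Alternatively, RealityKit can anchor content to a detected image without any session delegate at all, using an image-based target. A minimal sketch, assuming the same "Sub" resource group (the image name here is a placeholder):

import RealityKit

let imageAnchor = AnchorEntity(.image(group: "Sub", name: "yourImageName"))
imageAnchor.addChild(model)                  // `model` is your ModelEntity
arView.scene.anchors.append(imageAnchor)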

Related

How to set SCNView camera from ARFrame camera?

I created an SCNView in my ViewController, and I have ARFrame information. How do I set the SCNView camera node so that it matches the ARFrame camera? I don't want to use ARSCNView.
An ARSession can run as a standalone entity; it doesn't have to be connected to a view. Use the following code to get what you expect:
import SceneKit
import ARKit

class ViewController: UIViewController {

    @IBOutlet var sceneKitView: SCNView!

    let session = ARSession()
    var arCameraTransform: simd_float4x4?
    let scnCamera = SCNNode()

    override func viewDidLoad() {
        super.viewDidLoad()

        sceneKitView.scene = SCNScene()
        sceneKitView.backgroundColor = .black

        let cylinder = SCNCylinder(radius: 0.1, height: 1)
        cylinder.firstMaterial?.diffuse.contents = UIColor.red
        let node = SCNNode(geometry: cylinder)
        node.position.z = -0.75
        sceneKitView.scene?.rootNode.addChildNode(node)

        scnCamera.camera = SCNCamera()
        sceneKitView.scene?.rootNode.addChildNode(scnCamera)
        sceneKitView.pointOfView = scnCamera     // render through the AR-driven camera

        self.session.delegate = self
        self.session.run(ARWorldTrackingConfiguration())
    }
}

...

extension ViewController: ARSessionDelegate {

    func session(_ session: ARSession, didUpdate frame: ARFrame) {
        self.arCameraTransform = frame.camera.transform
        scnCamera.simdTransform = self.arCameraTransform!
        print(sceneKitView.pointOfView?.simdTransform.columns.3 as Any)
    }
}
P.S.
This works for .landscape orientation...
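If you need other orientations as well, a possible extension (an assumption, not part of the original answer) is to derive the pose from the view matrix for the current interface orientation, and to copy ARKit's projection matrix so that field of view and aspect ratio match the ARFrame exactly:

func session(_ session: ARSession, didUpdate frame: ARFrame) {
    let orientation: UIInterfaceOrientation = .portrait   // or read it from the window scene
    let viewportSize = sceneKitView.bounds.size

    // The view matrix is world-to-camera, so invert it to get the camera pose
    scnCamera.simdTransform = frame.camera.viewMatrix(for: orientation).inverse

    // Match ARKit's camera intrinsics for this orientation
    let projection = frame.camera.projectionMatrix(for: orientation,
                                                   viewportSize: viewportSize,
                                                   zNear: 0.001,
                                                   zFar: 1000)
    scnCamera.camera?.projectionTransform = SCNMatrix4(projection)
}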

How to chain multiple collada animations using Swift in Xcode?

I am having trouble loading four Collada animations in a sequence. With this code, the animations all start at the same time, and there is no sequencing between them. I would like to start with SU.dae, then FTW.dae, and so on, as one long animation.
Does anyone know how to properly fix this issue?
import UIKit
import SceneKit
import ARKit

class ViewController: UIViewController, ARSCNViewDelegate {

    @IBOutlet var sceneView: ARSCNView!

    var animations = [String: CAAnimation]()
    var Stand_Up: Bool = true

    override func viewDidLoad() {
        super.viewDidLoad()

        // Set the view's delegate
        sceneView.delegate = self

        // Show statistics such as fps and timing information
        sceneView.showsStatistics = true

        // Create a new scene
        let scene = SCNScene()

        // Set the scene to the view
        sceneView.scene = scene

        // Load the DAE animations
        loadAnimations(sceneName: "art.scnassets/animation/SU.dae")
        loadAnimations(sceneName: "art.scnassets/animation/FTW.dae")
        loadAnimations(sceneName: "art.scnassets/animation/R.dae")
        loadAnimations(sceneName: "art.scnassets/animation/BAE.dae")
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)

        // Create a session configuration
        let configuration = ARWorldTrackingConfiguration()

        // Run the view's session
        sceneView.session.run(configuration)
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)

        // Pause the view's session
        sceneView.session.pause()
    }

    func loadAnimations(sceneName: String) {
        // Load the character in the idle animation
        let idleScene = SCNScene(named: sceneName)!

        // This node will be the parent of all the animation models
        let node = SCNNode()

        // Add all the child nodes to the parent node
        for child in idleScene.rootNode.childNodes {
            node.addChildNode(child)
        }

        // Set up some properties
        node.position = SCNVector3(0, -1, -2)
        node.scale = SCNVector3(0.2, 0.2, 0.2)

        // Add the node to the scene
        sceneView.scene.rootNode.addChildNode(node)
        animateEntireNodeTreeOnce(mostRootNode: node)
    }

    func animateEntireNodeTreeOnce(mostRootNode node: SCNNode) {
        onlyAnimateThisNodeOnce(node)
        for childNode in node.childNodes {
            animateEntireNodeTreeOnce(mostRootNode: childNode)
        }
    }

    func onlyAnimateThisNodeOnce(_ node: SCNNode) {
        if node.animationKeys.count > 0 {
            for key in node.animationKeys {
                let animation = node.animation(forKey: key)!
                animation.repeatCount = 1
                animation.duration = 5
                animation.isRemovedOnCompletion = false
                node.removeAllAnimations()
                node.addAnimation(animation, forKey: key)
            }
        }
    }

    func session(_ session: ARSession, didFailWithError error: Error) {
        // Present an error message to the user
    }

    func sessionWasInterrupted(_ session: ARSession) {
        // Inform the user that the session has been interrupted, for example, by presenting an overlay
    }

    func sessionInterruptionEnded(_ session: ARSession) {
        // Reset tracking and/or remove existing anchors if consistent tracking is required
    }
}
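One possible approach (a sketch, not an answer from the original thread): play the clips back-to-back by offsetting each animation's beginTime by the total duration of the clips before it. This assumes loadAnimations() is extended to store each clip's CAAnimation in the animations dictionary (the posted code declares it but never fills it), and that all clips target the same character node:

func playAnimationsInSequence(keys: [String], on node: SCNNode) {
    var startTime = CACurrentMediaTime()
    for key in keys {
        guard let animation = animations[key] else { continue }
        animation.beginTime = startTime              // wait until the previous clip ends
        animation.repeatCount = 1
        animation.isRemovedOnCompletion = false
        node.addAnimation(animation, forKey: key)
        startTime += animation.duration              // the next clip starts after this one
    }
}

// Usage, e.g. after all four scenes are loaded:
// playAnimationsInSequence(keys: ["SU", "FTW", "R", "BAE"], on: characterNode)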

Reality Composer default Anchor

When you create a scene with Reality Composer, you first have to choose an anchor type (floor, wall, face, or object). This means that when you load the scene, it automatically places itself at the specified anchor.
My question is: is there any way to set this manually from code, so that I could, for example, do a hit test and then anchor the scene to a specific point manually?
Thanks.
The official documentation has no reference to changing the default anchor at runtime, but from your description it sounds like you could try "Select Object Anchoring to Place a Scene Near Detected Objects", as described here:
https://developer.apple.com/documentation/realitykit/creating_3d_content_with_reality_composer/selecting_an_anchor_for_a_reality_composer_scene
You can easily apply another anchor type (when implementing hit-testing or ray-casting) using the following code (the default anchor in Reality Composer is .horizontal):
import ARKit
import RealityKit

@IBAction func onTap(_ sender: UITapGestureRecognizer) {
    let estimatedPlane: ARRaycastQuery.Target = .estimatedPlane
    let alignment: ARRaycastQuery.TargetAlignment = .vertical
    let tapLocation: CGPoint = sender.location(in: arView)

    let result: [ARRaycastResult] = arView.raycast(from: tapLocation,
                                                   allowing: estimatedPlane,
                                                   alignment: alignment)
    guard let rayCast: ARRaycastResult = result.first
    else { return }

    let anchor = AnchorEntity(world: rayCast.worldTransform)
    anchor.addChild(myScene)     // `myScene` is your Reality Composer scene
    arView.scene.anchors.append(anchor)
}
Or you can place anchors automatically (for example, an ARFaceAnchor for a detected face):
extension ViewController: ARSessionDelegate {

    func session(_ session: ARSession, didUpdate anchors: [ARAnchor]) {
        guard let faceAnchor = anchors.first as? ARFaceAnchor
        else { return }

        let anchor = AnchorEntity(anchor: faceAnchor)
        // RealityKit's facial analog:
        // AnchorEntity(.face).self
        anchor.addChild(glassModel)
        arView.scene.anchors.append(anchor)
    }
}
...or you can place an ARImageAnchor the same way:
extension ViewController: ARSessionDelegate {

    func session(_ session: ARSession, didUpdate anchors: [ARAnchor]) {
        guard let imageAnchor = anchors.first as? ARImageAnchor,
              let _ = imageAnchor.referenceImage.name
        else { return }

        let anchor = AnchorEntity(anchor: imageAnchor)
        // RealityKit's image anchor analog:
        // AnchorEntity(.image(group: "Group", name: "model")).self
        anchor.addChild(imageModel)
        arView.scene.anchors.append(anchor)
    }
}
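Note (an assumption, not spelled out in the original answer): the ARFaceAnchor variant above is only delivered while a face-tracking configuration is running on a device with a TrueDepth camera:

guard ARFaceTrackingConfiguration.isSupported else { return }
arView.session.delegate = self
arView.session.run(ARFaceTrackingConfiguration(),
                   options: [.resetTracking, .removeExistingAnchors])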

Is it possible to detect object using CoreML model and find measurement of that object?

I want to detect object categories like doors and windows using CoreML and ARKit, and I want to find the measurements (height, width, and area) of a door.
How can I detect such objects and add an overlay shape on them, so that I can find the real-world position and measurements of the object?
Use ARKit's built-in object detection algorithm for that task. It's simple and powerful.
With ARKit's object detection you can detect your door (preliminarily scanned or shot on a smartphone).
The following code helps you detect real-world objects (like a door) and place a 3D object or 3D text at the ARObjectAnchor's position:
import ARKit

extension ViewController: ARSCNViewDelegate {

    func renderer(_ renderer: SCNSceneRenderer,
                  didAdd node: SCNNode,
                  for anchor: ARAnchor) {
        if let _ = anchor as? ARObjectAnchor {
            let text = SCNText(string: "SIZE OF THIS OBJECT IS...",
                               extrusionDepth: 0.05)
            text.flatness = 0.5
            text.font = UIFont.boldSystemFont(ofSize: 10)

            let textNode = SCNNode(geometry: text)
            textNode.geometry?.firstMaterial?.diffuse.contents = UIColor.white
            textNode.scale = SCNVector3(0.01, 0.01, 0.01)
            node.addChildNode(textNode)
        }
    }
}
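To answer the measurement part: the scanned reference object carries its bounding-box size in meters, so a hypothetical extension of the renderer above could format the extent into the text. Keep in mind that extent (and the area formula) describes the scanned bounding box, not the precise door geometry:

if let objectAnchor = anchor as? ARObjectAnchor {
    let extent = objectAnchor.referenceObject.extent        // simd_float3, in meters
    let sizeString = String(format: "W %.2f m, H %.2f m, area %.2f sq m",
                            extent.x, extent.y, extent.x * extent.y)
    let text = SCNText(string: sizeString, extrusionDepth: 0.05)
    // ...style and attach the text node as shown above
}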
And supply an Xcode resource group named Resources with scanned .arobject files of your real-life objects.
class ViewController: UIViewController {

    @IBOutlet var sceneView: ARSCNView!
    let configuration = ARWorldTrackingConfiguration()

    override func viewDidLoad() {
        super.viewDidLoad()

        sceneView.debugOptions = .showFeaturePoints
        sceneView.delegate = self

        guard let dObj = ARReferenceObject.referenceObjects(inGroupNamed: "Resources",
                                                            bundle: nil)
        else {
            fatalError("There's no reference object")
        }
        configuration.detectionObjects = dObj
        sceneView.session.run(configuration)
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        sceneView.session.pause()
    }
}
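To produce the .arobject files in the first place, you scan the object beforehand. A minimal sketch of the scanning side (Apple's "Scanning and Detecting 3D Objects" sample app shows the full workflow; transform, center and extent below are placeholders you'd compute from the scanned region):

let scanConfig = ARObjectScanningConfiguration()
scanConfig.planeDetection = .horizontal
sceneView.session.run(scanConfig, options: .resetTracking)

// Later, extract a reference object from the scanned volume:
sceneView.session.createReferenceObject(transform: transform,
                                        center: center,
                                        extent: extent) { referenceObject, error in
    // Export `referenceObject` to an .arobject file via export(to:previewImage:)
}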

How to add multiple objects for AR detection

I added an AR function to my app to detect our products. One object is working, but I want to add multiple .arobject files. I scanned some objects and added them to my AR resource group.
I created a product.sks and added a label plus background.
My first question: how can I fix a label to one object? I have two .arobjects at the moment, so I need two different labels. How can I assign the correct label to each object?
My second question: at the moment my HU label is fixed and doesn't move when I'm moving my iPhone. I tried to change some positions etc., but it's always fixed.
How can I make it move?
Thanks in advance. I hope my concern is detailed enough.
import UIKit
import SceneKit
import SpriteKit
import ARKit

class ARViewController: UIViewController, ARSCNViewDelegate {

    @IBOutlet var sceneView: ARSCNView!

    override func viewDidLoad() {
        super.viewDidLoad()
        sceneView.delegate = self
        sceneView.showsStatistics = true
        let scene = SCNScene(named: "art.scnassets/scene.scn")!
        sceneView.scene = scene
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        let configuration = ARWorldTrackingConfiguration()
        configuration.detectionObjects = ARReferenceObject.referenceObjects(inGroupNamed: "Module",
                                                                            bundle: Bundle.main)!
        sceneView.session.run(configuration)
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        sceneView.session.pause()
    }

    func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
        let node = SCNNode()

        if let objectAnchor = anchor as? ARObjectAnchor {
            let plane = SCNPlane(width: CGFloat(objectAnchor.referenceObject.extent.x * 0.8),
                                 height: CGFloat(objectAnchor.referenceObject.extent.y * 0.5))
            plane.cornerRadius = plane.width * 0.125
            let displayScene = SKScene(fileNamed: "product")
            plane.firstMaterial?.diffuse.contents = displayScene
            plane.firstMaterial?.isDoubleSided = true
            plane.firstMaterial?.diffuse.contentsTransform = SCNMatrix4Translate(SCNMatrix4MakeScale(1, -1, 1), 0, 1, 0)

            let planeNode = SCNNode(geometry: plane)
            planeNode.position = SCNVector3Make(objectAnchor.referenceObject.center.x,
                                                objectAnchor.referenceObject.center.y + 0.12,
                                                objectAnchor.referenceObject.center.z)
            node.addChildNode(planeNode)
        }
        return node
    }

    func session(_ session: ARSession, didFailWithError error: Error) {
        // Present an error message to the user
    }

    func sessionWasInterrupted(_ session: ARSession) {
        // Inform the user that the session has been interrupted, for example, by presenting an overlay
    }

    func sessionInterruptionEnded(_ session: ARSession) {
        // Reset tracking and/or remove existing anchors if consistent tracking is required
    }
}
For the first question: use anchor.referenceObject.name to tell the detected objects apart.
For the second: move how? If you want the label to always face the phone, apply a billboard constraint.
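Both suggestions combined in one sketch. It assumes each scanned object in the resource group is named after a matching .sks file, e.g. an object named "productA" and a SpriteKit scene "productA.sks" (this naming convention is hypothetical):

func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    let node = SCNNode()
    guard let objectAnchor = anchor as? ARObjectAnchor,
          let objectName = objectAnchor.referenceObject.name   // set in the AR resource group
    else { return node }

    let extent = objectAnchor.referenceObject.extent
    let plane = SCNPlane(width: CGFloat(extent.x * 0.8),
                         height: CGFloat(extent.y * 0.5))
    // Pick the label scene that matches the detected object
    plane.firstMaterial?.diffuse.contents = SKScene(fileNamed: objectName)
    plane.firstMaterial?.isDoubleSided = true

    let planeNode = SCNNode(geometry: plane)
    planeNode.position = SCNVector3(objectAnchor.referenceObject.center.x,
                                    objectAnchor.referenceObject.center.y + 0.12,
                                    objectAnchor.referenceObject.center.z)
    // Billboard constraint: the label rotates to face the camera as the phone moves
    let billboard = SCNBillboardConstraint()
    billboard.freeAxes = .Y                  // rotate around the vertical axis only
    planeNode.constraints = [billboard]

    node.addChildNode(planeNode)
    return node
}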