Implement a crosshair-like behaviour in RealityKit (Swift)

What I want to achieve: attach a sphere to the camera position (so that it always stays at the center of the screen as the device moves) and detect when it is on top of other AR objects, in order to trigger other actions/behaviours on those objects.
Approach: I have created the sphere and attached it to the center of the screen as shown below:
@IBOutlet var arView: ARView!

override func viewDidLoad() {
    super.viewDidLoad()
    let mesh = MeshResource.generateSphere(radius: 0.1)
    let sphere = ModelEntity(mesh: mesh)
    let anchor = AnchorEntity(.camera)
    sphere.setParent(anchor)
    arView.scene.addAnchor(anchor)
    sphere.transform.translation.z = -0.75
}
Next step: perform a hit-test or a raycast in session(_:didUpdate:).

let results = arView.hitTest(CGPoint(x: 0.5, y: 0.5), query: .all, mask: .default)
// normalised center; the 2D position of the camera (our sphere) in the view's coordinate system

But I am constantly getting the ground plane as my result with this approach. Is there something I am missing, or is there a different approach to achieving this?
Note: in case something is wrong with my basic scene: I want to track an image and add content on top of the image marker, so I built the scene in Reality Composer and use the .rcproject in Xcode. I have also enabled the collision property for all the overlaid items.

Try the following solution:
import ARKit
import RealityKit

class ViewController: UIViewController {

    @IBOutlet var arView: ARView!
    var sphere: ModelEntity?

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
        let touch = arView.center
        let results: [CollisionCastHit] = arView.hitTest(touch)

        if let result: CollisionCastHit = results.first {
            if result.entity.name == "Cube" && sphere?.isAnchored == true {
                print("BOOM!")
            }
        }
    }

    override func viewDidLoad() {
        super.viewDidLoad()

        // Crosshair
        let mesh01 = MeshResource.generateSphere(radius: 0.01)
        sphere = ModelEntity(mesh: mesh01)
        sphere?.transform.translation.z = -0.15
        let cameraAnchor = AnchorEntity(.camera)
        sphere?.setParent(cameraAnchor)
        arView.scene.addAnchor(cameraAnchor)

        // Model for collision
        let mesh02 = MeshResource.generateBox(size: 0.3)
        let box = ModelEntity(mesh: mesh02, materials: [SimpleMaterial()])
        box.generateCollisionShapes(recursive: true)
        box.name = "Cube"
        let planeAnchor = AnchorEntity(.plane(.any,
                                              classification: .any,
                                              minimumBounds: [0.2, 0.2]))
        box.setParent(planeAnchor)
        arView.scene.addAnchor(planeAnchor)
    }
}
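A note on the question's hit-test: ARView's hitTest(_:query:mask:) expects a point in the view's coordinate system, not a normalized one, so CGPoint(x: 0.5, y: 0.5) queries a spot near the top-left corner rather than the center; use arView.center instead. If you prefer continuous detection in session(_:didUpdate:) over touch handling, a minimal sketch could look like this (assuming arView.session.delegate = self is set in viewDidLoad):

extension ViewController: ARSessionDelegate {

    func session(_ session: ARSession, didUpdate frame: ARFrame) {
        // cast from the screen center every frame (view coordinates, not normalized)
        let results: [CollisionCastHit] = arView.hitTest(arView.center)

        if let hit = results.first, hit.entity.name == "Cube" {
            print("Crosshair is over the cube")
        }
    }
}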

Related

How to change the color of a draggable and gravitational UIView when it touches the bottom edge of the screen

I'm coding a simple game: it starts with a blue square that falls after a few seconds; the user can drag it and drop it, and the square will fall again.
I would like to use a function that changes the color of the blue square when it touches the bottom edge of the screen.
Here is the code:
import UIKit
import CoreMotion

class CanvasViewController: UIViewController {

    @IBOutlet weak var blueSquare: UIView!
    var animator: UIDynamicAnimator!
    var gravity: UIGravityBehavior!
    var motion: CMMotionManager!
    var queue: OperationQueue!   // used for updating UI objects with motion
    var panGesture = UIPanGestureRecognizer()
    let colors = [UIColor.blue, UIColor.red, UIColor.yellow, UIColor.green]

    override func viewDidLoad() {
        super.viewDidLoad()
        queue = OperationQueue.current
        animator = UIDynamicAnimator(referenceView: self.view)
        gravity = UIGravityBehavior(items: [blueSquare])
        motion = CMMotionManager()
        panGesture = UIPanGestureRecognizer(target: self, action: #selector(CanvasViewController.draggedView(_:)))
        blueSquare.isUserInteractionEnabled = true
        blueSquare.addGestureRecognizer(panGesture)
        animator.addBehavior(gravity)

        // the object that responds to collisions
        let collision = UICollisionBehavior(items: [blueSquare])
        // the boundary, AKA the borders; in this case the full view controller's view
        collision.addBoundary(withIdentifier: "borders" as NSCopying, for: UIBezierPath(rect: self.view.frame))
        animator.addBehavior(collision)

        // elasticity (UIKit expects values in the 0...1 range)
        let bounce = UIDynamicItemBehavior(items: [blueSquare])
        bounce.elasticity = 8
        animator.addBehavior(bounce)   // the behavior only takes effect once added to the animator
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        self.tabBarController?.tabBar.isHidden = true
    }

    @objc func draggedView(_ sender: UIPanGestureRecognizer) {
        self.view.bringSubviewToFront(blueSquare)
        let translation = sender.translation(in: self.view)
        blueSquare.center = CGPoint(x: blueSquare.center.x + translation.x,
                                    y: blueSquare.center.y + translation.y)
        sender.setTranslation(CGPoint.zero, in: self.view)
        animator.updateItem(usingCurrentState: blueSquare)
    }
}
Your UICollisionBehavior is what creates the boundary at the bottom edge of the screen and makes the square bounce. So you need to set the UICollisionBehavior's delegate in order to know when to change the color:
let colors = [UIColor.blue, UIColor.red, UIColor.yellow, UIColor.green]
var currentColorIndex = 0

override func viewDidLoad() {
    // your other code

    // the object that responds to collisions
    let collision = UICollisionBehavior(items: [blueSquare])
    collision.collisionDelegate = self   // set the collision delegate
    // the boundary, AKA the borders; in this case the full view controller's view
    collision.addBoundary(withIdentifier: "borders" as NSCopying, for: UIBezierPath(rect: self.view.frame))
    animator.addBehavior(collision)
}
Then implement the delegate method that changes the view's color:
extension CanvasViewController: UICollisionBehaviorDelegate {

    func collisionBehavior(_ behavior: UICollisionBehavior, beganContactFor item: UIDynamicItem, withBoundaryIdentifier identifier: NSCopying?, at p: CGPoint) {
        // called when the square touches an edge and the collision starts
        currentColorIndex = currentColorIndex + 1 >= colors.count ? 0 : currentColorIndex + 1
        blueSquare.backgroundColor = colors[currentColorIndex]
    }

    func collisionBehavior(_ behavior: UICollisionBehavior, endedContactFor item: UIDynamicItem, withBoundaryIdentifier identifier: NSCopying?) {
        // called when the square bounces off again
    }
}
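One caveat: the "borders" boundary traces the whole frame, so beganContactFor fires on every edge, not only the bottom one. If you want the color change only at the bottom edge, a small guard on the contact point should be enough (a sketch; the 1-point tolerance is arbitrary):

func collisionBehavior(_ behavior: UICollisionBehavior, beganContactFor item: UIDynamicItem, withBoundaryIdentifier identifier: NSCopying?, at p: CGPoint) {
    // ignore contacts that aren't at (or very near) the bottom edge
    guard p.y >= view.bounds.height - 1 else { return }
    currentColorIndex = currentColorIndex + 1 >= colors.count ? 0 : currentColorIndex + 1
    blueSquare.backgroundColor = colors[currentColorIndex]
}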

RealityKit – Convert CGPoint to a SIMD<Float>

I am using RealityKit, and when I tap on the screen I just want to add a box at the place where I tapped. It can be in the middle of the screen or anywhere else. I will adjust the Z axis so the object is placed 1 meter away from me. But I am having a hard time converting a CGPoint to the relevant RealityKit coordinates.
@objc func handleTap(_ recognizer: UITapGestureRecognizer) {
    guard let view = view else { return }
    let location = recognizer.location(in: view)
    // convert location to a SIMD3<Float> so I can create an anchor
}
Any ideas?
Model on a detected plane
CGPoint is an XY-point on the iPhone's screen, so there's no need to convert it to an XYZ-point directly. If you need a 3D point on a detected plane for accommodating your model, all you have to do is generate it from an ARRaycastResult (or an ARHitTestResult in case you're using ARKit + SceneKit).
import RealityKit
import ARKit

var arView = ARView(frame: .zero)

@objc func tappingScreen(_ sender: UITapGestureRecognizer) {
    // raycast from the tapped point (use arView.center instead to pin it to the screen center)
    let results: [ARRaycastResult] = arView.raycast(from: sender.location(in: arView),
                                                    allowing: .estimatedPlane,
                                                    alignment: .horizontal)
    if let result: ARRaycastResult = results.first {
        let model = ModelEntity(mesh: .generateSphere(radius: 0.02))
        let anchor = AnchorEntity(world: result.worldTransform)
        anchor.addChild(model)
        arView.scene.anchors.append(anchor)
    }
}
Model at camera's position
Creating a model exactly where the ARCamera is located in the current frame is super easy:
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
    let model = ModelEntity(mesh: .generateSphere(radius: 0.02))
    let anchor = AnchorEntity(world: arView.cameraTransform.matrix)
    anchor.addChild(model)
    model.position.z = -0.5   // 50 cm offset in front of the camera
    arView.scene.anchors.append(anchor)
}
Also, you can multiply the camera's transform by a desired offset:
var translation = matrix_identity_float4x4
translation.columns.3.z = -0.5   // 50 cm offset

// multiply the camera matrix by the offset matrix
let transform = simd_mul(arView.cameraTransform.matrix, translation)
model.transform.matrix = transform
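Putting that fragment into a self-contained helper might look like this (a sketch; the function name and parameters are mine, not from the original answer):

func placeModelInFrontOfCamera(_ model: ModelEntity, in arView: ARView, offset: Float = -0.5) {
    // build an offset matrix along the camera's local Z axis
    var translation = matrix_identity_float4x4
    translation.columns.3.z = offset

    // multiply the camera matrix by the offset matrix
    let transform = simd_mul(arView.cameraTransform.matrix, translation)

    let anchor = AnchorEntity(world: transform)
    anchor.addChild(model)
    arView.scene.anchors.append(anchor)
}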

AR objects not anchoring or sizing correctly in RealityKit

I have an AR scene with two objects, one brown cow and one black one. They're both supposed to be displayed in the scene, spaced a little apart. I originally only had the brown cow, which was just a little bit too big. I changed something, which I can't remember, and now my scene starts from the inside of the cow, and I can't exit the cow's body; it seems to move around when I do. I think the issue is a positive number for the minimum bounds, but I'm not entirely sure. I've set the Z axis for the cow as well. How can I make the cow a little bit smaller and spawn it about 5-7 yards away from me?
import UIKit
import RealityKit
import ARKit

class ViewController: UIViewController {

    @IBOutlet var arView: ARView!

    override func viewDidLoad() {
        super.viewDidLoad()
        arView.session.delegate = self
        showModel()
        overlayCoachingView()
        setupARView()
        arView.addGestureRecognizer(UITapGestureRecognizer(target: self,
                                                           action: #selector(handleTap(recognizer:))))
    }

    func showModel() {
        let anchorEntity = AnchorEntity(plane: .horizontal, minimumBounds: [0.7, 0.7])
        let entity = try! Entity.loadModel(named: "COW_ANIMATIONS")
        entity.setParent(anchorEntity)
        arView.scene.addAnchor(anchorEntity)
    }

    func overlayCoachingView() {
        let coachingView = ARCoachingOverlayView(frame: CGRect(x: 0, y: 0,
                                                               width: arView.frame.width,
                                                               height: arView.frame.height))
        coachingView.session = arView.session
        coachingView.activatesAutomatically = true
        coachingView.goal = .horizontalPlane
        view.addSubview(coachingView)
    }

    // Load the "Box" scene from the "Experience" Reality File
    // let boxAnchor = try! Experience.loadBox()
    // Add the box anchor to the scene
    // arView.scene.anchors.append(boxAnchor)

    func setupARView() {
        arView.automaticallyConfigureSession = false
        let configuration = ARWorldTrackingConfiguration()
        configuration.planeDetection = [.horizontal, .vertical]
        configuration.environmentTexturing = .automatic
        arView.session.run(configuration)
    }

    // object placement
    @objc func handleTap(recognizer: UITapGestureRecognizer) {
        let location = recognizer.location(in: arView)
        let results = arView.raycast(from: location, allowing: .estimatedPlane, alignment: .horizontal)

        if let firstResult = results.first {
            let anchor = ARAnchor(name: "COW_ANIMATIONS", transform: firstResult.worldTransform)
            arView.session.add(anchor: anchor)
        } else {
            print("Object placement failed - couldn't find surface.")

            // cow animations
            let robot = try! ModelEntity.load(named: "COW_ANIMATIONS")
            let anchor = AnchorEntity()
            anchor.children.append(robot)
            arView.scene.anchors.append(anchor)
            robot.playAnimation(robot.availableAnimations[0].repeat(duration: .infinity),
                                transitionDuration: 0.5,
                                startsPaused: false)

            // start cow animation
            let brownCow = try! ModelEntity.load(named: "COW_ANIMATIONS")
            let blackCow = try! ModelEntity.load(named: "Cow")
            brownCow.position.x = -1.0
            blackCow.position.x = 1.0
            brownCow.setParent(anchor)
            blackCow.setParent(anchor)
            arView.scene.anchors.append(anchor)

            let cowAnimationResource = brownCow.availableAnimations[0]
            let horseAnimationResource = blackCow.availableAnimations[0]

            brownCow.playAnimation(cowAnimationResource.repeat(duration: .infinity),
                                   transitionDuration: 1.25,
                                   startsPaused: false)
            blackCow.playAnimation(horseAnimationResource.repeat(duration: .infinity),
                                   transitionDuration: 0.75,
                                   startsPaused: false)
            // end cow animations
        }
    }

    func placeObject(named entityName: String, for anchor: ARAnchor) {
        let entity = try! ModelEntity.loadModel(named: entityName)
        entity.generateCollisionShapes(recursive: true)
        arView.installGestures([.rotation, .translation], for: entity)
        let anchorEntity = AnchorEntity(anchor: anchor)
        anchorEntity.addChild(entity)
        arView.scene.addAnchor(anchorEntity)
    }
}

extension ViewController: ARSessionDelegate {
    func session(_ session: ARSession, didAdd anchors: [ARAnchor]) {
        for anchor in anchors {
            if let anchorName = anchor.name, anchorName == "COW_ANIMATIONS" {
                placeObject(named: anchorName, for: anchor)
            }
        }
    }
}
First step
In RealityKit, if a model is tethered to its own anchor (the case where one anchor holds just one model), you have two ways to scale it:
cowEntity.scale = [0.7, 0.7, 0.7]
// or
cowAnchor.scale = [1, 1, 1] * 0.7
and you have at least two ways to position the cow model along any axis (for instance, along the Z axis):
cowEntity.position = SIMD3<Float>(0, 0,-2)
// or
cowAnchor.position.z = -2.0
So, as you can see, when you transform cowAnchor, all of its children receive that transformation as well.
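Applied to the question's showModel(), both fixes together might look like this (a sketch reusing the asker's "COW_ANIMATIONS" asset; the exact scale and distance are guesses aimed at "a little bit smaller and about 5-7 yards away"):

func showModel() {
    let anchorEntity = AnchorEntity(plane: .horizontal, minimumBounds: [0.7, 0.7])
    let entity = try! Entity.loadModel(named: "COW_ANIMATIONS")
    entity.scale = [0.7, 0.7, 0.7]   // shrink the cow slightly
    entity.position.z = -5.0         // roughly 5.5 yards in front of the anchor
    entity.setParent(anchorEntity)
    arView.scene.addAnchor(anchorEntity)
}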
Second step
You also need to place the model's pivot point appropriately in a 3D authoring app. At the moment, RealityKit has no tool for fixing a pivot's position the way you can in SceneKit with the simdPivot instance property.
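For comparison, this is roughly what the SceneKit workaround looks like (a sketch; the half-unit offset is arbitrary):

let node = SCNNode(geometry: SCNBox(width: 1, height: 1, length: 1, chamferRadius: 0))

// shift the pivot down so the node scales and rotates around its base
var pivot = matrix_identity_float4x4
pivot.columns.3.y = -0.5
node.simdPivot = pivot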

How to make RealityKit to show only CollisionComponents?

I am trying to see the CollisionComponents on my ARView.
I used .showPhysics as part of the debugOptions, but since I have 20 objects on screen, I get all the normals going crazy and the color of the CollisionComponents is unclear (some form of weird pink).
Does anyone have any idea how to present only the CollisionComponents without any extra data as part of the .showPhysics?
You can extend the standard functionality of RealityKit's ARView using a simple Swift extension:
import RealityKit
import ARKit
fileprivate extension ARView.DebugOptions {

    func showCollisions() -> ModelEntity {
        print("Code for visualizing collision objects goes here...")

        let vc = ViewController()
        let box = MeshResource.generateBox(size: 0.04)
        let color = UIColor(white: 1.0, alpha: 0.15)
        let colliderMaterial = UnlitMaterial(color: color)

        vc.visualCollider = ModelEntity(mesh: box,
                                        materials: [colliderMaterial])
        return vc.visualCollider
    }
}
...and then call this method in the ViewController when you tap on the screen:
class ViewController: UIViewController {

    @IBOutlet var arView: ARView!
    let anchor = AnchorEntity()
    var ballEntity = ModelEntity()
    var visualCollider = ModelEntity()
    var sphere: MeshResource?

    @IBAction func onTap(_ sender: UITapGestureRecognizer) {
        sphere = MeshResource.generateSphere(radius: 0.02)
        let material = SimpleMaterial(color: .systemPink,
                                      isMetallic: false)
        ballEntity = ModelEntity(mesh: sphere!,
                                 materials: [material])
        let point: CGPoint = sender.location(in: arView)

        guard let query = arView.makeRaycastQuery(from: point,
                                                  allowing: .estimatedPlane,
                                                  alignment: .any)
        else { return }

        let result = arView.session.raycast(query)

        guard let raycastResult = result.first
        else { return }

        let anchor = AnchorEntity(raycastResult: raycastResult)
        anchor.addChild(ballEntity)
        arView.scene.anchors.append(anchor)

        let showCollisions = arView.debugOptions.showCollisions()   // here it is
        ballEntity.addChild(showCollisions)
        ballEntity.generateCollisionShapes(recursive: true)
    }
}
Please consider that this is an approximate visualization. The code just shows you one way to go.
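Another option in the same spirit is to derive the overlay's size from the entity itself rather than using a fixed box (a sketch; visualBounds(relativeTo:) is a standard Entity method, the helper name is mine, and it assumes the collision shape matches the visual bounds):

func addColliderVisual(to entity: ModelEntity) {
    // a translucent, unlit box approximating the entity's collision shape
    let bounds = entity.visualBounds(relativeTo: entity)
    let mesh = MeshResource.generateBox(size: bounds.extents)
    let material = UnlitMaterial(color: UIColor(white: 1.0, alpha: 0.15))
    let visual = ModelEntity(mesh: mesh, materials: [material])
    visual.position = bounds.center
    entity.addChild(visual)
}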

How to add a cube in the center of the screen, and so that it never leaves?

Good afternoon!
I'm new to SceneKit, and I cannot solve this problem.
I need the cube to always stay in the center of the screen and follow the camera until it finds a horizontal plane and settles on it. Right now it just stays in one place.
Here is my simple code:
import UIKit
import SceneKit
import ARKit

class ViewController: UIViewController, ARSCNViewDelegate {

    @IBOutlet var sceneView: ARSCNView!

    override func viewDidLoad() {
        super.viewDidLoad()

        let scene = SCNScene()
        let boxGeometry = SCNBox(width: 0.2, height: 0.2, length: 0.2, chamferRadius: 0)
        let material = SCNMaterial()
        material.diffuse.contents = UIColor.blue
        material.specular.contents = UIColor(white: 0.6, alpha: 1.0)
        let boxNode = SCNNode(geometry: boxGeometry)
        boxNode.geometry?.materials = [material]
        boxNode.position = SCNVector3(0, 0, -1.0)
        scene.rootNode.addChildNode(boxNode)

        //------------------------------------
        // Set the view's delegate
        sceneView.delegate = self
        // Show statistics such as fps and timing information
        sceneView.showsStatistics = true
        // Set the scene to the view
        sceneView.scene = scene
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        // Create a session configuration
        let configuration = ARWorldTrackingConfiguration()
        // Run the view's session
        sceneView.session.run(configuration)
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        // Pause the view's session
        sceneView.session.pause()
    }
}
Please help me, and go easy on me :)
Here is an update to your code that ensures the box is always at the center of the screen. Once the code detects a plane, it sets the box's parent to the plane's anchor node.
This is all very primitive, but it should help you. If you want the node to float in the center of the screen, uncomment the SCNTransactions within the willRenderScene callback. If you want the box to always face the user, you can add a lookAtConstraint (see the sketch after the code).
import UIKit
import SceneKit
import ARKit

class ViewController: UIViewController, ARSCNViewDelegate {

    @IBOutlet var sceneView: ARSCNView!
    var boxNode: SCNNode?   // keep a reference to the cube

    override func viewDidLoad() {
        super.viewDidLoad()

        let scene = SCNScene()
        let boxNode = createBox()
        scene.rootNode.addChildNode(boxNode)
        self.boxNode = boxNode

        //------------------------------------
        // Set the view's delegate
        sceneView.delegate = self
        // Show statistics such as fps and timing information
        sceneView.showsStatistics = true
        // Set the scene to the view
        sceneView.scene = scene
    }

    func createBox() -> SCNNode {
        let boxGeometry = SCNBox(width: 0.2, height: 0.2, length: 0.2, chamferRadius: 0)
        let material = SCNMaterial()
        material.diffuse.contents = UIColor.blue
        material.specular.contents = UIColor(white: 0.6, alpha: 1.0)
        let boxNode = SCNNode(geometry: boxGeometry)
        boxNode.geometry?.materials = [material]
        return boxNode
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        // Create a session configuration
        let configuration = ARWorldTrackingConfiguration()
        configuration.planeDetection = .horizontal
        // Run the view's session
        sceneView.session.run(configuration)
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        // Pause the view's session
        sceneView.session.pause()
    }

    // on willRender, update the cube's position
    func renderer(_ renderer: SCNSceneRenderer, willRenderScene scene: SCNScene, atTime time: TimeInterval) {
        // get camera translation and rotation
        guard let pointOfView = sceneView.pointOfView else { return }
        let transform = pointOfView.transform                                        // transformation matrix
        let orientation = SCNVector3(-transform.m31, -transform.m32, -transform.m33) // camera rotation
        let location = SCNVector3(transform.m41, transform.m42, transform.m43)       // camera translation
        let currentPositionOfCamera = orientation + location

        // SCNTransaction.begin()
        if let boxNode = self.boxNode {
            boxNode.position = currentPositionOfCamera
        }
        // SCNTransaction.commit()
    }

    // detect planes
    func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
        guard anchor is ARPlaneAnchor else { return }
        if let boxNode = self.boxNode {
            let newBoxNode = createBox()   // create a new node for the center of the screen
            self.boxNode = newBoxNode
            SCNTransaction.begin()
            boxNode.removeFromParentNode()
            node.addChildNode(boxNode)     // pin the current box to the detected plane
            sceneView.scene.rootNode.addChildNode(newBoxNode)   // add the new box node to the scene
            SCNTransaction.commit()
        }
    }
}

extension SCNVector3 {
    static func + (left: SCNVector3, right: SCNVector3) -> SCNVector3 {
        return SCNVector3Make(left.x + right.x, left.y + right.y, left.z + right.z)
    }
}
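As for the lookAtConstraint mentioned above, attaching one is straightforward once you have the camera node (a sketch, assuming the boxNode and sceneView from the code above, run after the session has started):

if let cameraNode = sceneView.pointOfView, let boxNode = self.boxNode {
    let constraint = SCNLookAtConstraint(target: cameraNode)
    constraint.isGimbalLockEnabled = true   // keep the box upright while it tracks the camera
    boxNode.constraints = [constraint]
}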