I want to link two 3D models to two different trackers. I snap the tracker to the reference photo, but I don't understand how to track two objects at the same time with ARImageTrackingConfiguration:
guard let trackedImages = ARReferenceImage.referenceImages(inGroupNamed: "Photos2", bundle: Bundle.main) else {
print("No images available")
return
}
You need to put it into your ViewController:
class ViewController: UIViewController {
func trackingConfig() {
guard let referenceImages = ARReferenceImage.referenceImages(inGroupNamed: "AR Resources",
bundle: nil)
else {
return
}
let config = ARWorldTrackingConfiguration()
config.detectionImages = referenceImages
let options: ARSession.RunOptions = [.resetTracking,
.removeExistingAnchors]
sceneView.session.run(config, options: options)
}
}
Then create an extension containing a renderer method, and a helper function with a switch inside:
extension ViewController: ARSCNViewDelegate {
func renderer(_ renderer: SCNSceneRenderer,
didAdd node: SCNNode,
for anchor: ARAnchor) {
guard let imageAnchor = anchor as? ARImageAnchor,
let imageName = imageAnchor.referenceImage.name
else {
return
}
let geometryNode = nodeGetter(name: imageName)
node.addChildNode(geometryNode)
}
func nodeGetter(name: String) -> SCNNode {
var node = SCNNode()
switch name {
case "geometry_01": node = geoOne
case "geometry_02": node = geoTwo
default: break
}
return node
}
}
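The answer above switches to ARWorldTrackingConfiguration with detectionImages. If you prefer to stay with ARImageTrackingConfiguration, as in the question, a minimal sketch (assuming your reference group is named "Photos2" as in your snippet) looks like this; setting maximumNumberOfTrackedImages to 2 lets both images be tracked simultaneously:
func imageTrackingConfig() {
    guard let trackedImages = ARReferenceImage.referenceImages(inGroupNamed: "Photos2", bundle: Bundle.main) else {
        print("No images available")
        return
    }
    let config = ARImageTrackingConfiguration()
    config.trackingImages = trackedImages
    // Track both reference images at the same time
    config.maximumNumberOfTrackedImages = 2
    sceneView.session.run(config, options: [.resetTracking, .removeExistingAnchors])
}
The same renderer(_:didAdd:for:) extension works unchanged, since both configurations produce ARImageAnchor objects.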
I want to detect QR codes in the vertical plane and place a node on top of the detected QR code. For QR detection I used the Vision framework, and ARKit to place the nodes, as in the code below. Whenever the node is placed, it is not attached to the QR code but placed somewhere else.
Could someone help to figure out what I have done wrong?
class ViewController: UIViewController, ARSCNViewDelegate, ARSessionDelegate {
@IBOutlet var sceneView: ARSCNView!
var qrRequests = [VNRequest]()
var detectedDataAnchor: ARAnchor?
var processing = false
let configuration = ARWorldTrackingConfiguration()
override func viewDidLoad() {
super.viewDidLoad()
self.sceneView.delegate = self
self.sceneView.session.delegate = self
self.sceneView.session.run(configuration)
startQrCodeDetection()
}
func startQrCodeDetection() {
let request = VNDetectBarcodesRequest(completionHandler: self.requestHandler)
self.qrRequests = [request]
}
public func session(_ session: ARSession, didUpdate frame: ARFrame) {
DispatchQueue.global(qos: .userInitiated).async {
do {
if self.processing {
return
}
self.processing = true
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: frame.capturedImage,
options: [:])
try imageRequestHandler.perform(self.qrRequests)
} catch {
self.processing = false // reset the flag so detection can continue after a Vision error
}
}
}
func requestHandler(request: VNRequest, error: Error?) {
if let results = request.results, let result = results.first as? VNBarcodeObservation {
guard let payload = result.payloadStringValue else {return}
var rect = result.boundingBox
let center = CGPoint(x: rect.midX, y: rect.midY)
DispatchQueue.main.async {
self.hitTestQrCode(center: center)
self.processing = false
}
} else {
self.processing = false
}
}
func hitTestQrCode(center: CGPoint) {
print("Hit Test")
if let hitTestResults = self.sceneView?.hitTest(center, types: [.featurePoint, .existingPlaneUsingExtent] ),
let hitTestResult = hitTestResults.first {
if let detectedDataAnchor = self.detectedDataAnchor,
let node = self.sceneView.node(for: detectedDataAnchor) {
node.transform = SCNMatrix4(hitTestResult.worldTransform)
} else {
self.detectedDataAnchor = ARAnchor(transform: hitTestResult.worldTransform)
self.sceneView.session.add(anchor: self.detectedDataAnchor!)
}
}
}
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
if self.detectedDataAnchor?.identifier == anchor.identifier {
let sphere = SCNSphere(radius: 0.02)
sphere.firstMaterial?.diffuse.contents = UIColor.red
let sphereNode = SCNNode(geometry: sphere)
sphereNode.transform = SCNMatrix4(anchor.transform)
return sphereNode
}
return nil
}
}
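Two details worth checking in the code above, offered as hedged suggestions rather than a confirmed diagnosis. First, in renderer(_:nodeFor:), ARKit already places the returned node at the anchor's transform, so setting sphereNode.transform = SCNMatrix4(anchor.transform) applies the anchor transform a second time; returning the sphere node without touching its transform should be enough. Second, VNBarcodeObservation.boundingBox is normalized (0...1) with a lower-left origin in the captured image's coordinate space, while sceneView.hitTest(_:types:) expects a point in view coordinates, so the center needs converting first. A sketch, assuming portrait orientation and that you pass the current ARFrame through to the handler:
func viewPoint(for observation: VNBarcodeObservation, in frame: ARFrame) -> CGPoint {
    let viewSize = sceneView.bounds.size
    // Flip Vision's lower-left origin to ARKit's normalized image space
    let normalized = CGPoint(x: observation.boundingBox.midX,
                             y: 1 - observation.boundingBox.midY)
    // Map normalized image coordinates into normalized view coordinates
    let transform = frame.displayTransform(for: .portrait, viewportSize: viewSize)
    let converted = normalized.applying(transform)
    // Scale up to actual view points for hitTest(_:types:)
    return CGPoint(x: converted.x * viewSize.width,
                   y: converted.y * viewSize.height)
}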
I've created an AR app that works pretty well, but I'm not a huge fan of the objects spawning in front of the camera every time. I would prefer to have them spawn in the field, further out from the camera, and facing predetermined directions. For example, I want a car to spawn in the same parking lot space every time, so when I walk out into the lot, I can see the car parked there like I left it, no matter which way I come at it from.
How can I spawn my objects based on their location? I would think it would have to do with replacing the plane detection with latitude and longitude coordinates, but I don't know how to go about this.
Any help is greatly appreciated!
import UIKit
import RealityKit
import ARKit
class ViewController: UIViewController {
@IBOutlet var arView: ARView!
override func viewDidLoad() {
super.viewDidLoad()
arView.session.delegate = self
showModel()
overlayCoachingView()
setupARView()
arView.addGestureRecognizer(UITapGestureRecognizer(target: self, action: #selector(handleTap(recognizer:))))
}
func showModel() {
let anchorEntity = AnchorEntity(plane: .horizontal, minimumBounds:[0.2, 0.2])
let entity = try! Entity.loadModel(named: "COW_ANIMATIONS")
entity.setParent(anchorEntity)
arView.scene.addAnchor(anchorEntity)
}
func overlayCoachingView() {
let coachingView = ARCoachingOverlayView(frame: CGRect(x: 0, y: 0, width: arView.frame.width, height: arView.frame.height))
coachingView.session = arView.session
coachingView.activatesAutomatically = true
coachingView.goal = .horizontalPlane
view.addSubview(coachingView)
}
// Load the "Box" scene from the "Experience" Reality File
// let boxAnchor = try! Experience.loadBox()
// Add the box anchor to the scene
//arView.scene.anchors.append(boxAnchor)
func setupARView() {
arView.automaticallyConfigureSession = false
let configuration = ARWorldTrackingConfiguration()
configuration.planeDetection = [.horizontal, .vertical]
configuration.environmentTexturing = .automatic
arView.session.run(configuration)
}
// Object placement
@objc
func handleTap(recognizer: UITapGestureRecognizer) {
let location = recognizer.location(in: arView)
let results = arView.raycast(from: location, allowing: .estimatedPlane, alignment: .horizontal)
if let firstResult = results.first {
let anchor = ARAnchor(name: "COW_ANIMATIONS", transform: firstResult.worldTransform)
arView.session.add(anchor: anchor)
} else {
print("Object placement failed - couldn't find surface.")
}
}
func placeObject(named entityName: String, for anchor: ARAnchor) {
let entity = try! ModelEntity.loadModel(named: entityName)
entity.generateCollisionShapes(recursive: true)
arView.installGestures([.rotation, .translation], for: entity)
let anchorEntity = AnchorEntity(anchor: anchor)
anchorEntity.addChild(entity)
arView.scene.addAnchor(anchorEntity)
}
}
extension ViewController: ARSessionDelegate {
func session(_ session: ARSession, didAdd anchors: [ARAnchor]) {
for anchor in anchors {
if let anchorName = anchor.name, anchorName == "COW_ANIMATIONS" {
placeObject(named: anchorName, for: anchor)
}
}
}
}
For geolocation, Apple provides ARGeoTrackingConfiguration with corresponding ARGeoAnchors:
let location = CLLocationCoordinate2D(latitude: -18.9137, longitude: 47.5361)
let geoAnchor = ARGeoAnchor(name: "Tana", coordinate: location, altitude: 1250)
arView.session.add(anchor: geoAnchor)
let realityKitAnchor = AnchorEntity(anchor: geoAnchor)
arView.scene.anchors.append(realityKitAnchor)
At the moment it only works in a limited set of supported cities and areas.
You can also use the getGeoLocation(forPoint:completionHandler:) instance method, which converts a position in the framework's local coordinate system to GPS latitude, longitude, and altitude:
arView.session.getGeoLocation(forPoint: xyzWorld) { (coord, alt, error) in
// xyzWorld is a simd_float3 position in ARKit world space
let anchor = ARGeoAnchor(coordinate: coord, altitude: alt)
}
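Geo tracking is supported only on certain devices and in certain regions, so it's worth checking availability before running the session. A minimal sketch:
ARGeoTrackingConfiguration.checkAvailability { available, error in
    guard available else {
        print("Geo tracking unavailable here: \(String(describing: error))")
        return
    }
    DispatchQueue.main.async {
        self.arView.session.run(ARGeoTrackingConfiguration())
    }
}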
Currently, my app tracks an image. Once found, it places the 3D model right on top of it. My question is: is it possible to scale the USDZ/scene file to the plane automatically?
I'm currently tracking a postcard (5 x 7 in) and I want the model to simply sit on the postcard once detected (the postcard is pretty small compared to most of my 3D models). I'm currently having to go into each file and scale it manually, and that takes a lot of time.
I figured there would be an easier way to scale the model programmatically, but I'm not sure how.
class ViewController: UIViewController, ARSCNViewDelegate {
@IBOutlet var sceneView: ARSCNView!
override func viewDidLoad() {
super.viewDidLoad()
sceneView.delegate = self
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
let configuration = ARImageTrackingConfiguration()
guard let trackedImages = ARReferenceImage.referenceImages(inGroupNamed: "Photos", bundle: Bundle.main) else {
print("No images available")
return
}
configuration.trackingImages = trackedImages
configuration.maximumNumberOfTrackedImages = 7
sceneView.session.run(configuration)
}
// MARK: - Code where I place my 3D model
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
let node = SCNNode()
if let imageAnchor = anchor as? ARImageAnchor {
let plane = SCNPlane(width: imageAnchor.referenceImage.physicalSize.width, height: imageAnchor.referenceImage.physicalSize.height)
plane.firstMaterial?.diffuse.contents = UIColor(white: 1, alpha: 0.8)
let planeNode = SCNNode(geometry: plane)
planeNode.eulerAngles.x = -.pi / 2
guard let url = Bundle.main.url(forResource: "hamburguer", withExtension: "usdz") else { fatalError() }
let mdlAsset = MDLAsset(url: url)
let shipScene = SCNScene(mdlAsset: mdlAsset)
// let shipScene = SCNScene(named: "retro.scn")!
let shipNode = shipScene.rootNode.childNodes.first!
shipNode.position = SCNVector3Zero
shipNode.position.z = 0.15
planeNode.addChildNode(shipNode)
node.addChildNode(planeNode)
}
return node
}
}
You could try something like the following (note: you still need to measure both widths yourself):
let width: Float = 10 // the width of the target image
let nodeWidth: Float = shipNodeLength // the width of the original model
var scale: Float = 1
if width > 0 && nodeWidth > 0 {
scale = width / nodeWidth
}
shipNode.scale = SCNVector3(scale, scale, scale)
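If you'd rather derive both measurements programmatically, here's a sketch: the target width comes from the reference image's physicalSize (which is in meters), and the model's width from its bounding box. This assumes you want to fit the model's x-extent to the image's width:
func fitNode(_ node: SCNNode, to imageAnchor: ARImageAnchor) {
    // Model width in local units, from the node's bounding box
    let (minBound, maxBound) = node.boundingBox
    let nodeWidth = CGFloat(maxBound.x - minBound.x)
    guard nodeWidth > 0 else { return }
    // Target width in meters, from the detected reference image
    let targetWidth = imageAnchor.referenceImage.physicalSize.width
    let scale = Float(targetWidth / nodeWidth)
    node.scale = SCNVector3(scale, scale, scale)
}
You could call this right after loading shipNode in renderer(_:nodeFor:), e.g. fitNode(shipNode, to: imageAnchor).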
Main Problem:
I am adding this section AFTER the fact to clarify the problem. -- I can PAUSE my video (I do not want it playing on a loop). When my node comes into sight, it plays my video, even if it is paused. If my video has finished playing and it comes into sight, it will restart. I want to REMOVE this behavior.
In my app, I have an SKVideoNode created from an AVPlayer(url:) inside 3D space, using SCNNode and SCNGeometry objects. I use ARKit image tracking to determine when a specific image is found, and play a video from there. All is good and dandy, except that the player decides to play on its own every time the AVPlayer comes into sight; or it could be whenever the ARImageAnchor the SCNNode is attached to comes into sight. Either way, the AVPlayer plays every time the node comes into sight of the camera lens. I use
override func observeValue(forKeyPath keyPath: String?, of object: Any?, change: [NSKeyValueChangeKey : Any]?, context: UnsafeMutableRawPointer?) {
if(keyPath == "rate") {
print((object as! AVPlayer).rate)
}
}
to print out the rate: it becomes 1, even though it was 0 before.
I added a print statement, print("Play"), to all of my functions that call player.pause() or player.play(), and none of them are called when the rate changes as above. How can I find the source of what is changing the rate of my player?
I checked the original root node, self.sceneView.scene.rootNode.childNodes, to make sure I am not creating extra video nodes/SCNNodes/AVPlayers, etc., and it seems there is only one.
Any ideas on why the SKVideoNode/AVPlayer is playing as the SCNNode comes into sight of the camera using ARKit? Thanks in advance!
Edit1:
I made a workaround to act ONLY when a user taps this node:
let tap = UITapGestureRecognizer(target: self, action: #selector(self!.tapGesture))
tap.delegate = self!
tap.name = "MyTap"
self!.sceneView.addGestureRecognizer(tap)
and then inside of this next function, I put
@objc func tapGesture(_ gesture: UITapGestureRecognizer) {
let tappedNodes = self.sceneView.hitTest(gesture.location(in: gesture.view), options: [SCNHitTestOption.searchMode: 1])
if !tappedNodes.isEmpty {
for nodes in tappedNodes {
if nodes.node == videoPlayer3D {
videoPlayer3D.tappedVideoPlayer = true
videoPlayer3D.playOrPause()
break
}
}
}
}
override func observeValue(forKeyPath keyPath: String?, of object: Any?, change: [NSKeyValueChangeKey : Any]?, context: UnsafeMutableRawPointer?) {
if(keyPath == "rate") {
print((object as! AVPlayer).rate)
if(!self.tappedVideoPlayer) {
self.player.pause() //HERE
}
}
}
where videoPlayer3D is the SCNNode that contains the SKVideoNode.
However, I get the error com.apple.scenekit.scnview-renderer (17): EXC_BAD_ACCESS (code=2, address=0x16d8f7ad0) at the line labeled "HERE" above. It seems that the scene view's renderer is attempting to alter my video node in its render loop, although I don't even implement renderer(_:updateAtTime:); I only use
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
guard let imageAnchor = anchor as? ARImageAnchor else { return }
createVideoNode(imageAnchor)
}
to determine when I see an image, i.e., imageTracking and I create the node. Any tips?
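One hedged thought on the EXC_BAD_ACCESS, rather than a confirmed diagnosis: KVO callbacks fire on whichever thread changed the value, and here the rate is changed by SceneKit's render thread, so the self.player.pause() call inside observeValue mutates the player off the main thread. Dispatching that pause to the main queue is a typical first step:
override func observeValue(forKeyPath keyPath: String?, of object: Any?, change: [NSKeyValueChangeKey : Any]?, context: UnsafeMutableRawPointer?) {
    if keyPath == "rate", let player = object as? AVPlayer {
        print(player.rate)
        if !self.tappedVideoPlayer {
            // Hop to the main thread before touching the player
            DispatchQueue.main.async {
                self.player.pause()
            }
        }
    }
}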
Thought 1
The error states that some method is being called on an SCNView object from its renderer (that's what I understand from the error), but I don't invoke anything on the node specifically. I think some default action may be run as the node comes into view, but I'm not 100% sure how to access it or determine which method. The objects I'm using are not SCNView objects, and I don't believe they inherit from SCNView (see the first paragraph for the variables used). I'm just looking to remove the "action" of the node playing every time it is in view.
ADDITION:
If you're interested in following the creation of my video player, here it is. Let me know if there is anything else you'd like to see, and thanks for your help.
func createVideoNode(_ anchor:ARImageAnchor, initialPOV:SCNNode) -> My3DPlayer? {
guard let currentFrame = self.sceneView.session.currentFrame else {
return nil
}
let delegate = UIApplication.shared.delegate as! AppDelegate
var videoPlayer:My3DPlayer!
videoPlayer = delegate.testing ? My3DPlayer(data: nil, currentFrame: currentFrame, anchor: anchor) : My3DPlayer(data: self.urlData, currentFrame: currentFrame, anchor: anchor)
//Create TapGesture
let tap = UITapGestureRecognizer(target: self, action: #selector(self.tapGesture))
tap.delegate = self
tap.name = "MyTap"
self.sceneView.addGestureRecognizer(tap)
return videoPlayer
}
The My3DPlayer class:
class My3DPlayer: SCNNode {
init(geometry: SCNGeometry?) {
super.init()
self.geometry = geometry
}
required init?(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
convenience init(data:Data?, currentFrame:ARFrame, anchor:ARImageAnchor) {
self.init(geometry: nil)
self.createPlayer(currentFrame, data, anchor)
}
private func createPlayer(_ frame:ARFrame, _ data:Data?,_ anchor:ARImageAnchor) {
let physicalSize = anchor.referenceImage.physicalSize
print("Init Player W/ physicalSize: \(physicalSize)")
//Create video
if((UIApplication.shared.delegate! as! AppDelegate).testing) {
let path = Bundle.main.path(forResource: "Bear", ofType: "mov")
self.url = URL(fileURLWithPath: path!)
}
else {
let url = data!.getAVAssetURL(location: "MyLocation")
self.url = url
}
let asset = AVAsset(url: self.url)
let track = asset.tracks(withMediaType: AVMediaType.video).first!
let playerItem = AVPlayerItem(asset: asset)
let player = AVPlayer(playerItem: playerItem)
self.player = player
var videoSize = track.naturalSize.applying(track.preferredTransform)
videoSize = CGSize(width: abs(videoSize.width), height: abs(videoSize.height))
print("Init Video W/ size: \(videoSize)")
//Determine if landscape or portrait
self.landscape = videoSize.width > videoSize.height
print(self.landscape == true ? "Landscape" : "Portrait")
//Do something when video ended
NotificationCenter.default.addObserver(self, selector: #selector(playerDidFinishPlaying(note:)), name: NSNotification.Name.AVPlayerItemDidPlayToEndTime, object: nil)
//Add observer to determine when Player is ready
player.addObserver(self, forKeyPath: "status", options: [], context: nil)
//Create video Node
let videoNode = SKVideoNode(avPlayer: player)
//Create 2d scene to put 2d player on - SKScene
videoNode.position = CGPoint(x: videoSize.width/2, y: videoSize.height/2)
videoNode.size = videoSize
//Portrait -- //Landscape doesn't need adjustments??
if(!self.landscape) {
let width = videoNode.size.width
videoNode.size.width = videoNode.size.height
videoNode.size.height = width
videoNode.position = CGPoint(x: videoNode.size.width/2, y: videoNode.size.height/2)
}
let scene = SKScene(size: videoNode.size)
//Add videoNode to scene
scene.addChild(videoNode)
//Create a button look even though we don't use the button. Just creates the illusion of pressing play and pause
let image = UIImage(named: "PlayButton")!
let texture = SKTexture(image: image)
self.button = SKSpriteNode(texture: texture)
self.button.position = videoNode.position
//Makes the button look like a square
let minimumSize = [videoSize.width, videoSize.height].min()!
self.button.size = CGSize(width: minimumSize/4, height: minimumSize/4)
scene.addChild(button)
//Get ratio difference from physicalsize and video size
let widthRatio = Float(physicalSize.width)/Float(videoSize.width)
let heightRatio = Float(physicalSize.height)/Float(videoSize.height)
let finalRatio = [widthRatio, heightRatio].min()!
//Create a Plane (SCNPlane) to put the SKScene on
let plane = SCNPlane(width: scene.size.width, height: scene.size.height)
plane.firstMaterial?.diffuse.contents = scene
plane.firstMaterial?.isDoubleSided = true
//Set Self.geometry = plane
self.geometry = plane
//Size the node correctly
//Find the real scaling variable
let scale = CGFloat(finalRatio)
let appearanceAction = SCNAction.scale(to: scale, duration: 0.4)
appearanceAction.timingMode = .easeOut
//Set initial scale to 0 then use action to scale up
self.scale = SCNVector3Make(0, 0, 0)
self.runAction(appearanceAction)
}
#objc func playerDidFinishPlaying(note: Notification) {
self.player.seek(to: .zero, toleranceBefore: .zero, toleranceAfter: .zero)
self.setButtonAlpha(alpha: 1)
}
}
Efforts1:
I have tried to stop tracking via:
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
guard let imageAnchor = anchor as? ARImageAnchor else { return }
createVideoNode(imageAnchor)
self.resetConfiguration(turnOnConfig: true, turnOnImageTracking: false)
}
func resetConfiguration(turnOnConfig: Bool = true, turnOnImageTracking:Bool = false) {
let configuration = ARWorldTrackingConfiguration()
if(turnOnImageTracking) {
guard let referenceImages = ARReferenceImage.referenceImages(inGroupNamed: "AR Resources", bundle: nil) else {
fatalError("Missing expected asset catalog resources.")
}
configuration.planeDetection = .horizontal
configuration.detectionImages = referenceImages
}
else {
configuration.planeDetection = []
}
if(turnOnConfig) {
sceneView.session.run(configuration, options: [.resetTracking])
}
}
Above, I have tried to reset the configuration. It seems this only resets the planes, as the video still plays on render. Whether it is paused or finished, it will reset and start over, or continue playing where it left off.
Efforts2:
I have tried
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
guard let imageAnchor = anchor as? ARImageAnchor else { return }
createVideoNode(imageAnchor)
self.pauseTracking()
}
func pauseTracking() {
self.sceneView.session.pause()
}
This stops everything, so the camera even freezes, since nothing is being tracked. It is completely useless here.
OK, so here is a fix; see renderer(_:updateAtTime:).
var player: AVPlayer!
var videoSpriteNode: SKVideoNode! // declared here so the snippet compiles; set in setVideo()
var play = true
@objc func tap(_ recognizer: UITapGestureRecognizer) {
if play {
play = false
player.pause()
} else {
play = true
player.play()
}
}
func setVideo() -> SKScene {
let size = CGSize(width: 500, height: 500)
let skScene = SKScene(size: size)
let videoURL = Bundle.main.url(forResource: "video.mp4", withExtension: nil)!
player = AVPlayer(url: videoURL)
skScene.scaleMode = .aspectFit
videoSpriteNode = SKVideoNode(avPlayer: player)
videoSpriteNode.position = CGPoint(x: size.width/2, y: size.height/2)
videoSpriteNode.size = size
videoSpriteNode.yScale = -1
skScene.addChild(videoSpriteNode)
player.play()
return skScene
}
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
if let image = anchor as? ARImageAnchor {
print("found")
let planeGeometry = SCNPlane(width: image.referenceImage.physicalSize.width, height: image.referenceImage.physicalSize.height)
let plane = SCNNode(geometry: planeGeometry)
planeGeometry.materials.first?.diffuse.contents = setVideo()
plane.transform = SCNMatrix4MakeRotation(-.pi/2, 1, 0, 0)
node.addChildNode(plane)
}
}
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
if !play {
player.pause()
}
}
Use this idea in your code.
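An alternative worth knowing about (assuming a deployment target of iOS 11 or later): SceneKit accepts an AVPlayer directly as material contents, which avoids SKVideoNode and its tendency to resume playback whenever the scene renders. Playback then stays entirely under your control. A sketch, where videoURL is a placeholder for your movie's URL:
let player = AVPlayer(url: videoURL) // videoURL: URL to your movie file
planeGeometry.firstMaterial?.diffuse.contents = player
player.pause() // nothing plays until you explicitly call player.play()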
I'm working on an application with ARKit. There are many 3D models in my app and their file sizes are big. Can I load these models from another server (an outside site)? I'm new to Swift, and I can't seem to find anything on loading a 3D model from a web server.
Is it enough to change the model path in the code below? Thank you.
func loadModel() {
guard let virtualObjectScene = SCNScene(named: "\(modelName).\(fileExtension)", inDirectory: "Models.scnassets/\(modelName)") else {
return
}
let wrapperNode = SCNNode()
for child in virtualObjectScene.rootNode.childNodes {
wrapperNode.addChildNode(child)
}
self.addChildNode(wrapperNode)
}
All code:
import UIKit
import SceneKit
import ARKit
class VirtualObject: SCNNode {
var modelName: String = ""
var fileExtension: String = ""
var thumbImage: UIImage!
var title: String = ""
var viewController: ViewController?
override init() {
super.init()
self.name = "Virtual object root node"
}
init(modelName: String, fileExtension: String, thumbImageFilename: String, title: String) {
super.init()
self.name = "Virtual object root node"
self.modelName = modelName
self.fileExtension = fileExtension
self.thumbImage = UIImage(named: thumbImageFilename)
self.title = title
}
required init?(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
func loadModel() {
guard let virtualObjectScene = SCNScene(named: "\(modelName).\(fileExtension)", inDirectory: "Models.scnassets/\(modelName)") else {
return
}
let wrapperNode = SCNNode()
for child in virtualObjectScene.rootNode.childNodes {
wrapperNode.addChildNode(child)
}
self.addChildNode(wrapperNode)
}
func unloadModel() {
self.removeFromParentNode()
for child in self.childNodes {
child.removeFromParentNode()
}
}
func translateBasedOnScreenPos(_ pos: CGPoint, instantly: Bool, infinitePlane: Bool) {
guard let controller = viewController else {
return
}
let result = controller.worldPositionFromScreenPosition(pos, objectPos: self.position, infinitePlane: infinitePlane)
controller.moveVirtualObjectToPosition(result.position, instantly, !result.hitAPlane)
}
}
extension VirtualObject {
static func isNodePartOfVirtualObject(_ node: SCNNode) -> Bool {
if node.name == "Virtual object root node" {
return true
}
if node.parent != nil {
return isNodePartOfVirtualObject(node.parent!)
}
return false
}
static let availableObjects: [VirtualObject] = [
Anatomy()
]
}
You can load an .scn file from a web server by IP address like this (I used a fake IP below):
let myURL = NSURL(string: "http://110.151.153.202:80/scnfiles/myfile.scn")
let scene = try! SCNScene(url: myURL! as URL, options: nil)
Edit:
Here's a simple Swift Playground which pulls a test cube .scn file from my GitHub repo. You just tap anywhere and the cube loads.
import ARKit
import SceneKit
import PlaygroundSupport
class ViewController: NSObject {
var sceneView: ARSCNView
init(sceneView: ARSCNView) {
self.sceneView = sceneView
super.init()
self.setupWorldTracking()
self.sceneView.addGestureRecognizer(UITapGestureRecognizer(target: self, action: #selector(ViewController.handleTap(_:))))
}
private func setupWorldTracking() {
if ARWorldTrackingConfiguration.isSupported {
let configuration = ARWorldTrackingConfiguration()
configuration.planeDetection = .horizontal
configuration.isLightEstimationEnabled = true
self.sceneView.session.run(configuration, options: [])
}
}
@objc func handleTap(_ gesture: UITapGestureRecognizer) {
let results = self.sceneView.hitTest(gesture.location(in: gesture.view), types: ARHitTestResult.ResultType.featurePoint)
guard let result: ARHitTestResult = results.first else {
return
}
// pulls cube.scn from github repo
let myURL = NSURL(string: "https://raw.githubusercontent.com/wave-electron/scnFile/master/cube.scn")
let scene = try! SCNScene(url: myURL! as URL, options: nil)
let node = scene.rootNode.childNode(withName: "SketchUp", recursively: true)
node?.scale = SCNVector3(0.01, 0.01, 0.01)
let position = SCNVector3Make(result.worldTransform.columns.3.x, result.worldTransform.columns.3.y, result.worldTransform.columns.3.z)
node?.position = position
self.sceneView.scene.rootNode.addChildNode(node!)
}
}
let sceneView = ARSCNView()
let viewController = ViewController(sceneView: sceneView)
sceneView.autoenablesDefaultLighting = true
PlaygroundPage.current.needsIndefiniteExecution = true
PlaygroundPage.current.liveView = viewController.sceneView
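One caveat with loading this way: SCNScene(url:options:) fetches the remote file synchronously, so calling it in a tap handler blocks the main thread while the download runs. A sketch of downloading first with URLSession and loading from the local copy (the file is moved to a path ending in .scn first, since SceneKit may rely on the extension; the URL is the same test cube as above):
let remoteURL = URL(string: "https://raw.githubusercontent.com/wave-electron/scnFile/master/cube.scn")!
URLSession.shared.downloadTask(with: remoteURL) { tempURL, _, error in
    guard let tempURL = tempURL, error == nil else { return }
    let localURL = FileManager.default.temporaryDirectory.appendingPathComponent("cube.scn")
    try? FileManager.default.removeItem(at: localURL) // clear any stale copy
    try? FileManager.default.moveItem(at: tempURL, to: localURL)
    guard let scene = try? SCNScene(url: localURL, options: nil) else { return }
    DispatchQueue.main.async {
        // Add the loaded node to the AR scene on the main thread
        if let node = scene.rootNode.childNode(withName: "SketchUp", recursively: true) {
            sceneView.scene.rootNode.addChildNode(node)
        }
    }
}.resume()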