Add text to a recognized image via ARKit - Swift

Right now I have simple image detection working, with an SCNPlane overlay.
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    let node = SCNNode()
    if let imageAnchor = anchor as? ARImageAnchor {
        let plane = SCNPlane(width: imageAnchor.referenceImage.physicalSize.width, height: imageAnchor.referenceImage.physicalSize.height)
        plane.firstMaterial?.diffuse.contents = UIColor(white: 1, alpha: 0.5)
        let planeNode = SCNNode(geometry: plane)
        planeNode.eulerAngles.x = -.pi / 2
        node.addChildNode(planeNode)
    }
    return node
}
Instead of the image overlay, I want to display simple text to the right of the recognized image.
I already tried:
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    let node = SCNNode()
    let text = SCNText(string: "testtext", extrusionDepth: 1)
    let material = SCNMaterial()
    material.diffuse.contents = UIColor.green
    text.materials = [material]
    let textNode = SCNNode(geometry: text)
    node.addChildNode(textNode)
    return node
}
What am I doing wrong here?
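There are two likely issues here: SCNText geometry is measured in scene units (metres), so with the default font it comes out enormous, and the text node is never positioned relative to the image anchor. Here is a minimal sketch of how this could look; the scale factor is an illustrative assumption, not a tuned value:

func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    let node = SCNNode()
    if let imageAnchor = anchor as? ARImageAnchor {
        let text = SCNText(string: "testtext", extrusionDepth: 1)
        text.firstMaterial?.diffuse.contents = UIColor.green
        let textNode = SCNNode(geometry: text)
        // SCNText is huge in scene units (metres), so scale it down drastically.
        textNode.scale = SCNVector3(0.002, 0.002, 0.002)
        // Lay the text flat, like the plane in the first snippet.
        textNode.eulerAngles.x = -.pi / 2
        // Offset it to the right edge of the detected image.
        textNode.position.x = Float(imageAnchor.referenceImage.physicalSize.width / 2)
        node.addChildNode(textNode)
    }
    return node
}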

Related

How can I replay the paused (removed) video

I am creating an AR application to track 2 images and play a video for each image. The problem I have is that I am not able to replay the first video, because it gets paused (and removed).
I tried to create a store for removed videos, and then used that stored data to replay the video. But it doesn't work as desired.
The video that was removed first starts playing on every image once the first image is scanned again.
The code:
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    let node = SCNNode()
    if let imageAnchor = anchor as? ARImageAnchor {
        let videos = ["harrypotter": "harrypotter.mp4", "deatheater": "deatheater.mp4"]
        if let videoName = videos[imageAnchor.referenceImage.name!] {
            if let currentVideoNode = currentVideoNode {
                currentVideoNode.pause()
                currentVideoNode.removeFromParent()
            }
            let videoNode = SKVideoNode(fileNamed: videoName)
            videoNode.play()
            currentVideoNode = videoNode
            let videoScene = SKScene(size: CGSize(width: 480, height: 360))
            videoNode.position = CGPoint(x: videoScene.size.width / 2, y: videoScene.size.height / 2)
            videoNode.yScale = -1.0
            videoScene.addChild(videoNode)
            let plane = SCNPlane(width: imageAnchor.referenceImage.physicalSize.width, height: imageAnchor.referenceImage.physicalSize.height)
            plane.firstMaterial?.diffuse.contents = videoScene
            let planeNode = SCNNode(geometry: plane)
            planeNode.eulerAngles.x = -.pi / 2
            node.addChildNode(planeNode)
        }
    }
    return node
}
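One way this could be restructured (a sketch, assuming the delegate above otherwise stays the same): cache one SKScene/SKVideoNode pair per reference image name, pause all the others when a new image is detected, and reuse the cached pair instead of removing the node. The helper below is hypothetical; the sizes mirror the snippet above:

// Hypothetical cache: one (scene, video) pair per reference image name.
var videoPlayers = [String: (scene: SKScene, video: SKVideoNode)]()

func videoScene(forImageNamed name: String, videoFileNamed file: String) -> SKScene {
    // Pause every other video so only the current image's video plays.
    videoPlayers.filter { $0.key != name }.forEach { $0.value.video.pause() }
    // Reuse the cached player so a previously paused video can resume.
    if let cached = videoPlayers[name] {
        cached.video.play()
        return cached.scene
    }
    let videoNode = SKVideoNode(fileNamed: file)
    let scene = SKScene(size: CGSize(width: 480, height: 360))
    videoNode.position = CGPoint(x: scene.size.width / 2, y: scene.size.height / 2)
    videoNode.yScale = -1.0
    scene.addChild(videoNode)
    videoNode.play()
    videoPlayers[name] = (scene, videoNode)
    return scene
}

In the delegate you would then set the plane's diffuse contents to the scene returned by videoScene(forImageNamed:videoFileNamed:) instead of building a new scene each time.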

How can I reuse an ARAnchor created for an ARReferenceObject so that the augmentation of the object can be started later with a button?

I'm new to programming, so I fear the answer might be obvious, but I can't find it anywhere and would be very grateful for some help!
I'm trying to augment a soda can with different AR experiences that one can activate by pressing different buttons.
As long as I add only one AR experience immediately in the renderer function (func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode?), everything works fine.
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    let node = SCNNode()
    if let objectAnchor = anchor as? ARObjectAnchor {
        print("Yes, we can!")
        node.position = SCNVector3Make(objectAnchor.referenceObject.center.x, objectAnchor.referenceObject.center.y, objectAnchor.referenceObject.center.z)
        node.geometry = SCNBox(width: 0.05, height: 0.05, length: 0.05, chamferRadius: 0)
        node.geometry?.firstMaterial?.diffuse.contents = UIColor.blue
        self.sceneView.scene.rootNode.addChildNode(node)
    }
    return node
}
The box is attached to the middle of the soda can.
But I don't want the augmentation to begin automatically when the soda can is detected, only when the button is pressed. So I tried to split the code. First the soda can is detected, and an ARAnchor is created and added to an array:
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    let node = SCNNode()
    if let objectAnchor = anchor as? ARObjectAnchor {
        print("Yes, we can!")
        node.position = SCNVector3Make(objectAnchor.referenceObject.center.x, objectAnchor.referenceObject.center.y, objectAnchor.referenceObject.center.z)
        objectAnchors.append(objectAnchor)
        self.sceneView.scene.rootNode.addChildNode(node)
    }
    return node
}
And then I try to retrieve the ARAnchor when I press the button:
@IBAction func startParkour(_ sender: Any) {
    print("Button pressed!")
    let objectAnchor = objectAnchors[0]
    let node = SCNNode()
    node.geometry = SCNBox(width: 0.05, height: 0.05, length: 0.05, chamferRadius: 0)
    node.geometry?.firstMaterial?.diffuse.contents = UIColor.green
    node.position = SCNVector3Make(objectAnchor.referenceObject.center.x, objectAnchor.referenceObject.center.y, objectAnchor.referenceObject.center.z)
    self.sceneView.scene.rootNode.addChildNode(node)
}
But the result, unfortunately, is that the object is now added at the point where I was holding my camera when I pressed the button, rather than attached to the soda can.
Does anyone know what I am doing wrong?
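A hedged sketch of one likely fix: referenceObject.center is expressed in the anchor's local coordinate space, so assigning it to a node added to the scene's root places the box near the world origin rather than on the can. Parenting the button's node to the SCNNode that ARKit already created for the anchor (via ARSCNView's node(for:) method) keeps the offset in the right space:

@IBAction func startParkour(_ sender: Any) {
    print("Button pressed!")
    guard let objectAnchor = objectAnchors.first,
          let anchorNode = sceneView.node(for: objectAnchor) else { return }
    let boxNode = SCNNode()
    boxNode.geometry = SCNBox(width: 0.05, height: 0.05, length: 0.05, chamferRadius: 0)
    boxNode.geometry?.firstMaterial?.diffuse.contents = UIColor.green
    // The center offset is local to the anchor, so apply it on a child
    // of the anchor's node rather than on a root-level node.
    boxNode.position = SCNVector3Make(objectAnchor.referenceObject.center.x,
                                      objectAnchor.referenceObject.center.y,
                                      objectAnchor.referenceObject.center.z)
    anchorNode.addChildNode(boxNode)
}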

Which image is being used as reference in ARSceneView?

I am currently making an AR app that uses image tracking.
I want it to have 2 images in the reference images folder (Image A: ~670x210, and Image B: ~1123x794).
How do I check which image the camera is detecting?
For example, if the camera is seeing Image A, I want to make a plane twice the size of the physical object; if it's seeing Image B, I want to make the plane the same size as the physical object.
Here's a snippet of my code, if needed:
override func viewWillAppear(_ animated: Bool) {
    super.viewWillAppear(animated)
    // Create a session configuration
    let configuration = ARImageTrackingConfiguration()
    if let trackedImage = ARReferenceImage.referenceImages(inGroupNamed: "ARpaperImage", bundle: Bundle.main) {
        configuration.trackingImages = trackedImage
        configuration.maximumNumberOfTrackedImages = 1
    }
    // Run the view's session
    sceneView.session.run(configuration)
}
// MARK: - ARSCNViewDelegate
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    let node = SCNNode()
    if let imageAnchor = anchor as? ARImageAnchor {
        let plane = SCNPlane(width: imageAnchor.referenceImage.physicalSize.width, height: imageAnchor.referenceImage.physicalSize.height)
        plane.firstMaterial?.diffuse.contents = UIColor(white: 1.0, alpha: 0.5)
        let planeNode = SCNNode(geometry: plane)
        planeNode.eulerAngles.x = -.pi / 2
        node.addChildNode(planeNode)
    }
    return node
}
An ARReferenceImage has a name property which you can use to determine which reference image has been detected; it is simply:
A descriptive name for the image
As such, when you put your ARReferenceImages into your resource bundle, you can give them a name (and probably already have).
You can then use these names within the following delegate callback to handle your requirements:
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor)
So let's assume we have two ARReferenceImages called Brown and Choco. Using these names we can then create logic to display different content, e.g.:
//-------------------------
//MARK: - ARSCNViewDelegate
//-------------------------
extension ViewController: ARSCNViewDelegate {

    func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
        //1. Check We Have Detected An ARImageAnchor
        guard let validAnchor = anchor as? ARImageAnchor, let referenceImageName = validAnchor.referenceImage.name else { return }
        //2. Get The Physical Size Of The Reference Image (This Is Specified When You Create Your Target In The ARResource Bundle)
        let physicalSizeOfReferenceImage = CGSize(width: validAnchor.referenceImage.physicalSize.width, height: validAnchor.referenceImage.physicalSize.height)
        //3. Log The Data For Debugging Purposes
        print("""
        Detected ARAnchorID = \(validAnchor.identifier)
        Detected Reference Image Name = \(referenceImageName)
        Detected Reference Image Physical Size = (width) \(physicalSizeOfReferenceImage.width),
        Detected Reference Image Physical Size = (height) \(physicalSizeOfReferenceImage.height)
        """)
        //4. Perform Instantiation & Other Logic Based On The Reference Image Detected
        if referenceImageName == "Brown" {
            node.addChildNode(planeNodeOfSize(CGSize(width: physicalSizeOfReferenceImage.width * 2, height: physicalSizeOfReferenceImage.height * 2)))
        } else if referenceImageName == "Choco" {
            node.addChildNode(planeNodeOfSize(CGSize(width: physicalSizeOfReferenceImage.width, height: physicalSizeOfReferenceImage.height)))
        }
    }

    /// Generates An SCNNode With An SCNPlaneGeometry Of A Specified Width & Height
    ///
    /// - Parameter size: CGSize
    /// - Returns: SCNNode
    func planeNodeOfSize(_ size: CGSize) -> SCNNode {
        //1. Create An SCNPlane Of Our Chosen Size
        let planeNode = SCNNode()
        let planeGeometry = SCNPlane(width: size.width, height: size.height)
        planeGeometry.firstMaterial?.diffuse.contents = UIColor.white
        planeNode.opacity = 0.25
        planeNode.geometry = planeGeometry
        //2. Rotate The PlaneNode To Horizontal
        planeNode.eulerAngles.x = -.pi/2
        //3. Return Our Node
        return planeNode
    }
}
Hope it helps...

Displaying an "ARAnchor" in ARSCNView

In ARKit we can visualise the cloud of feature points detected in an ARSession via the .showFeaturePoints type property:
self.sceneView.debugOptions = [ARSCNDebugOptions.showFeaturePoints]
Also, we can display a coordinate axis visualization indicating the position and orientation of the AR World Coordinate System:
static let showWorldOrigin: SCNDebugOptions
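Both debug options can be combined if needed; a one-line sketch:

sceneView.debugOptions = [ARSCNDebugOptions.showFeaturePoints, ARSCNDebugOptions.showWorldOrigin]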
But is it possible to show ARAnchors in ARSCNView?
And if yes, how could we do it?
Just to follow up on @sj-r's and @Rickster's comments.
The example code that @Rickster was talking about in regard to coordinateOrigin.scn is found here: Creating Face-Based Experiences.
And here is a little snippet I have used before to visualize the axes:
class BMOriginVisualizer: SCNNode {

    //----------------------
    //MARK: - Initialization
    //----------------------

    /// Creates An AxisNode To Visualize ARAnchors
    ///
    /// - Parameter scale: CGFloat
    init(scale: CGFloat = 1) {
        super.init()
        //1. Create The X Axis
        let xNode = SCNNode()
        let xNodeGeometry = SCNBox(width: 1, height: 0.01, length: 0.01, chamferRadius: 0)
        xNode.geometry = xNodeGeometry
        xNodeGeometry.firstMaterial?.diffuse.contents = UIColor.red
        xNode.position = SCNVector3(0.5, 0, 0)
        self.addChildNode(xNode)
        //2. Create The Y Axis
        let yNode = SCNNode()
        let yNodeGeometry = SCNBox(width: 0.01, height: 1, length: 0.01, chamferRadius: 0)
        yNode.geometry = yNodeGeometry
        yNode.position = SCNVector3(0, 0.5, 0)
        yNodeGeometry.firstMaterial?.diffuse.contents = UIColor.green
        self.addChildNode(yNode)
        //3. Create The Z Axis
        let zNode = SCNNode()
        let zNodeGeometry = SCNBox(width: 0.01, height: 0.01, length: 1, chamferRadius: 0)
        zNode.geometry = zNodeGeometry
        zNodeGeometry.firstMaterial?.diffuse.contents = UIColor.blue
        zNode.position = SCNVector3(0, 0, 0.5)
        self.addChildNode(zNode)
        //4. Scale Our Axes
        self.scale = SCNVector3(scale, scale, scale)
    }

    required init?(coder aDecoder: NSCoder) { fatalError("Visualizer Coder Not Implemented") }
}
Which can be initialised like so:
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
    let anchorVisualizer = BMOriginVisualizer(scale: 0.5)
    node.addChildNode(anchorVisualizer)
}
Hopefully this will prove useful as an expansion to the answer provided by @sj-r.
ARAnchor only represents 'position and orientation'. Things you can see are SCNNodes.
You can attach a node for each anchor you add via a method in ARSCNViewDelegate
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    // Create a node so you can visualize the location.
    let sphereNode = SCNNode(geometry: SCNSphere(radius: 0.5))
    sphereNode.geometry?.firstMaterial?.diffuse.contents = UIColor.blue
    return sphereNode
}
This is called after you add an anchor (or when the system adds anchors, such as when plane detection or image/object detection is turned on):
sceneView.session.add(anchor:)
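For completeness, here is a small sketch of adding an anchor manually; the one-metre offset in front of the camera is an arbitrary illustrative choice:

// Drop an anchor one metre in front of the current camera position.
if let currentFrame = sceneView.session.currentFrame {
    var translation = matrix_identity_float4x4
    translation.columns.3.z = -1.0
    let transform = simd_mul(currentFrame.camera.transform, translation)
    sceneView.session.add(anchor: ARAnchor(transform: transform))
}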

How to play a local video when an image is recognized using ARKit in Swift?

I have image recognition working using ARKit. When an image is detected, I need to show and play a video in the presented scene (e.g. above the detected image).
lazy var fadeAndSpinAction: SCNAction = {
    return .sequence([
        .fadeIn(duration: fadeDuration),
        .rotateBy(x: 0, y: 0, z: CGFloat.pi * 360 / 180, duration: rotateDuration),
        .wait(duration: waitDuration),
        .fadeOut(duration: fadeDuration)
    ])
}()

lazy var fadeAction: SCNAction = {
    return .sequence([
        .fadeOpacity(by: 0.8, duration: fadeDuration),
        .wait(duration: waitDuration),
        .fadeOut(duration: fadeDuration)
    ])
}()

lazy var fishNode: SCNNode = {
    guard let scene = SCNScene(named: "Catfish1.scn"),
          let node = scene.rootNode.childNode(withName: "Catfish1", recursively: false) else { return SCNNode() }
    let scaleFactor = 0.005
    node.scale = SCNVector3(scaleFactor, scaleFactor, scaleFactor)
    node.eulerAngles.x = -.pi / 2
    return node
}()

override func viewDidLoad() {
    super.viewDidLoad()
    // Do any additional setup after loading the view, typically from a nib.
    sceneView.delegate = self
    configureLighting()
}

func configureLighting() {
    sceneView.autoenablesDefaultLighting = true
    sceneView.automaticallyUpdatesLighting = true
}

override func viewWillAppear(_ animated: Bool) {
    resetTrackingConfiguration()
}

func resetTrackingConfiguration() {
    guard let referenceImages = ARReferenceImage.referenceImages(inGroupNamed: "AR Resources", bundle: nil) else { return }
    let configuration = ARWorldTrackingConfiguration()
    configuration.detectionImages = referenceImages
    let options: ARSession.RunOptions = [.resetTracking, .removeExistingAnchors]
    sceneView.session.run(configuration, options: options)
    statusLabel.text = "Move camera around to detect images"
}

extension ViewController: ARSCNViewDelegate {
    func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
        DispatchQueue.main.async {
            guard let imageAnchor = anchor as? ARImageAnchor,
                  let imageName = imageAnchor.referenceImage.name else { return }
            // TODO: Overlay 3D Object
            let overlayNode = self.getNode(withImageName: imageName)
            overlayNode.opacity = 0
            overlayNode.position.y = 0.2
            overlayNode.runAction(self.fadeAndSpinAction)
            node.addChildNode(overlayNode)
            self.statusLabel.text = "Image detected: \"\(imageName)\""
            self.videoNode.geometry = SCNPlane(width: 1276.0 / 2.0, height: 712.0 / 2.0)
            self.spriteKitScene.scaleMode = .aspectFit
            self.videoSpriteKitNode?.position = CGPoint(x: self.spriteKitScene.size.width / 2.0, y: self.spriteKitScene.size.height / 2.0)
            self.videoSpriteKitNode?.size = self.spriteKitScene.size
            self.spriteKitScene.addChild(self.videoSpriteKitNode!)
            self.videoNode.geometry?.firstMaterial?.diffuse.contents = self.spriteKitScene
            var transform = SCNMatrix4MakeRotation(Float(M_PI), 0.0, 0.0, 1.0)
            transform = SCNMatrix4Translate(transform, 1.0, 1.0, 0)
            self.videoNode.geometry?.firstMaterial?.diffuse.contentsTransform = transform
            self.videoNode.position = SCNVector3(x: 0, y: 30, z: 7)
            node.addChildNode(self.videoNode)
            self.videoSpriteKitNode?.play()
        }
    }

    func getPlaneNode(withReferenceImage image: ARReferenceImage) -> SCNNode {
        let plane = SCNPlane(width: image.physicalSize.width,
                             height: image.physicalSize.height)
        let node = SCNNode(geometry: plane)
        return node
    }
}
Looking at your code: firstly, you are setting your SCNPlane to be 638 metres wide and 356 metres tall, and I'm sure that's not what you actually want ^________^.
Anyway, here is an example of playing a local video using an SKScene and an SKVideoNode, which works well:
//--------------------------
// MARK: - ARSCNViewDelegate
//--------------------------
extension ViewController: ARSCNViewDelegate {

    func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
        //1. Check We Have An ARImageAnchor And Have Detected Our Reference Image
        guard let imageAnchor = anchor as? ARImageAnchor else { return }
        let referenceImage = imageAnchor.referenceImage
        //2. Get The Physical Width & Height Of Our Reference Image
        let width = CGFloat(referenceImage.physicalSize.width)
        let height = CGFloat(referenceImage.physicalSize.height)
        //3. Create An SCNNode To Hold Our Video Player With The Same Size As The Image Target
        let videoHolder = SCNNode()
        let videoHolderGeometry = SCNPlane(width: width, height: height)
        videoHolder.transform = SCNMatrix4MakeRotation(-Float.pi / 2, 1, 0, 0)
        videoHolder.geometry = videoHolderGeometry
        //4. Create Our Video Player
        if let videoURL = Bundle.main.url(forResource: "BlackMirrorz", withExtension: "mp4") {
            setupVideoOnNode(videoHolder, fromURL: videoURL)
        }
        //5. Add It To The Hierarchy
        node.addChildNode(videoHolder)
    }

    /// Creates A Video Player As An SCNGeometry's Diffuse Contents
    ///
    /// - Parameters:
    ///   - node: SCNNode
    ///   - url: URL
    func setupVideoOnNode(_ node: SCNNode, fromURL url: URL) {
        //1. Create An SKVideoNode
        var videoPlayerNode: SKVideoNode!
        //2. Create An AVPlayer With Our Video URL
        let videoPlayer = AVPlayer(url: url)
        //3. Initialize The Video Node With Our Video Player
        videoPlayerNode = SKVideoNode(avPlayer: videoPlayer)
        videoPlayerNode.yScale = -1
        //4. Create A SpriteKit Scene & Position The Video Node In It
        let spriteKitScene = SKScene(size: CGSize(width: 600, height: 300))
        spriteKitScene.scaleMode = .aspectFit
        videoPlayerNode.position = CGPoint(x: spriteKitScene.size.width / 2, y: spriteKitScene.size.height / 2)
        videoPlayerNode.size = spriteKitScene.size
        spriteKitScene.addChild(videoPlayerNode)
        //5. Set The Node's Geometry Diffuse Contents To Our SpriteKit Scene
        node.geometry?.firstMaterial?.diffuse.contents = spriteKitScene
        //6. Play The Video
        videoPlayerNode.play()
        videoPlayer.volume = 0
    }
}
Update:
If you want to place the video above the target you can do something like the following:
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
    //1. Check We Have An ARImageAnchor And Have Detected Our Reference Image
    guard let imageAnchor = anchor as? ARImageAnchor else { return }
    let referenceImage = imageAnchor.referenceImage
    //2. Get The Physical Width & Height Of Our Reference Image
    let width = CGFloat(referenceImage.physicalSize.width)
    let height = CGFloat(referenceImage.physicalSize.height)
    //3. Create An SCNNode To Hold Our Video Player
    let videoHolder = SCNNode()
    let planeHeight = height / 2
    let videoHolderGeometry = SCNPlane(width: width, height: planeHeight)
    videoHolder.transform = SCNMatrix4MakeRotation(-Float.pi / 2, 1, 0, 0)
    videoHolder.geometry = videoHolderGeometry
    //4. Place It Above The Target
    let zPosition = height - (planeHeight / 2)
    videoHolder.position = SCNVector3(0, 0, -zPosition)
    //5. Create Our Video Player
    if let videoURL = Bundle.main.url(forResource: "BlackMirrorz", withExtension: "mp4") {
        setupVideoOnNode(videoHolder, fromURL: videoURL)
    }
    //6. Add It To The Hierarchy
    node.addChildNode(videoHolder)
}
Hope it helps...