I'm trying to scan a reference image and then display the image itself on top of the printed reference image. The "virtual" image should have the same size as the printed one.
My idea: get the size of the printed reference image, then scale the image in the SCNNode to this size (or scale the SCNNode to this size?)
But: 1. How do I get the size of the printed image? 2. To scale the SCNNode I also need the size of that node. How do I get it?
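For context, a minimal sketch of the two lookups involved, assuming a standard ARSCNViewDelegate setup: ARKit reports the printed size through the anchor's referenceImage.physicalSize (in metres, as entered in the asset catalog), and a node's intrinsic size can be derived from its boundingBox. This is only an illustration, not the project's actual delegate:

func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
    guard let imageAnchor = anchor as? ARImageAnchor else { return }
    // 1. Printed size of the detected image, in metres (from the asset catalog).
    let physicalSize = imageAnchor.referenceImage.physicalSize
    // 2. Intrinsic size of the content node to overlay, from its bounding box.
    let contentNode = SCNNode(geometry: SCNPlane(width: 1, height: 1)) // placeholder content
    let (minCorner, maxCorner) = contentNode.boundingBox
    let contentWidth = maxCorner.x - minCorner.x
    let contentHeight = maxCorner.y - minCorner.y
    // 3. Scale the content so it matches the printed size, and lay it flat on the image.
    contentNode.scale = SCNVector3(Float(physicalSize.width) / contentWidth,
                                   Float(physicalSize.height) / contentHeight,
                                   1)
    contentNode.eulerAngles.x = -.pi / 2
    node.addChildNode(contentNode)
}

My current code: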
import UIKit
import SceneKit
import ARKit
import AVKit
import AVFoundation
class ViewController: UIViewController, ARSCNViewDelegate {
@IBOutlet var sceneView: ARSCNView!
private var planeNode: SCNNode?
private var imageNode: SCNNode?
private var animationInfo: AnimationInfo?
private var currentMediaName: String?
private var scrollView: UIScrollView!
override func viewDidLoad() {
super.viewDidLoad()
let scene = SCNScene()
sceneView.scene = scene
sceneView.delegate = self
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
// Load reference images to look for from "AR Resources" folder
guard let referenceImages = ARReferenceImage.referenceImages(inGroupNamed: "AR Resources", bundle: nil) else {
fatalError("Missing expected asset catalog resources.")
}
// Create a session configuration
let configuration = ARWorldTrackingConfiguration()
// Add previously loaded images to ARScene configuration as detectionImages
configuration.detectionImages = referenceImages
// Run the view's session
sceneView.session.run(configuration)
let tap = UITapGestureRecognizer(target: self, action: #selector(handleTap(rec:)))
//Add recognizer to sceneview
sceneView.addGestureRecognizer(tap)
}
//Method called when tap
@objc func handleTap(rec: UITapGestureRecognizer){
let location: CGPoint = rec.location(in: sceneView)
let hits = self.sceneView.hitTest(location, options: nil)
guard let tappedNode = hits.first?.node, let nodeName = tappedNode.name else { return }
// The node name encodes "<imageName>-<width>,<height>" (set in renderer(_:didAdd:for:)).
let parts = nodeName.components(separatedBy: "-")
guard parts.count == 2 else { return }
let size = parts[1].components(separatedBy: ",")
guard size.count == 2, let width = Float(size[0]), let height = Float(size[1]) else { return }
loadReferenceImage(tappedNode: tappedNode, name: parts[0], width: width, height: height)
}
private func playVideo() {
guard let path = Bundle.main.path(forResource: "video", ofType:"m4v") else {
debugPrint("video.m4v not found")
return
}
let player = AVPlayer(url: URL(fileURLWithPath: path))
let playerController = AVPlayerViewController()
playerController.player = player
present(playerController, animated: true) {
player.play()
}
}
func loadReferenceImage(tappedNode: SCNNode, name: String, width: Float, height: Float){
print("TAP")
print(name)
let currentNode = tappedNode.parent
if let image = UIImage(named: "col" + name){
// Remove any existing children before adding the replacement plane.
currentNode?.childNodes.forEach { $0.removeFromParentNode() }
let newnode = SCNNode(geometry: SCNPlane(width: CGFloat(width), height: CGFloat(height)))
newnode.geometry?.firstMaterial?.diffuse.contents = image
newnode.scale = SCNVector3(x: 10, y: 10, z: 10)
currentNode?.removeAnimation(forKey: "spin_around")
let rotation = SCNVector3((currentNode?.eulerAngles.x)! - 0.95, (currentNode?.eulerAngles.y)!, (currentNode?.eulerAngles.z)!)
currentNode?.eulerAngles = rotation
//SIZE?????? (this is where I'm stuck: how do I get this node's size?)
let nodex = currentNode?.scale.x
let nodey = currentNode?.scale.y
let nodez = currentNode?.scale.z
let factorx = width / nodex!
let factory = height / nodey!
currentNode?.addChildNode(newnode)
}
}
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
guard let imageAnchor = anchor as? ARImageAnchor else {
return
}
// 1. Load scene.
let planeScene = SCNScene(named: "art.scnassets/plane.scn")!
let planeNode = planeScene.rootNode.childNode(withName: "planeRootNode", recursively: true)!
// 2. Calculate size based on planeNode's bounding box.
let (min, max) = planeNode.boundingBox
let size = SCNVector3Make(max.x - min.x, max.y - min.y, max.z - min.z)
// 3. Calculate the ratio between the real image size and the model size.
// Ignore the Y axis because it points out of the image.
// (1.2 is the model's hard-coded width/depth here; size.x and size.z could be used instead.)
let widthRatio = Float(imageAnchor.referenceImage.physicalSize.width)/1.2
let heightRatio = Float(imageAnchor.referenceImage.physicalSize.height)/1.2
let width = imageAnchor.referenceImage.physicalSize.width
let height = imageAnchor.referenceImage.physicalSize.height
let prefix = "-"
let imageSize = width.description + "," + height.description
let targetName = imageAnchor.referenceImage.name! + prefix + imageSize
// Pick smallest value to be sure that object fits into the image.
let finalRatio = [widthRatio, heightRatio].min()!
// 4. Set transform from imageAnchor data.
planeNode.transform = SCNMatrix4(imageAnchor.transform)
// 5. Animate appearance by scaling model from 0 to previously calculated value.
let appearanceAction = SCNAction.scale(to: CGFloat(finalRatio), duration: 0.4)
appearanceAction.timingMode = .easeOut
// Set initial scale to 0.
planeNode.scale = SCNVector3Make(0 , 0, 0)
//rotate y
let spin = CABasicAnimation(keyPath: "rotation")
spin.fromValue = NSValue(scnVector4: SCNVector4(x: 0, y: 1, z: 0, w: 0))
spin.toValue = NSValue(scnVector4: SCNVector4(x: 0, y: 1, z: 0, w: Float(2 * Double.pi)))
spin.duration = 4
spin.repeatCount = .infinity
planeNode.addAnimation(spin, forKey: "spin_around")
// Add to root node.
sceneView.scene.rootNode.addChildNode(planeNode)
// Run the appearance animation.
planeNode.runAction(appearanceAction)
planeNode.name = targetName
let nodes = planeNode.childNodes
for node in nodes{
node.name = targetName
}
self.planeNode = planeNode
self.imageNode = node
}
// Note: the per-frame delegate callback is renderer(_:updateAtTime:); a "didAdd ... updateAtTime" signature is not part of ARSCNViewDelegate and would never be called.
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
guard let imageNode = imageNode, let planeNode = planeNode else {
return
}
// 1. Unwrap animationInfo. Calculate animationInfo if it is nil.
guard let animationInfo = animationInfo else {
refreshAnimationVariables(startTime: time,
initialPosition: planeNode.simdWorldPosition,
finalPosition: imageNode.simdWorldPosition,
initialOrientation: planeNode.simdWorldOrientation,
finalOrientation: imageNode.simdWorldOrientation)
return
}
// 2. Calculate new animationInfo if image position or orientation changed.
if !simd_equal(animationInfo.finalModelPosition, imageNode.simdWorldPosition) || animationInfo.finalModelOrientation != imageNode.simdWorldOrientation {
refreshAnimationVariables(startTime: time,
initialPosition: planeNode.simdWorldPosition,
finalPosition: imageNode.simdWorldPosition,
initialOrientation: planeNode.simdWorldOrientation,
finalOrientation: imageNode.simdWorldOrientation)
}
// 3. Calculate interpolation based on passedTime/totalTime ratio.
let passedTime = time - animationInfo.startTime
var t = min(Float(passedTime/animationInfo.duration), 1)
// Applying curve function to time parameter to achieve "ease out" timing
t = sin(t * .pi * 0.5)
// 4. Calculate and set new model position and orientation.
let f3t = simd_make_float3(t, t, t)
planeNode.simdWorldPosition = simd_mix(animationInfo.initialModelPosition, animationInfo.finalModelPosition, f3t)
planeNode.simdWorldOrientation = simd_slerp(animationInfo.initialModelOrientation, animationInfo.finalModelOrientation, t)
//planeNode.simdWorldOrientation = imageNode.simdWorldOrientation
}
func refreshAnimationVariables(startTime: TimeInterval, initialPosition: simd_float3, finalPosition: simd_float3, initialOrientation: simd_quatf, finalOrientation: simd_quatf) {
let distance = simd_distance(initialPosition, finalPosition)
// Average speed of movement is 0.15 m/s.
let speed = Float(0.15)
// Total time is calculated as distance/speed. Min time is set to 0.1s and max is set to 2s.
let animationDuration = Double(min(max(0.1, distance/speed), 2))
// Store animation information for later usage.
animationInfo = AnimationInfo(startTime: startTime,
duration: animationDuration,
initialModelPosition: initialPosition,
finalModelPosition: finalPosition,
initialModelOrientation: initialOrientation,
finalModelOrientation: finalOrientation)
}
}
In order to do this, I believe you first need to get the size in pixels of the UIImage, by multiplying its size values by its scale property. An example would be something like this:
guard let image = UIImage(named: "launchScreen") else { return }
let pixelWidth = image.size.width * image.scale
let pixelHeight = image.size.height * image.scale
print(pixelWidth, pixelHeight)
The size of my image when made in Adobe Illustrator was 3072 x 4099, and when I logged the results in the console the dimensions were also the same.
Now the tricky part is converting the pixels to a size we can use in ARKit, remembering that different devices have different PPI (pixels per inch) densities.
In my example I am just going to use the PPI of an iPhone 7 Plus, which is 401.
//1. Get The PPI Of The iPhone7Plus
let iphone7PlusPixelsPerInch: CGFloat = 401
//2. To Get The Image Size In Inches We Need To Divide By The PPI
let inchWidth = pixelWidth/iphone7PlusPixelsPerInch
let inchHeight = pixelHeight/iphone7PlusPixelsPerInch
//3. Calculate The Size In Metres (There Are 2.54cm In An Inch)
let widthInMetres = (inchWidth * 2.54) / 100
let heightInMetres = (inchHeight * 2.54) / 100
Now that we have the size of our image in metres, it is simple to create an SCNNode of that size, e.g:
//1. Generate An SCNPlane With The Same Size As Our Image
let realScaleNode = SCNNode(geometry: SCNPlane(width: widthInMetres, height: heightInMetres))
realScaleNode.geometry?.firstMaterial?.diffuse.contents = image
realScaleNode.position = SCNVector3(0, 0, -1)
//2. Add It To Our Hierarchy
self.augmentedRealityView.scene.rootNode.addChildNode(realScaleNode)
Hope it helps...
P.S: This may be useful for helping you get the PPI of the screen: marchv/UIScreenExtension
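A rough sketch of how that library could replace the hard-coded 401. I believe it exposes an optional pixelsPerInch property on UIScreen (optional because unrecognised devices can't be resolved), but treat the exact property name as an assumption and check its README:

import UIKit
import UIScreenExtension // assumed module name from marchv/UIScreenExtension

func physicalSizeInMetres(of image: UIImage) -> CGSize? {
    // Assumed API: UIScreen.main.pixelsPerInch (CGFloat?).
    guard let ppi = UIScreen.main.pixelsPerInch else { return nil }
    let pixelWidth = image.size.width * image.scale
    let pixelHeight = image.size.height * image.scale
    // pixels -> inches -> centimetres -> metres
    return CGSize(width: (pixelWidth / ppi) * 2.54 / 100,
                  height: (pixelHeight / ppi) * 2.54 / 100)
}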
I use AVCaptureSession to set up a camera view, and the Vision framework to detect a face and draw a rectangle over it.
Here is how I do it:
override func viewDidLoad() {
super.viewDidLoad()
self.prepareVisionRequest()
}
fileprivate func prepareVisionRequest() {
//self.trackingRequests = []
var requests = [VNTrackObjectRequest]()
let faceDetectionRequest = VNDetectFaceRectanglesRequest(completionHandler: { (request, error) in
if error != nil {
print("FaceDetection error: \(String(describing: error)).")
}
guard let faceDetectionRequest = request as? VNDetectFaceRectanglesRequest,
let results = faceDetectionRequest.results else {
return
}
DispatchQueue.main.async {
// Add the observations to the tracking list
for observation in results {
let faceTrackingRequest = VNTrackObjectRequest(detectedObjectObservation: observation)
faceTrackingRequest.trackingLevel = .fast
requests.append(faceTrackingRequest)
}
self.trackingRequests = requests
}
})
// Start with detection. Find face, then track it.
self.detectionRequests = [faceDetectionRequest]
self.sequenceRequestHandler = VNSequenceRequestHandler()
self.setupVisionDrawingLayers()
}
// MARK: Drawing Vision Observations
fileprivate func setupVisionDrawingLayers() {
let captureDeviceResolution = self.captureDeviceResolution
let captureDeviceBounds = CGRect(x: 0,
y: 0,
width: captureDeviceResolution.width,
height: captureDeviceResolution.height)
let captureDeviceBoundsCenterPoint = CGPoint(x: captureDeviceBounds.midX,
y: captureDeviceBounds.midY)
let normalizedCenterPoint = CGPoint(x: 0.5, y: 0.5)
guard let rootLayer = self.rootLayer else {
self.presentErrorAlert(message: "view was not properly initialized")
return
}
let overlayLayer = CALayer()
overlayLayer.name = "DetectionOverlay"
overlayLayer.masksToBounds = true
overlayLayer.anchorPoint = normalizedCenterPoint
overlayLayer.bounds = captureDeviceBounds
overlayLayer.position = CGPoint(x: rootLayer.bounds.midX, y: rootLayer.bounds.midY)
let faceRectangleShapeLayer = CAShapeLayer()
faceRectangleShapeLayer.name = "RectangleOutlineLayer"
faceRectangleShapeLayer.bounds = captureDeviceBounds
faceRectangleShapeLayer.anchorPoint = normalizedCenterPoint
faceRectangleShapeLayer.position = captureDeviceBoundsCenterPoint
faceRectangleShapeLayer.fillColor = UIColor.white.withAlphaComponent(0.9).cgColor
// faceLandmarksShapeLayer.strokeColor = UIColor.white.withAlphaComponent(0.7).cgColor
faceRectangleShapeLayer.lineWidth = 5
faceRectangleShapeLayer.shadowOpacity = 0.7
faceRectangleShapeLayer.shadowRadius = 5
let faceLandmarksShapeLayer = CAShapeLayer()
faceLandmarksShapeLayer.name = "FaceLandmarksLayer"
faceLandmarksShapeLayer.bounds = captureDeviceBounds
faceLandmarksShapeLayer.anchorPoint = normalizedCenterPoint
faceLandmarksShapeLayer.position = captureDeviceBoundsCenterPoint
faceLandmarksShapeLayer.fillColor = nil
faceLandmarksShapeLayer.strokeColor = nil
//
overlayLayer.addSublayer(faceRectangleShapeLayer)
faceRectangleShapeLayer.addSublayer(faceLandmarksShapeLayer)
rootLayer.addSublayer(overlayLayer)
self.detectionOverlayLayer = overlayLayer
self.detectedFaceRectangleShapeLayer = faceRectangleShapeLayer
self.detectedFaceLandmarksShapeLayer = faceLandmarksShapeLayer
self.updateLayerGeometry()
}
Now, I'm trying three ways to take snapshots:
1. Using UIGraphicsImageRenderer: it shows only the rectangle on the face, and the camera view is not visible (it's black).
2. Taking the image from captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection): the image from the buffer shows only the camera view, no rectangle.
3. Using AVCapturePhotoCaptureDelegate to capture a photo from the AVCaptureSession: the captured photo shows only the camera view, no rectangle.
Could you please help me take a snapshot that contains both the camera view and the rectangle? Thanks
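For what it's worth, one plausible approach is to combine method 2 or 3 with method 1: grab the camera frame as a UIImage, then render the Vision overlay layer on top of it with UIGraphicsImageRenderer. A minimal sketch, assuming the overlay is the detectionOverlayLayer built in setupVisionDrawingLayers() and that its coordinate space must be scaled onto the photo's pixel size:

func composite(cameraImage: UIImage, overlay: CALayer) -> UIImage {
    let renderer = UIGraphicsImageRenderer(size: cameraImage.size)
    return renderer.image { context in
        // 1. Draw the camera frame first.
        cameraImage.draw(at: .zero)
        // 2. Map the overlay's coordinate space onto the photo, then draw it on top.
        let scaleX = cameraImage.size.width / overlay.bounds.width
        let scaleY = cameraImage.size.height / overlay.bounds.height
        context.cgContext.scaleBy(x: scaleX, y: scaleY)
        overlay.render(in: context.cgContext)
    }
}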
I am trying to follow the steps in this article to create the following (image below from the article):
It basically recognises a face in order to put something on it (a tattoo), and places a background image behind it.
I am using an iPhone X device to test the code, but every time I test whether .personSegmentation is supported, it is false:
if ARFaceTrackingConfiguration.supportsFrameSemantics(.personSegmentation) {
configuration.frameSemantics.insert(.personSegmentation) // code never executed.
}
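(For context: as far as I know, the person segmentation frame semantics introduced with ARKit 3 require an A12 Bionic chip or newer, and the iPhone X has an A11, so supportsFrameSemantics returns false on that device. A small sketch of gating on capability, under that assumption:)

let configuration = ARFaceTrackingConfiguration()
if ARFaceTrackingConfiguration.supportsFrameSemantics(.personSegmentation) {
    // A12 Bionic or newer (e.g. iPhone XS and later).
    configuration.frameSemantics.insert(.personSegmentation)
} else {
    // A11 and older (e.g. iPhone X): fall back to drawing the background
    // plane behind the face node, as the code below already does.
    print("Person segmentation is not supported on this device")
}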
My whole code for adding the plane to put on top of the face plus the background is:
The ARSCNViewDelegate delegate to add the nodes:
private var virtualBackgroundNode = SCNNode()
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
guard let device = sceneView.device else {
return nil
}
let faceGeometry = ARSCNFaceGeometry(device: device)
let faceNode = SCNNode(geometry: faceGeometry)
faceNode.geometry?.firstMaterial?.transparency = 0
let tattooPlane = SCNPlane(width: 0.13, height: 0.06)
tattooPlane.firstMaterial?.diffuse.contents = UIImage(named: "Tattoos/tattoo0")!
tattooPlane.firstMaterial?.isDoubleSided = true
let tattooNode = SCNNode()
tattooNode.position.z = faceNode.boundingBox.max.z * 3 / 4
tattooNode.position.y = 0.027
tattooNode.geometry = tattooPlane
faceNode.addChildNode(tattooNode)
configureBackgroundView()
sceneView.scene.rootNode.addChildNode(virtualBackgroundNode)
return faceNode
}
Resizing the background image, setting it and positioning the background node:
func configureBackgroundView() {
let (skScene, mediaAspectRatio) = makeImageBackgroundScene(image: UIImage(named: "Cats/cat0")!)
let size = skScene.size
virtualBackgroundNode.geometry = SCNPlane(width: size.width, height: size.height)
let material = SCNMaterial()
material.diffuse.contents = skScene
virtualBackgroundNode.geometry?.materials = [material]
virtualBackgroundNode.scale = SCNVector3(1.7 * mediaAspectRatio, 1.7, 1)
// Place the background far behind the camera's current position.
let cameraPosition = sceneView.pointOfView?.position ?? SCNVector3Zero
let position = SCNVector3(cameraPosition.x, cameraPosition.y, cameraPosition.z - 1000)
virtualBackgroundNode.position = position
}
This method creates a SpriteKit image by resizing the original asset:
func makeImageBackgroundScene(image: UIImage) -> (scene: SKScene, mediaAspectRatio: Double) {
//Adjusted so that the aspect ratio of the image is not distorted
let width = image.size.width
let height = image.size.height
let mediaAspectRatio = Double(width / height)
let cgImage = image.cgImage!
let newImage = UIImage(cgImage: cgImage, scale: 1.0, orientation: .up)
let skScene = SKScene(size: CGSize(width: 1000 * mediaAspectRatio, height: 1000))
let texture = SKTexture(image: newImage)
let skNode = SKSpriteNode(texture:texture)
skNode.position = CGPoint(x: skScene.size.width / 2.0, y: skScene.size.height / 2.0)
skNode.size = skScene.size
skNode.yScale = -1.0
skScene.addChild(skNode)
return (skScene, mediaAspectRatio)
}
Any advice on what to try? Snapchat and TikTok have similar face recognition + background setups, and they work on my device.
Thanks for any help.
In an image detection app, the image is recognised and an opaque overlay plane is created, so when the user taps on the screen a hit test finds the overlay plane and a new object can be created. But I want to position the object exactly at the centre of the underlying image. How can I get it to always be at the centre of the image / plane, and to have the same orientation? Can this be obtained from a hit test result? Thanks for any advice!
@objc func handleScreenTap(sender: UITapGestureRecognizer) {
let tappedSceneView = sender.view as! ARSCNView
let tapLocation = sender.location(in: tappedSceneView)
let planeIntersections = tappedSceneView.hitTest(tapLocation, types: [.estimatedHorizontalPlane, .estimatedVerticalPlane])
if !planeIntersections.isEmpty {
addSceneAtPositionOnPlane(hitTestResult: planeIntersections.first!)
}
}
func addSceneAtPositionOnPlane(hitTestResult: ARHitTestResult) {
let transform = hitTestResult.worldTransform
let positionColumn = transform.columns.3
let initialPosition = SCNVector3(positionColumn.x,
positionColumn.y,
positionColumn.z)
let node = self.createScene(for: initialPosition)
sceneView.scene.rootNode.addChildNode(node)
}
func createScene(for position: SCNVector3) -> SCNNode {
let box = SCNNode(geometry: SCNBox(width: 0.1, //x
height: 0.1, //y
length: 0.1, //z
chamferRadius: 0))
box.geometry?.firstMaterial?.diffuse.contents = UIColor.red
box.geometry?.firstMaterial?.isDoubleSided = true
box.opacity = 0.8
box.position = position
return box
}
If you have already added an SCNNode to render a plane on top of the detected image, then you can just use the SceneKit hitTest method, which returns a SceneKit node, instead of hit-testing against ARKit geometry.
Once you have the plane you added to the scene, you can add your new geometry as a child of that node.
Here is an example: once the image is detected, a plane is drawn on top of it; when the user taps on the plane, a box is added as a child. The box will then follow the tracked image around with the correct position and orientation.
import UIKit
import SceneKit
import ARKit
class ViewController: UIViewController, ARSCNViewDelegate {
@IBOutlet var sceneView: ARSCNView!
override func viewDidLoad() {
super.viewDidLoad()
sceneView.delegate = self
let tapGesture = UITapGestureRecognizer(target: self, action: #selector(onTap))
sceneView.addGestureRecognizer(tapGesture)
}
@objc func onTap(_ recognizer: UITapGestureRecognizer) {
let point = recognizer.location(in: sceneView)
guard let hit = sceneView.hitTest(point, options: nil).first else {
return
}
let box = SCNBox(width: 0.02, height: 0.02, length: 0.02, chamferRadius: 0)
let node = SCNNode(geometry: box)
node.position = SCNVector3(0, 0, 0.01)
box.materials.first?.diffuse.contents = UIColor.red
hit.node.addChildNode(node)
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
let configuration = ARWorldTrackingConfiguration()
guard let images = ARReferenceImage.referenceImages(inGroupNamed: "ARTest", bundle: nil) else {
return
}
configuration.detectionImages = images
configuration.maximumNumberOfTrackedImages = 1
sceneView.session.run(configuration)
}
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
guard let imageAnchor = anchor as? ARImageAnchor else {
return
}
let size = imageAnchor.referenceImage.physicalSize
let plane = SCNPlane(width: size.width, height: size.height)
let planeNode = SCNNode(geometry: plane)
planeNode.eulerAngles.x = -Float.pi / 2
planeNode.opacity = 0.9
node.addChildNode(planeNode)
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
sceneView.session.pause()
}
}
I'm trying to make an AR app like the default Measure app on the iPhone (based on the TBXark/Ruler project on GitHub).
I draw a start node, an end node, a cylinder line, and an SCNText. But I can't manage the scale of their sizes: they are only readable up close, and become too small when measuring against a distant plane.
I have 2 questions:
1. How do I keep the nodes, cylinder and text the same size when drawing near or far, as the Measure app does?
2. How do I draw the SCNText with a background, aligned in the same direction as the cylinder line, as the Measure app does?
Here is my LineNode class:
class LineNode: NSObject {
let startNode: SCNNode
let endNode: SCNNode
var lineNode: SCNNode?
let textNode: SCNNode
let sceneView: ARSCNView?
// init func
init(startPos: SCNVector3,
sceneV: ARSCNView,
color: (start: UIColor, end: UIColor) = (UIColor(hexCss: 0xF1B426), UIColor(hexCss: 0xD43278)),
font: UIFont = UIFont.boldSystemFont(ofSize: 8) ) {
sceneView = sceneV
let scale = 1 / 400.0
let scaleVector = SCNVector3(scale, scale, scale)
func buildSCNSphere(color: UIColor) -> SCNSphere {
let dot = SCNSphere(radius: 1)
dot.firstMaterial?.diffuse.contents = color
dot.firstMaterial?.lightingModel = .constant
dot.firstMaterial?.isDoubleSided = true
return dot
}
// startNode
startNode = SCNNode(geometry: buildSCNSphere(color: color.start))
startNode.scale = scaleVector
startNode.position = startPos
sceneView?.scene.rootNode.addChildNode(startNode)
// endNode
endNode = SCNNode(geometry: buildSCNSphere(color: color.end))
endNode.scale = scaleVector
// line with start to end
lineNode = CylinderLine(parent: sceneView!.scene.rootNode,
v1: startNode.position,
v2: endNode.position,
radius: 0.001,
radSegmentCount: 16,
color: UIColor.white)
sceneView?.scene.rootNode.addChildNode(lineNode!)
// text show measure line length
let text = SCNText(string: "--", extrusionDepth: 0.1)
text.font = font
text.firstMaterial?.diffuse.contents = UIColor(hexCss: 0xffa800)
text.firstMaterial?.lightingModel = .constant
text.alignmentMode = CATextLayerAlignmentMode.center.rawValue
text.truncationMode = CATextLayerTruncationMode.middle.rawValue
text.firstMaterial?.isDoubleSided = true
textNode = SCNNode(geometry: text)
textNode.scale = SCNVector3(1 / 500.0, 1 / 500.0, 1 / 500.0)
super.init()
}
// update end node realtime
public func updatePosition(pos: SCNVector3, camera: ARCamera?, unit: MeasurementUnit.Unit = MeasurementUnit.Unit.centimeter) -> Float {
// update endNode
let posEnd = updateTransform(for: pos, camera: camera)
if endNode.parent == nil {
sceneView?.scene.rootNode.addChildNode(endNode)
}
endNode.position = posEnd
// calculate the new midpoint
let posStart = startNode.position
let middle = SCNVector3((posStart.x + posEnd.x) / 2.0, (posStart.y + posEnd.y) / 2.0 + 0.002, (posStart.z + posEnd.z) / 2.0)
// update text measure
let text = textNode.geometry as! SCNText
let length = posEnd.distanceFromPos(pos: startNode.position)
text.string = MeasurementUnit(meterUnitValue: length).roundUpstring(type: unit)
text.materials.first?.diffuse.contents = UIColor.orange
textNode.setPivot()
textNode.position = middle
if textNode.parent == nil {
sceneView?.scene.rootNode.addChildNode(textNode)
}
lineNode?.removeFromParentNode()
lineNode = lineBetweenNodeA(nodeA: startNode, nodeB: endNode)
sceneView?.scene.rootNode.addChildNode(lineNode!)
return length
}
}
I use this to update the scale, so that even if you stay far away it is still readable:
func updateScaleFromCameraForNodes(_ nodes: [SCNNode], fromPointOfView pointOfView: SCNNode , useScaling: Bool){
nodes.forEach { (node) in
//1. Get The Current Position Of The Node
let positionOfNode = SCNVector3ToGLKVector3(node.worldPosition)
//2. Get The Current Position Of The Camera
let positionOfCamera = SCNVector3ToGLKVector3(pointOfView.worldPosition)
//3. Calculate The Distance From The Node To The Camera
let distanceBetweenNodeAndCamera = GLKVector3Distance(positionOfNode, positionOfCamera)
let a = distanceBetweenNodeAndCamera * 1.75
if useScaling {
node.simdScale = simd_float3(a, a, a)
}
}
SCNTransaction.flush()
}
Then call it from renderer(_:updateAtTime:):
self.updateScaleFromCameraForNodes(self.nodesAdded, fromPointOfView: self.cameraNode, useScaling: true)
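The second question (an SCNText with a background, kept aligned with the line) isn't covered by the snippet above. A hedged sketch of one approach: size a rounded SCNPlane from the text's bounding box and billboard the pair toward the camera. All names here are illustrative, not from the Ruler project, and note that the Measure app additionally rotates the label to follow the line on screen, which would need extra work:

func makeLabelNode(text: String) -> SCNNode {
    let textGeometry = SCNText(string: text, extrusionDepth: 0.1)
    textGeometry.font = UIFont.boldSystemFont(ofSize: 8)
    textGeometry.firstMaterial?.diffuse.contents = UIColor.white
    textGeometry.firstMaterial?.lightingModel = .constant
    let textNode = SCNNode(geometry: textGeometry)

    // Size a rounded background plane from the text's bounding box.
    let (minB, maxB) = textNode.boundingBox
    let textWidth = CGFloat(maxB.x - minB.x)
    let textHeight = CGFloat(maxB.y - minB.y)
    let plane = SCNPlane(width: textWidth + 4, height: textHeight + 4)
    plane.cornerRadius = plane.height / 2
    plane.firstMaterial?.diffuse.contents = UIColor(white: 0, alpha: 0.6)
    plane.firstMaterial?.lightingModel = .constant
    let labelNode = SCNNode(geometry: plane)

    // Centre the text on the plane, slightly in front of it.
    textNode.position = SCNVector3(-Float(textWidth) / 2 - minB.x,
                                   -Float(textHeight) / 2 - minB.y,
                                   0.01)
    labelNode.addChildNode(textNode)

    // Keep the label upright and facing the camera, like the Measure app.
    let billboard = SCNBillboardConstraint()
    billboard.freeAxes = .Y
    labelNode.constraints = [billboard]

    // Scale down, since SCNText units are points (same trick as textNode above).
    labelNode.scale = SCNVector3(1 / 500.0, 1 / 500.0, 1 / 500.0)
    return labelNode
}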
I have image recognition working using ARKit. When an image is detected, I need to show and play a video in the presented scene (above the detected image). How can I do that?
lazy var fadeAndSpinAction: SCNAction = {
return .sequence([
.fadeIn(duration: fadeDuration),
.rotateBy(x: 0, y: 0, z: CGFloat.pi * 360 / 180, duration: rotateDuration),
.wait(duration: waitDuration),
.fadeOut(duration: fadeDuration)
])
}()
lazy var fadeAction: SCNAction = {
return .sequence([
.fadeOpacity(by: 0.8, duration: fadeDuration),
.wait(duration: waitDuration),
.fadeOut(duration: fadeDuration)
])
}()
lazy var fishNode: SCNNode = {
guard let scene = SCNScene(named: "Catfish1.scn"),
let node = scene.rootNode.childNode(withName: "Catfish1", recursively: false) else { return SCNNode() }
let scaleFactor = 0.005
node.scale = SCNVector3(scaleFactor, scaleFactor, scaleFactor)
node.eulerAngles.x = -.pi / 2
return node
}()
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
sceneView.delegate = self
configureLighting()
}
func configureLighting() {
sceneView.autoenablesDefaultLighting = true
sceneView.automaticallyUpdatesLighting = true
}
override func viewWillAppear(_ animated: Bool) {
resetTrackingConfiguration()
}
func resetTrackingConfiguration() {
guard let referenceImages = ARReferenceImage.referenceImages(inGroupNamed: "AR Resources", bundle: nil) else { return }
let configuration = ARWorldTrackingConfiguration()
configuration.detectionImages = referenceImages
let options: ARSession.RunOptions = [.resetTracking, .removeExistingAnchors]
sceneView.session.run(configuration, options: options)
statusLabel.text = "Move camera around to detect images"
}
extension ViewController: ARSCNViewDelegate {
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
DispatchQueue.main.async {
guard let imageAnchor = anchor as? ARImageAnchor,
let imageName = imageAnchor.referenceImage.name else { return }
// TODO: Overlay 3D Object
let overlayNode = self.getNode(withImageName: imageName)
overlayNode.opacity = 0
overlayNode.position.y = 0.2
overlayNode.runAction(self.fadeAndSpinAction)
node.addChildNode(overlayNode)
self.statusLabel.text = "Image detected: \"\(imageName)\""
self.videoNode.geometry = SCNPlane(width: 1276.0 / 2.0, height: 712.0 / 2.0)
self.spriteKitScene.scaleMode = .aspectFit
self.videoSpriteKitNode?.position = CGPoint(x: self.spriteKitScene.size.width / 2.0, y: self.spriteKitScene.size.height / 2.0)
self.videoSpriteKitNode?.size = self.spriteKitScene.size
self.spriteKitScene.addChild(self.videoSpriteKitNode!)
self.videoNode.geometry?.firstMaterial?.diffuse.contents = self.spriteKitScene
var transform = SCNMatrix4MakeRotation(Float.pi, 0.0, 0.0, 1.0)
transform = SCNMatrix4Translate(transform, 1.0, 1.0, 0)
self.videoNode.geometry?.firstMaterial?.diffuse.contentsTransform = transform
self.videoNode.position = SCNVector3(x: 0, y: 30, z: 7)
node.addChildNode(self.videoNode)
self.videoSpriteKitNode?.play()
}
}
func getPlaneNode(withReferenceImage image: ARReferenceImage) -> SCNNode {
let plane = SCNPlane(width: image.physicalSize.width,
height: image.physicalSize.height)
let node = SCNNode(geometry: plane)
return node
}
}
Looking at your code, firstly you are setting your SCNPlane to be 638 metres wide and 356 metres tall; I'm sure that's not what you actually want ^________^.
Anyway, here is an example of playing a local video using an SKScene & SKVideoNode, which works well:
//--------------------------
// MARK: - ARSCNViewDelegate
//--------------------------
extension ViewController: ARSCNViewDelegate {
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
//1. Check We Have An ARImageAnchor And Have Detected Our Reference Image
guard let imageAnchor = anchor as? ARImageAnchor else { return }
let referenceImage = imageAnchor.referenceImage
//2. Get The Physical Width & Height Of Our Reference Image
let width = CGFloat(referenceImage.physicalSize.width)
let height = CGFloat(referenceImage.physicalSize.height)
//3. Create An SCNNode To Hold Our Video Player With The Same Size As The Image Target
let videoHolder = SCNNode()
let videoHolderGeometry = SCNPlane(width: width, height: height)
videoHolder.transform = SCNMatrix4MakeRotation(-Float.pi / 2, 1, 0, 0)
videoHolder.geometry = videoHolderGeometry
//4. Create Our Video Player
if let videoURL = Bundle.main.url(forResource: "BlackMirrorz", withExtension: "mp4"){
setupVideoOnNode(videoHolder, fromURL: videoURL)
}
//5. Add It To The Hierarchy
node.addChildNode(videoHolder)
}
/// Creates A Video Player As An SCNGeometries Diffuse Contents
///
/// - Parameters:
/// - node: SCNNode
/// - url: URL
func setupVideoOnNode(_ node: SCNNode, fromURL url: URL){
//1. Create An SKVideoNode
var videoPlayerNode: SKVideoNode!
//2. Create An AVPlayer With Our Video URL
let videoPlayer = AVPlayer(url: url)
//3. Intialize The Video Node With Our Video Player
videoPlayerNode = SKVideoNode(avPlayer: videoPlayer)
videoPlayerNode.yScale = -1
//4. Create A SpriteKitScene & Postion It
let spriteKitScene = SKScene(size: CGSize(width: 600, height: 300))
spriteKitScene.scaleMode = .aspectFit
videoPlayerNode.position = CGPoint(x: spriteKitScene.size.width/2, y: spriteKitScene.size.height/2)
videoPlayerNode.size = spriteKitScene.size
spriteKitScene.addChild(videoPlayerNode)
//5. Set The Node's Geometry Diffuse Contents To Our SpriteKit Scene
node.geometry?.firstMaterial?.diffuse.contents = spriteKitScene
//6. Play The Video (Muted)
videoPlayerNode.play()
videoPlayer.volume = 0
}
}
Update:
If you want to place the video above the target you can do something like the following:
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
//1. Check We Have An ARImageAnchor And Have Detected Our Reference Image
guard let imageAnchor = anchor as? ARImageAnchor else { return }
let referenceImage = imageAnchor.referenceImage
//2. Get The Physical Width & Height Of Our Reference Image
let width = CGFloat(referenceImage.physicalSize.width)
let height = CGFloat(referenceImage.physicalSize.height)
//3. Create An SCNNode To Hold Our Video Player
let videoHolder = SCNNode()
let planeHeight = height/2
let videoHolderGeometry = SCNPlane(width: width, height: planeHeight)
videoHolder.transform = SCNMatrix4MakeRotation(-Float.pi / 2, 1, 0, 0)
videoHolder.geometry = videoHolderGeometry
//4. Place It Above The Target
let zPosition = height - (planeHeight/2)
videoHolder.position = SCNVector3(0, 0, -zPosition)
//5. Create Our Video Player
if let videoURL = Bundle.main.url(forResource: "BlackMirrorz", withExtension: "mp4"){
setupVideoOnNode(videoHolder, fromURL: videoURL)
}
//6. Add It To The Hierarchy
node.addChildNode(videoHolder)
}
Hope it helps...