Show masking on an object that is between the camera and a wall using RealityKit - Swift

I am recording a video for generating a floor plan, in which I need to capture the wall and floor together from a certain position. If the user is too near to the wall, or if any object comes between the camera and the wall/floor, I need to show a "Too Close" mask on that object, something like what is shown in this video.
I tried to use a raycast in the session(_ session: ARSession, didUpdate frame: ARFrame) method, but I am very new to AR and don't know which method I need to use.
func session(_ session: ARSession, didUpdate frame: ARFrame) {
    guard let query = self.arView?.makeRaycastQuery(from: self.arView?.center ?? CGPoint.zero,
                                                    allowing: .estimatedPlane,
                                                    alignment: .any)
    else { return }
    guard let raycastResult = self.arView?.session.raycast(query).first
    else { return }
    let currentPositionOfCamera = raycastResult.worldTransform.getPosition()
    if currentPositionOfCamera != .zero {
        let distanceFromCamera = frame.camera.transform.getPosition().distanceFrom(position: currentPositionOfCamera)
        print("Distance from raycast:", distanceFromCamera)
        if distanceFromCamera < 0.5 {
            print("Too Close")
        }
    }
}
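Note: the snippet above (and the answer below) relies on small helper extensions (getPosition(), distanceFrom(position:), and a position property on the transform matrix) that aren't shown in the post. A minimal sketch of what they might look like, with names simply matching the calls in the snippets:
import simd

// Hypothetical helpers matching the calls used in the snippets (not part of the original post).
extension simd_float4x4 {
    /// Translation component of the transform matrix.
    func getPosition() -> SIMD3<Float> {
        SIMD3<Float>(columns.3.x, columns.3.y, columns.3.z)
    }

    /// Same value exposed as a property, as used in the answer below.
    var position: SIMD3<Float> { getPosition() }
}

extension SIMD3 where Scalar == Float {
    /// Euclidean distance to another point.
    func distanceFrom(position: SIMD3<Float>) -> Float {
        simd_distance(self, position)
    }
}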

I am just learning ARKit and RealityKit as well, but wouldn't your code be:
let currentPositionOfCamera = self.arView.cameraTransform.translation
if currentPositionOfCamera != .zero {
    // distance is defined in simd as the distance between 2 points
    let distanceFromCamera = distance(raycastResult.worldTransform.position, currentPositionOfCamera)
    print("Distance from raycast:", distanceFromCamera)
    if distanceFromCamera < 0.5 {
        print("Too Close")
        let rayDirection = normalize(raycastResult.worldTransform.position - self.arView.cameraTransform.translation)
        // This pulls the text back toward the camera from the plane
        let textPositionInWorldCoordinates = raycastResult.worldTransform.position - (rayDirection * 0.1)
        let textEntity = self.tooCloseModel()
        // This scales the text so it is of a consistent size
        textEntity.scale = .one * distanceFromCamera
        var textPositionWithCameraOrientation = self.arView.cameraTransform
        textPositionWithCameraOrientation.translation = textPositionInWorldCoordinates
        let textAnchor = AnchorEntity(world: textPositionWithCameraOrientation.matrix)
        textAnchor.addChild(textEntity)
        self.arView.scene.addAnchor(textAnchor)
        // self.textAnchor is defined somewhere in the class as an optional
        self.textAnchor = textAnchor
    } else {
        guard let textAnchor = self.textAnchor else { return }
        self.arView.scene.removeAnchor(textAnchor)
        self.textAnchor = nil
    }
}
// Creates a "Too Close" text ModelEntity
func tooCloseModel() -> ModelEntity {
    let lineHeight: CGFloat = 0.05
    let font = MeshResource.Font.systemFont(ofSize: lineHeight)
    let textMesh = MeshResource.generateText("Too Close", extrusionDepth: Float(lineHeight * 0.1), font: font)
    // Pick any color here; the Apple sample uses the mesh classification's color
    let textMaterial = SimpleMaterial(color: .red, isMetallic: true)
    let model = ModelEntity(mesh: textMesh, materials: [textMaterial])
    // Center the text
    model.position.x -= model.visualBounds(relativeTo: nil).extents.x / 2
    return model
}
This code is adapted from Apple's Visualizing Scene Semantics.

Related

Get snapshot from AVCaptureSession containing VisionKit face detection elements

I use AVCaptureSession to set up a camera view and VisionKit to detect a face and add a rectangle over it.
Here is how I do it:
override func viewDidLoad() {
super.viewDidLoad()
self.prepareVisionRequest()
}
fileprivate func prepareVisionRequest() {
//self.trackingRequests = []
var requests = [VNTrackObjectRequest]()
let faceDetectionRequest = VNDetectFaceRectanglesRequest(completionHandler: { (request, error) in
if error != nil {
print("FaceDetection error: \(String(describing: error)).")
}
guard let faceDetectionRequest = request as? VNDetectFaceRectanglesRequest,
let results = faceDetectionRequest.results else {
return
}
DispatchQueue.main.async {
// Add the observations to the tracking list
for observation in results {
let faceTrackingRequest = VNTrackObjectRequest(detectedObjectObservation: observation)
faceTrackingRequest.trackingLevel = .fast
requests.append(faceTrackingRequest)
}
self.trackingRequests = requests
}
})
// Start with detection. Find face, then track it.
self.detectionRequests = [faceDetectionRequest]
self.sequenceRequestHandler = VNSequenceRequestHandler()
self.setupVisionDrawingLayers()
}
// MARK: Drawing Vision Observations
fileprivate func setupVisionDrawingLayers() {
let captureDeviceResolution = self.captureDeviceResolution
let captureDeviceBounds = CGRect(x: 0,
y: 0,
width: captureDeviceResolution.width,
height: captureDeviceResolution.height)
let captureDeviceBoundsCenterPoint = CGPoint(x: captureDeviceBounds.midX,
y: captureDeviceBounds.midY)
let normalizedCenterPoint = CGPoint(x: 0.5, y: 0.5)
guard let rootLayer = self.rootLayer else {
self.presentErrorAlert(message: "view was not property initialized")
return
}
let overlayLayer = CALayer()
overlayLayer.name = "DetectionOverlay"
overlayLayer.masksToBounds = true
overlayLayer.anchorPoint = normalizedCenterPoint
overlayLayer.bounds = captureDeviceBounds
overlayLayer.position = CGPoint(x: rootLayer.bounds.midX, y: rootLayer.bounds.midY)
let faceRectangleShapeLayer = CAShapeLayer()
faceRectangleShapeLayer.name = "RectangleOutlineLayer"
faceRectangleShapeLayer.bounds = captureDeviceBounds
faceRectangleShapeLayer.anchorPoint = normalizedCenterPoint
faceRectangleShapeLayer.position = captureDeviceBoundsCenterPoint
faceRectangleShapeLayer.fillColor = UIColor.white.withAlphaComponent(0.9).cgColor
// faceLandmarksShapeLayer.strokeColor = UIColor.white.withAlphaComponent(0.7).cgColor
faceRectangleShapeLayer.lineWidth = 5
faceRectangleShapeLayer.shadowOpacity = 0.7
faceRectangleShapeLayer.shadowRadius = 5
let faceLandmarksShapeLayer = CAShapeLayer()
faceLandmarksShapeLayer.name = "FaceLandmarksLayer"
faceLandmarksShapeLayer.bounds = captureDeviceBounds
faceLandmarksShapeLayer.anchorPoint = normalizedCenterPoint
faceLandmarksShapeLayer.position = captureDeviceBoundsCenterPoint
faceLandmarksShapeLayer.fillColor = nil
faceLandmarksShapeLayer.strokeColor = nil
//
overlayLayer.addSublayer(faceRectangleShapeLayer)
faceRectangleShapeLayer.addSublayer(faceLandmarksShapeLayer)
rootLayer.addSublayer(overlayLayer)
self.detectionOverlayLayer = overlayLayer
self.detectedFaceRectangleShapeLayer = faceRectangleShapeLayer
self.detectedFaceLandmarksShapeLayer = faceLandmarksShapeLayer
self.updateLayerGeometry()
}
Now, I'm trying three ways to take snapshots:
1- Using UIGraphicsImageRenderer: it shows only the rectangle on the face, and the camera view is not visible - it's black.
2- Taking the image from captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection): the image from the buffer shows only the camera view, no rectangle.
3- Using AVCapturePhotoCaptureDelegate to capture a photo from AVCaptureSession: the captured photo shows only the camera view, no rectangle.
Could you please help me take a snapshot that contains both the camera view and the rectangle! Thanks
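One possible approach (a sketch, not from the original post): keep the most recent pixel buffer from captureOutput(_:didOutput:from:), convert it to a UIImage, and then render the detection overlay layer on top of it with UIGraphicsImageRenderer. The snapshotWithOverlay name and its parameters below are assumptions; overlay would be the detectionOverlayLayer built in setupVisionDrawingLayers().
import AVFoundation
import CoreImage
import UIKit

// Sketch only: composite the latest camera frame with the Vision overlay layer.
func snapshotWithOverlay(pixelBuffer: CVPixelBuffer, overlay: CALayer) -> UIImage? {
    // 1. Convert the camera frame into a CGImage/UIImage.
    let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
    let context = CIContext()
    guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return nil }
    let cameraImage = UIImage(cgImage: cgImage)

    // 2. Draw the camera frame first, then render the overlay layer on top.
    let renderer = UIGraphicsImageRenderer(size: cameraImage.size)
    return renderer.image { ctx in
        cameraImage.draw(in: CGRect(origin: .zero, size: cameraImage.size))
        // Scale the overlay to the frame size in case its bounds differ from the
        // pixel-buffer resolution (orientation handling may still be needed
        // depending on the capture connection settings).
        ctx.cgContext.saveGState()
        ctx.cgContext.scaleBy(x: cameraImage.size.width / overlay.bounds.width,
                              y: cameraImage.size.height / overlay.bounds.height)
        overlay.render(in: ctx.cgContext)
        ctx.cgContext.restoreGState()
    }
}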

Face position using VisionKit in ARKit

I added VisionKit face detection on an ARSCNView, and it can detect the face. Here is how I did that:
public func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
let faceDetectionRequest = VNDetectFaceRectanglesRequest(completionHandler: { (request: VNRequest, error: Error?) in
DispatchQueue.main.async {
self.faceLayers.forEach { drawing in
drawing.removeFromSuperlayer()
}
if let observations = request.results as? [VNFaceObservation] {
self.handleFaceDetectionObservations(observations: observations)
}
}
})
guard let capturedImage = sceneView.session.currentFrame?.capturedImage else { return }
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: capturedImage, orientation: .leftMirrored, options: [:])
do {
try imageRequestHandler.perform([faceDetectionRequest])
} catch {
print("perform fail, error: ", error.localizedDescription)
}
}
fileprivate func handleFaceDetectionObservations(observations: [VNFaceObservation]) {
for observation in observations {
let newWidth = sceneView.bounds.width * observation.boundingBox.width
let newHeight = sceneView.bounds.height * observation.boundingBox.height
let newX = sceneView.bounds.width * observation.boundingBox.origin.x
let newY = sceneView.bounds.height * observation.boundingBox.origin.y
let faceRectConverted = CGRect(x: newX, y: newY, width: newWidth, height: newHeight)
let faceRectanglePath = CGPath(rect: faceRectConverted, transform: nil)
let faceLayer = CAShapeLayer()
faceLayer.path = faceRectanglePath
faceLayer.fillColor = UIColor.black.cgColor
self.faceLayers.append(faceLayer)
self.sceneView.layer.addSublayer(faceLayer)
}
}
The only issue I have here is the face position in the view: it's calculated wrong. It looks like the problem comes from camera mirroring; when the face goes right, the face rectangle goes left, and when the face goes up, the rectangle goes down. I don't know how to do the right calculation to tie the observation rect to the right place in the sceneView. Could anyone help me with that?
I also have the same problem in landscape, where the rectangle height is more compact...
Thanks
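Not part of the original post, but one common cause of this symptom is that Vision's boundingBox is normalized with a lower-left origin, so the Y coordinate has to be flipped when converting to UIKit coordinates; the horizontal mirroring comes from the front camera and the .leftMirrored orientation, and for a fully correct mapping (including landscape) ARFrame's displayTransform(for:viewportSize:) is the more general tool. A minimal sketch of just the Y-flip, assuming portrait orientation and the same sceneView as above:
import ARKit
import Vision

// Sketch only: convert a Vision bounding box (normalized, lower-left origin)
// into UIKit view coordinates (points, upper-left origin).
fileprivate func viewRect(for boundingBox: CGRect, in view: ARSCNView) -> CGRect {
    let width = view.bounds.width * boundingBox.width
    let height = view.bounds.height * boundingBox.height
    let x = view.bounds.width * boundingBox.origin.x
    // Flip Y: Vision's origin is at the bottom-left, UIKit's is at the top-left.
    let y = view.bounds.height * (1 - boundingBox.origin.y - boundingBox.height)
    return CGRect(x: x, y: y, width: width, height: height)
}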

AR objects not anchoring or sizing correctly in RealityKit

I have an AR scene with two objects, one brown cow and one black one. They're both supposed to be displayed in the scene, spaced a little apart. I originally only had the brown cow, which was just a little bit too big. I changed something, which I can't remember, and now my scene is from the inside of the cow, and I can't exit the cow's corpse. It seems like it moves around when I do. I think the issue is because of a positive number for the [minimum bounds], but I'm not entirely sure. I've set the z axis for the cow as well. How can I make the cow a little bit smaller and about 5-7 yards away from me at spawn?
import UIKit
import RealityKit
import ARKit
class ViewController: UIViewController {
@IBOutlet var arView: ARView!
override func viewDidLoad() {
super.viewDidLoad()
arView.session.delegate = self
showModel()
overlayCoachingView()
setupARView()
arView.addGestureRecognizer(UITapGestureRecognizer(target: self, action:
#selector(handleTap(recognizer:))))
}
func showModel(){
let anchorEntity = AnchorEntity(plane: .horizontal, minimumBounds:[0.7, 0.7])
let entity = try! Entity.loadModel(named: "COW_ANIMATIONS")
entity.setParent(anchorEntity)
arView.scene.addAnchor(anchorEntity)
}
func overlayCoachingView () {
let coachingView = ARCoachingOverlayView(frame: CGRect(x: 0, y: 0, width:
arView.frame.width, height: arView.frame.height))
coachingView.session = arView.session
coachingView.activatesAutomatically = true
coachingView.goal = .horizontalPlane
view.addSubview(coachingView)
}
// Load the "Box" scene from the "Experience" Reality File
// let boxAnchor = try! Experience.loadBox()
// Add the box anchor to the scene
//arView.scene.anchors.append(boxAnchor)
func setupARView(){
arView.automaticallyConfigureSession = false
let configuration = ARWorldTrackingConfiguration()
configuration.planeDetection = [.horizontal, .vertical]
configuration.environmentTexturing = .automatic
arView.session.run(configuration)
}
//object placement
@objc
func handleTap(recognizer: UITapGestureRecognizer){
let location = recognizer.location(in:arView)
let results = arView.raycast(from: location, allowing: .estimatedPlane, alignment: .horizontal)
if let firstResult = results.first {
let anchor = ARAnchor(name: "COW_ANIMATIONS", transform: firstResult.worldTransform)
arView.session.add(anchor: anchor)
} else {
print("Object placement failed - couldn't find surface.")
//cow animations
let robot = try! ModelEntity.load(named: "COW_ANIMATIONS")
let anchor = AnchorEntity()
anchor.children.append(robot)
arView.scene.anchors.append(anchor)
robot.playAnimation(robot.availableAnimations[0].repeat(duration: .infinity),
transitionDuration: 0.5,
startsPaused: false)
//start cow animation
let brownCow = try! ModelEntity.load(named: "COW_ANIMATIONS")
let blackCow = try! ModelEntity.load(named: "Cow")
brownCow.position.x = -1.0
blackCow.position.x = 1.0
brownCow.setParent(anchor)
blackCow.setParent(anchor)
arView.scene.anchors.append(anchor)
let cowAnimationResource = brownCow.availableAnimations[0]
let horseAnimationResource = blackCow.availableAnimations[0]
brownCow.playAnimation(cowAnimationResource.repeat(duration: .infinity),
transitionDuration: 1.25,
startsPaused: false)
blackCow.playAnimation(horseAnimationResource.repeat(duration: .infinity),
transitionDuration: 0.75,
startsPaused: false)
//end cow animations
} // close else branch
} // close handleTap(recognizer:)
func placeObject(named entityName: String, for anchor: ARAnchor) {
let entity = try! ModelEntity.loadModel(named: entityName)
entity.generateCollisionShapes(recursive: true)
arView.installGestures([.rotation, .translation], for: entity)
let anchorEntity = AnchorEntity(anchor: anchor)
anchorEntity.addChild(entity)
arView.scene.addAnchor(anchorEntity)
}
}
extension ViewController: ARSessionDelegate {
func session(_ session: ARSession, didAdd anchors: [ARAnchor]) {
for anchor in anchors {
if let anchorName = anchor.name, anchorName == "COW_ANIMATIONS" {
placeObject(named: anchorName, for: anchor)
} }
}
}
First step
In RealityKit, if a model is tethered to its personal anchor (the case where one anchor holds just one model), you have two ways to scale it:
cowEntity.scale = [0.7, 0.7, 0.7]
// or
cowAnchor.scale = SIMD3<Float>([1, 1, 1] * 0.7)
and you have at least two ways to position the cow model along any axis (for instance, along the Z axis):
cowEntity.position = SIMD3<Float>(0, 0,-2)
// or
cowAnchor.position.z = -2.0
So, as you see, when you transform cowAnchor, all its children get this transformation as well.
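Applied to the showModel() method from the question, a minimal sketch (same "COW_ANIMATIONS" asset; the scale and distance values are just examples) could look like:
func showModel() {
    let anchorEntity = AnchorEntity(plane: .horizontal, minimumBounds: [0.7, 0.7])
    let cowEntity = try! Entity.loadModel(named: "COW_ANIMATIONS")
    cowEntity.scale = [0.7, 0.7, 0.7]            // make the cow a bit smaller
    cowEntity.position = SIMD3<Float>(0, 0, -5)  // roughly 5 m in front of the anchor
    cowEntity.setParent(anchorEntity)
    arView.scene.addAnchor(anchorEntity)
}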
Second step
You need to appropriately place the model's pivot point in a 3D authoring app. At the moment, RealityKit doesn't have a tool to fix the pivot's position the way you can in SceneKit using the simdPivot instance property.
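A common workaround (a sketch, not an official pivot API) is to nest the model in an empty parent entity and offset the child, so transforms applied to the parent act around the shifted point:
// Hypothetical offset: shift the child so that rotations/scales of `pivot`
// happen around the desired point instead of the model's own origin.
let pivot = Entity()
pivot.addChild(cowEntity)
cowEntity.position = SIMD3<Float>(0, 0, 0.5)
cowAnchor.addChild(pivot)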

Projecting the ARKit face tracking 3D mesh to 2D image coordinates

I am collecting face mesh 3D vertices using ARKit. I have read: Mapping image onto 3D face mesh and Tracking and Visualizing Faces.
I have the following struct:
struct CaptureData {
var vertices: [SIMD3<Float>]
var verticesformatted: String {
let verticesDescribed = vertices.map({ "\($0.x):\($0.y):\($0.z)" }).joined(separator: "~")
return "<\(verticesDescribed)>"
}
}
I have a Start button to capture vertices:
@IBAction private func startPressed() {
captureData = [] // Clear data
currentCaptureFrame = 0 // initial capture frame
fpsTimer = Timer.scheduledTimer(withTimeInterval: 1/fps, repeats: true, block: {(timer) -> Void in self.recordData()})
}
private var fpsTimer = Timer()
private var captureData: [CaptureData] = [CaptureData]()
private var currentCaptureFrame = 0
And a stop button to stop capturing (save the data):
@IBAction private func stopPressed() {
do {
fpsTimer.invalidate() //turn off the timer
let capturedData = captureData.map{$0.verticesformatted}.joined(separator:"")
let dir: URL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).last! as URL
let url = dir.appendingPathComponent("facedata.txt")
try capturedData.appendLineToURL(fileURL: url as URL)
}
catch {
print("Could not write to file")
}
}
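The appendLineToURL(fileURL:) call above is a custom String extension that isn't shown in the post; a minimal sketch of what it might look like:
import Foundation

// Hypothetical helper matching the call above (not part of the original post):
// appends the string plus a newline to the file, creating the file if needed.
extension String {
    func appendLineToURL(fileURL: URL) throws {
        let line = self + "\n"
        if let handle = try? FileHandle(forWritingTo: fileURL) {
            defer { handle.closeFile() }
            handle.seekToEndOfFile()
            if let data = line.data(using: .utf8) {
                handle.write(data)
            }
        } else {
            try line.write(to: fileURL, atomically: true, encoding: .utf8)
        }
    }
}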
Function for recording data:
private func recordData() {
guard let data = getFrameData() else { return }
captureData.append(data)
currentCaptureFrame += 1
}
Function for getting frame data:
private func getFrameData() -> CaptureData? {
let arFrame = sceneView?.session.currentFrame!
guard let anchor = arFrame?.anchors[0] as? ARFaceAnchor else {return nil}
let vertices = anchor.geometry.vertices
let data = CaptureData(vertices: vertices)
return data
}
ARSCN extension:
extension ViewController: ARSCNViewDelegate {
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
guard let faceAnchor = anchor as? ARFaceAnchor else { return }
currentFaceAnchor = faceAnchor
if node.childNodes.isEmpty, let contentNode = selectedContentController.renderer(renderer, nodeFor: faceAnchor) {
node.addChildNode(contentNode)
}
selectedContentController.session = sceneView?.session
selectedContentController.sceneView = sceneView
}
/// - Tag: ARFaceGeometryUpdate
func renderer(_ renderer: SCNSceneRenderer, didUpdate node: SCNNode, for anchor: ARAnchor) {
guard anchor == currentFaceAnchor,
let contentNode = selectedContentController.contentNode,
contentNode.parent == node
else { return }
selectedContentController.session = sceneView?.session
selectedContentController.sceneView = sceneView
selectedContentController.renderer(renderer, didUpdate: contentNode, for: anchor)
}
}
I am trying to use the example code from Tracking and Visualizing Faces:
// Transform the vertex to the camera coordinate system.
float4 vertexCamera = scn_node.modelViewTransform * _geometry.position;
// Camera projection and perspective divide to get normalized viewport coordinates (clip space).
float4 vertexClipSpace = scn_frame.projectionTransform * vertexCamera;
vertexClipSpace /= vertexClipSpace.w;
// XY in clip space is [-1,1]x[-1,1], so adjust to UV texture coordinates: [0,1]x[0,1].
// Image coordinates are Y-flipped (upper-left origin).
float4 vertexImageSpace = float4(vertexClipSpace.xy * 0.5 + 0.5, 0.0, 1.0);
vertexImageSpace.y = 1.0 - vertexImageSpace.y;
// Apply ARKit's display transform (device orientation * front-facing camera flip).
float4 transformedVertex = displayTransform * vertexImageSpace;
// Output as texture coordinates for use in later rendering stages.
_geometry.texcoords[0] = transformedVertex.xy;
I also read about projectPoint (but I'm not sure which one is more applicable still):
func projectPoint(_ point: SCNVector3) -> SCNVector3
My question is: how do I use the example code above to transform the collected 3D face mesh vertices to 2D image coordinates?
I would like to get the 3D mesh vertices together with their corresponding 2D coordinates.
Currently, I can capture the face mesh points like so: <mesh_x: mesh_ y: mesh_ z:...>
I would like to convert my mesh points to the image coordinates and show them together like so:
Expected result: <mesh_x: mesh_ y: mesh_ z:img_x: img_y...>
Any suggestions? Thanks in advance!
Maybe you can use the projectPoint function of the SCNSceneRenderer.
extension ARFaceAnchor {
    // struct to store the 3d vertex and the 2d projection point
    struct VerticesAndProjection {
        var vertex: SIMD3<Float>
        var projected: CGPoint
    }
    // return a struct with vertices and projection
    func verticeAndProjection(to view: ARSCNView) -> [VerticesAndProjection] {
        let points = geometry.vertices.compactMap({ (vertex) -> VerticesAndProjection? in
            let col = SIMD4<Float>(SCNVector4())
            let pos = SIMD4<Float>(SCNVector4(vertex.x, vertex.y, vertex.z, 1))
            let pworld = transform * simd_float4x4(col, col, col, pos)
            let vect = view.projectPoint(SCNVector3(pworld.position.x, pworld.position.y, pworld.position.z))
            let p = CGPoint(x: CGFloat(vect.x), y: CGFloat(vect.y))
            return VerticesAndProjection(vertex: vertex, projected: p)
        })
        return points
    }
}
Here is a convenient way to get the position:
extension matrix_float4x4 {
/// Get the position of the transform matrix.
public var position: SCNVector3 {
get{
return SCNVector3(self[3][0], self[3][1], self[3][2])
}
}
}
If you want to check that the projection is OK, add a debug subview to the ARSCNView instance, and then use a couple of other extensions to draw the 2D points on a view, such as:
extension UIView {
    private struct drawCircleProperty {
        static let circleFillColor = UIColor.green
        static let circleStrokeColor = UIColor.black
        static let circleRadius: CGFloat = 3.0
    }
    func drawCircle(point: CGPoint) {
        let circlePath = UIBezierPath(arcCenter: point, radius: drawCircleProperty.circleRadius, startAngle: CGFloat(0), endAngle: CGFloat(Double.pi * 2.0), clockwise: true)
        let shapeLayer = CAShapeLayer()
        shapeLayer.path = circlePath.cgPath
        shapeLayer.fillColor = drawCircleProperty.circleFillColor.cgColor
        shapeLayer.strokeColor = drawCircleProperty.circleStrokeColor.cgColor
        self.layer.addSublayer(shapeLayer)
    }
    func drawCircles(points: [CGPoint]) {
        self.clearLayers()
        for point in points {
            self.drawCircle(point: point)
        }
    }
    func clearLayers() {
        if let subLayers = self.layer.sublayers {
            for subLayer in subLayers {
                subLayer.removeFromSuperlayer()
            }
        }
    }
}
You can compute the projection, and draw the points with:
let points:[ARFaceAnchor.VerticesAndProjection] = faceAnchor.verticeAndProjection(to: sceneView)
// keep only the projected points
let projected = points.map{ $0.projected}
// draw the points !
self.debugView?.drawCircles(points: projected)
I can see all the 3D vertices projected onto the 2D screen (picture generated by https://thispersondoesnotexist.com).
I added this code to the Apple demo project, available here https://github.com/hugoliv/projectvertices.git

Get Size of image in SCNNode / ARKit Swift

I'm trying to scan a reference image and then display the image itself above the printed reference image. The "virtual" image size should be the same as the printed size.
My idea: get the size of the printed reference image, then scale the image in the SCNNode to this size (or scale the SCNNode to this size?).
But: 1) How do I get the size of the printed image? 2) For scaling the SCNNode I need the size of this node, too - how do I get it?
import UIKit
import SceneKit
import ARKit
import AVKit
import AVFoundation
class ViewController: UIViewController, ARSCNViewDelegate {
@IBOutlet var sceneView: ARSCNView!
private var planeNode: SCNNode?
private var imageNode: SCNNode?
private var animationInfo: AnimationInfo?
private var currentMediaName: String?
private var scrollView: UIScrollView!
override func viewDidLoad() {
super.viewDidLoad()
let scene = SCNScene()
sceneView.scene = scene
sceneView.delegate = self
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
// Load reference images to look for from "AR Resources" folder
guard let referenceImages = ARReferenceImage.referenceImages(inGroupNamed: "AR Resources", bundle: nil) else {
fatalError("Missing expected asset catalog resources.")
}
// Create a session configuration
let configuration = ARWorldTrackingConfiguration()
// Add previously loaded images to ARScene configuration as detectionImages
configuration.detectionImages = referenceImages
// Run the view's session
sceneView.session.run(configuration)
let tap = UITapGestureRecognizer(target: self, action: #selector(handleTap(rec:)))
//Add recognizer to sceneview
sceneView.addGestureRecognizer(tap)
}
//Method called when tap
@objc func handleTap(rec: UITapGestureRecognizer){
let location: CGPoint = rec.location(in: sceneView)
let hits = self.sceneView.hitTest(location, options: nil)
if !hits.isEmpty{
let tappedNode = hits.first?.node
if tappedNode != nil && tappedNode?.name != nil{
let stringArr = tappedNode?.name?.components(separatedBy: "-")
let name = stringArr! [0]
let size = stringArr! [1].components(separatedBy: ",")
let width = Float(size [0])
let height = Float(size [1])
loadReferenceImage(tappedNode: tappedNode!, name: (name), width: width!, height: height!)
}
}
}
private func playVideo() {
guard let path = Bundle.main.path(forResource: "video", ofType:"m4v") else {
debugPrint("video.m4v not found")
return
}
let player = AVPlayer(url: URL(fileURLWithPath: path))
let playerController = AVPlayerViewController()
playerController.player = player
present(playerController, animated: true) {
player.play()
}
}
func loadReferenceImage(tappedNode: SCNNode, name: String, width: Float, height: Float){
print("TAP")
print(name)
let currentNode = tappedNode.parent
if let image = UIImage(named: "col" + name){
let childNodes = currentNode?.childNodes
for node in (childNodes)!{
node.removeFromParentNode()
}
let newImage = UIImage(named: "col" + name)
let newnode = SCNNode(geometry: SCNPlane(width: CGFloat(width), height: CGFloat(height)))
newnode.geometry?.firstMaterial?.diffuse.contents = newImage
newnode.scale = SCNVector3(x: 10, y: 10, z: 10)
currentNode?.removeAnimation(forKey: "spin_around")
let rotation = SCNVector3((currentNode?.eulerAngles.x)!-0.95,(currentNode?.eulerAngles.y)!,(currentNode?.eulerAngles.z)!)
currentNode?.eulerAngles = rotation
//SIZE??????
let nodex = currentNode?.scale.x
let nodey = currentNode?.scale.y
let nodez = currentNode?.scale.z
let factorx = width / nodex!
let factory = height / nodey!
currentNode?.addChildNode(newnode)
}
}
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
guard let imageAnchor = anchor as? ARImageAnchor else {
return
}
// 1. Load scene.
let planeScene = SCNScene(named: "art.scnassets/plane.scn")!
let planeNode = planeScene.rootNode.childNode(withName: "planeRootNode", recursively: true)!
// 2. Calculate size based on planeNode's bounding box.
let (min, max) = planeNode.boundingBox
let size = SCNVector3Make(max.x - min.x, max.y - min.y, max.z - min.z)
// 3. Calculate the ratio of difference between real image and object size.
// Ignore Y axis because it will be pointed out of the image.
let widthRatio = Float(imageAnchor.referenceImage.physicalSize.width)/1.2
let heightRatio = Float(imageAnchor.referenceImage.physicalSize.height)/1.2
let width = imageAnchor.referenceImage.physicalSize.width
let height = imageAnchor.referenceImage.physicalSize.height
let prefix = "-"
let imageSize = width.description + "," + height.description
let targetName = imageAnchor.referenceImage.name! + prefix + imageSize
// Pick smallest value to be sure that object fits into the image.
let finalRatio = [widthRatio, heightRatio].min()!
// 4. Set transform from imageAnchor data.
planeNode.transform = SCNMatrix4(imageAnchor.transform)
// 5. Animate appearance by scaling model from 0 to previously calculated value.
let appearanceAction = SCNAction.scale(to: CGFloat(finalRatio), duration: 0.4)
//test
appearanceAction.timingMode = .easeOut
// Set initial scale to 0.
planeNode.scale = SCNVector3Make(0 , 0, 0)
//rotate y
let spin = CABasicAnimation(keyPath: "rotation")
spin.fromValue = NSValue(scnVector4: SCNVector4(x: 0, y: 1, z: 0, w: 0))
spin.toValue = NSValue(scnVector4: SCNVector4(x: 0, y: 1, z: 0, w: Float(CGFloat(2 * Double.pi))))
spin.duration = 4
spin.repeatCount = .infinity
planeNode.addAnimation(spin, forKey: "spin_around")
// Add to root node.
sceneView.scene.rootNode.addChildNode(planeNode)
// Run the appearance animation.
planeNode.runAction(appearanceAction)
planeNode.name = targetName
let nodes = planeNode.childNodes
for node in nodes{
node.name = targetName
}
self.planeNode = planeNode
self.imageNode = node
}
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor, updateAtTime time: TimeInterval) {
guard let imageNode = imageNode, let planeNode = planeNode else {
return
}
// 1. Unwrap animationInfo. Calculate animationInfo if it is nil.
guard let animationInfo = animationInfo else {
refreshAnimationVariables(startTime: time,
initialPosition: planeNode.simdWorldPosition,
finalPosition: imageNode.simdWorldPosition,
initialOrientation: planeNode.simdWorldOrientation,
finalOrientation: imageNode.simdWorldOrientation)
return
}
// 2. Calculate new animationInfo if image position or orientation changed.
if !simd_equal(animationInfo.finalModelPosition, imageNode.simdWorldPosition) || animationInfo.finalModelOrientation != imageNode.simdWorldOrientation {
refreshAnimationVariables(startTime: time,
initialPosition: planeNode.simdWorldPosition,
finalPosition: imageNode.simdWorldPosition,
initialOrientation: planeNode.simdWorldOrientation,
finalOrientation: imageNode.simdWorldOrientation)
}
// 3. Calculate interpolation based on passedTime/totalTime ratio.
let passedTime = time - animationInfo.startTime
var t = min(Float(passedTime/animationInfo.duration), 1)
// Applying curve function to time parameter to achieve "ease out" timing
t = sin(t * .pi * 0.5)
// 4. Calculate and set new model position and orientation.
let f3t = simd_make_float3(t, t, t)
planeNode.simdWorldPosition = simd_mix(animationInfo.initialModelPosition, animationInfo.finalModelPosition, f3t)
planeNode.simdWorldOrientation = simd_slerp(animationInfo.initialModelOrientation, animationInfo.finalModelOrientation, t)
//planeNode.simdWorldOrientation = imageNode.simdWorldOrientation
guard let currentImageAnchor = anchor as? ARImageAnchor else { return }
}
func refreshAnimationVariables(startTime: TimeInterval, initialPosition: float3, finalPosition: float3, initialOrientation: simd_quatf, finalOrientation: simd_quatf) {
let distance = simd_distance(initialPosition, finalPosition)
// Average speed of movement is 0.15 m/s.
let speed = Float(0.15)
// Total time is calculated as distance/speed. Min time is set to 0.1s and max is set to 2s.
let animationDuration = Double(min(max(0.1, distance/speed), 2))
// Store animation information for later usage.
animationInfo = AnimationInfo(startTime: startTime,
duration: animationDuration,
initialModelPosition: initialPosition,
finalModelPosition: finalPosition,
initialModelOrientation: initialOrientation,
finalModelOrientation: finalOrientation)
}
}
In order to do this, I believe that first you need to get the size in pixels of the UIImage by multiplying the size values by the value in the scale property to get the pixel dimensions of the image.
As such, an example would be something like so:
guard let image = UIImage(named: "launchScreen") else { return }
let pixelWidth = image.size.width * image.scale
let pixelHeight = image.size.height * image.scale
print(pixelWidth, pixelHeight)
The size of my image when made in Adobe Illustrator was 3072 x 4099, and when I logged the results in the console the dimensions were also the same.
Now the tricky part here is converting the pixels to a size we can use in ARKit, remembering that different devices have different PPI (pixels per inch) densities.
In my example I am just going to use the PPI of an iPhone 7 Plus, which is 401.
//1. Get The PPI Of The iPhone7Plus
let iphone7PlusPixelsPerInch: CGFloat = 401
//2. To Get The Image Size In Inches We Need To Divide By The PPI
let inchWidth = pixelWidth/iphone7PlusPixelsPerInch
let inchHeight = pixelHeight/iphone7PlusPixelsPerInch
//3. Calculate The Size In Metres (There Are 2.54 Cm's In An Inch)
let widthInMetres = (inchWidth * 2.54) / 100
let heightInMeters = (inchHeight * 2.54) / 100
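For a quick sanity check (my arithmetic, not part of the original answer), plugging the 3072 x 4099 image and the 401 PPI from above into these formulas gives a plane of roughly 19.5 cm x 26 cm:
// 3072 px / 401 ppi ≈ 7.66 in  → (7.66 * 2.54) / 100 ≈ 0.195 m
// 4099 px / 401 ppi ≈ 10.22 in → (10.22 * 2.54) / 100 ≈ 0.260 m
let exampleWidthInMetres  = (3072.0 / 401.0) * 2.54 / 100
let exampleHeightInMetres = (4099.0 / 401.0) * 2.54 / 100
print(exampleWidthInMetres, exampleHeightInMetres) // ≈ 0.195, 0.260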
Now that we have the size of our image in metres, it is simple to create an SCNNode of that size, e.g.:
//1. Generate An SCNPlane With The Same Size As Our Image
let realScaleNode = SCNNode(geometry: SCNPlane(width: widthInMetres, height: heightInMeters))
realScaleNode.geometry?.firstMaterial?.diffuse.contents = image
realScaleNode.position = SCNVector3(0, 0, -1)
//2. Add It To Our Hierarchy
self.augmentedRealityView.scene.rootNode.addChildNode(realScaleNode)
Hope it helps...
P.S.: This may be useful for helping you get the PPI of the screen (marchv/UIScreenExtension).