Projecting the ARKit face tracking 3D mesh to 2D image coordinates - swift

I am collecting face mesh 3D vertices using ARKit. I have read: Mapping image onto 3D face mesh and Tracking and Visualizing Faces.
I have the following struct:
struct CaptureData {
var vertices: [SIMD3<Float>]
var verticesformatted: String {
let verticesDescribed = vertices.map({ "\($0.x):\($0.y):\($0.z)" }).joined(separator: "~")
return "<\(verticesDescribed)>"
}
}
I have a Start button to capture vertices:
@IBAction private func startPressed() {
captureData = [] // Clear data
currentCaptureFrame = 0 // initial capture frame
fpsTimer = Timer.scheduledTimer(withTimeInterval: 1/fps, repeats: true, block: {(timer) -> Void in self.recordData()})
}
private var fpsTimer = Timer()
private var captureData: [CaptureData] = [CaptureData]()
private var currentCaptureFrame = 0
And a stop button to stop capturing (save the data):
@IBAction private func stopPressed() {
do {
fpsTimer.invalidate() //turn off the timer
let capturedData = captureData.map{$0.verticesformatted}.joined(separator:"")
let dir: URL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).last! as URL
let url = dir.appendingPathComponent("facedata.txt")
try capturedData.appendLineToURL(fileURL: url as URL)
}
catch {
print("Could not write to file")
}
}
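Note: appendLineToURL is not a Foundation API; it is presumably a small String extension along these lines (a minimal sketch, assuming UTF-8 text with a trailing newline, appended to the file and creating it if needed):
import Foundation

extension String {
    /// Appends this string plus a newline to the file at fileURL, creating the file if it does not exist.
    func appendLineToURL(fileURL: URL) throws {
        let data = Data((self + "\n").utf8)
        if FileManager.default.fileExists(atPath: fileURL.path) {
            let handle = try FileHandle(forWritingTo: fileURL)
            defer { handle.closeFile() }
            handle.seekToEndOfFile()
            handle.write(data)
        } else {
            try data.write(to: fileURL, options: .atomic)
        }
    }
}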
The function for recording data:
private func recordData() {
guard let data = getFrameData() else { return }
captureData.append(data)
currentCaptureFrame += 1
}
The function for getting frame data:
private func getFrameData() -> CaptureData? {
guard let arFrame = sceneView?.session.currentFrame,
let anchor = arFrame.anchors.first as? ARFaceAnchor else { return nil }
let vertices = anchor.geometry.vertices
let data = CaptureData(vertices: vertices)
return data
}
ARSCN extension:
extension ViewController: ARSCNViewDelegate {
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
guard let faceAnchor = anchor as? ARFaceAnchor else { return }
currentFaceAnchor = faceAnchor
if node.childNodes.isEmpty, let contentNode = selectedContentController.renderer(renderer, nodeFor: faceAnchor) {
node.addChildNode(contentNode)
}
selectedContentController.session = sceneView?.session
selectedContentController.sceneView = sceneView
}
/// - Tag: ARFaceGeometryUpdate
func renderer(_ renderer: SCNSceneRenderer, didUpdate node: SCNNode, for anchor: ARAnchor) {
guard anchor == currentFaceAnchor,
let contentNode = selectedContentController.contentNode,
contentNode.parent == node
else { return }
selectedContentController.session = sceneView?.session
selectedContentController.sceneView = sceneView
selectedContentController.renderer(renderer, didUpdate: contentNode, for: anchor)
}
}
I am trying to use the example code from Tracking and Visualizing Faces:
// Transform the vertex to the camera coordinate system.
float4 vertexCamera = scn_node.modelViewTransform * _geometry.position;
// Camera projection and perspective divide to get normalized viewport coordinates (clip space).
float4 vertexClipSpace = scn_frame.projectionTransform * vertexCamera;
vertexClipSpace /= vertexClipSpace.w;
// XY in clip space is [-1,1]x[-1,1], so adjust to UV texture coordinates: [0,1]x[0,1].
// Image coordinates are Y-flipped (upper-left origin).
float4 vertexImageSpace = float4(vertexClipSpace.xy * 0.5 + 0.5, 0.0, 1.0);
vertexImageSpace.y = 1.0 - vertexImageSpace.y;
// Apply ARKit's display transform (device orientation * front-facing camera flip).
float4 transformedVertex = displayTransform * vertexImageSpace;
// Output as texture coordinates for use in later rendering stages.
_geometry.texcoords[0] = transformedVertex.xy;
I also read about projectPoint (but I'm not sure which one is more applicable):
func projectPoint(_ point: SCNVector3) -> SCNVector3
My question is: how do I use the example code above to transform the collected 3D face mesh vertices to 2D image coordinates?
I would like to get the 3D mesh vertices together with their corresponding 2D coordinates.
Currently, I can capture the face mesh points like so: <mesh_x:mesh_y:mesh_z:...>
I would like to convert my mesh points to image coordinates and show them together, like so:
Expected result: <mesh_x:mesh_y:mesh_z:img_x:img_y...>
Any suggestions? Thanks in advance!

Maybe you can use the projectPoint function of the SCNSceneRenderer.
extension ARFaceAnchor{
// struct to store the 3d vertex and the 2d projection point
struct VerticesAndProjection {
var vertex: SIMD3<Float>
var projected: CGPoint
}
// return a struct with vertices and projection
func verticeAndProjection(to view: ARSCNView) -> [VerticesAndProjection]{
let points = geometry.vertices.compactMap({ (vertex) -> VerticesAndProjection? in
// Build a matrix whose last column is the vertex in homogeneous coordinates (w = 1),
// so multiplying by the anchor's transform leaves the world-space position in that column.
let col = SIMD4<Float>(SCNVector4())
let pos = SIMD4<Float>(SCNVector4(vertex.x, vertex.y, vertex.z, 1))
let pworld = transform * simd_float4x4(col, col, col, pos)
// Project the world-space point onto the 2D view.
let vect = view.projectPoint(SCNVector3(pworld.position.x, pworld.position.y, pworld.position.z))
let p = CGPoint(x: CGFloat(vect.x), y: CGFloat(vect.y))
return VerticesAndProjection(vertex:vertex, projected: p)
})
return points
}
}
Here is a convenient way to get the position:
extension matrix_float4x4 {
/// Get the position of the transform matrix.
public var position: SCNVector3 {
get{
return SCNVector3(self[3][0], self[3][1], self[3][2])
}
}
}
If you want to check that the projection is correct, add a debug subview to the ARSCNView instance and use a couple of additional extensions to draw the 2D points on a view, such as:
extension UIView{
private struct drawCircleProperty{
static let circleFillColor = UIColor.green
static let circleStrokeColor = UIColor.black
static let circleRadius: CGFloat = 3.0
}
func drawCircle(point: CGPoint) {
let circlePath = UIBezierPath(arcCenter: point, radius: drawCircleProperty.circleRadius, startAngle: CGFloat(0), endAngle: CGFloat(Double.pi * 2.0), clockwise: true)
let shapeLayer = CAShapeLayer()
shapeLayer.path = circlePath.cgPath
shapeLayer.fillColor = drawCircleProperty.circleFillColor.cgColor
shapeLayer.strokeColor = drawCircleProperty.circleStrokeColor.cgColor
self.layer.addSublayer(shapeLayer)
}
func drawCircles(points: [CGPoint]){
self.clearLayers()
for point in points{
self.drawCircle(point: point)
}
}
func clearLayers(){
if let subLayers = self.layer.sublayers {
for subLayer in subLayers {
subLayer.removeFromSuperlayer()
}
}
}
}
You can compute the projection and draw the points with:
let points:[ARFaceAnchor.VerticesAndProjection] = faceAnchor.verticeAndProjection(to: sceneView)
// keep only the projected points
let projected = points.map{ $0.projected}
// draw the points !
self.debugView?.drawCircles(points: projected)
I can see all the 3D vertices projected on the 2D screen (picture generated by https://thispersondoesnotexist.com).
I added this code to the Apple demo project, available here https://github.com/hugoliv/projectvertices.git
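To get the expected output format from the original question, CaptureData could store these vertex/projection pairs directly. Here is a minimal sketch (assuming sceneView is the ARSCNView used above and the verticeAndProjection(to:) extension from this answer):
struct CaptureData {
    var pairs: [ARFaceAnchor.VerticesAndProjection]
    // Produces: <mesh_x:mesh_y:mesh_z:img_x:img_y~...>
    var verticesformatted: String {
        let described = pairs.map {
            "\($0.vertex.x):\($0.vertex.y):\($0.vertex.z):\($0.projected.x):\($0.projected.y)"
        }.joined(separator: "~")
        return "<\(described)>"
    }
}

private func getFrameData() -> CaptureData? {
    guard let view = sceneView,
          let anchor = view.session.currentFrame?.anchors
              .compactMap({ $0 as? ARFaceAnchor }).first else { return nil }
    return CaptureData(pairs: anchor.verticeAndProjection(to: view))
}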

Show masking on object which is between camera and wall using RealityKit

I made a video for generating a floor plan, in which I need to capture the wall and floor together at a certain position. If the user is too near to the wall, or if any object comes between the camera and the wall/floor, I need to show a "Too Close" mask on that object, something like what is shown in this video.
I tried to use raycast in the session(_ session: ARSession, didUpdate frame: ARFrame) method, but I am very new to AR and don't know which method to use.
func session(_ session: ARSession, didUpdate frame: ARFrame) {
guard let query = self.arView?.makeRaycastQuery(from: self.arView?.center ?? CGPoint.zero,
allowing: .estimatedPlane,
alignment: .any)
else { return }
guard let raycastResult = self.arView?.session.raycast(query).first
else { return }
let currentPositionOfCamera = raycastResult.worldTransform.getPosition()
if currentPositionOfCamera != .zero {
let distanceFromCamera = frame.camera.transform.getPosition().distanceFrom(position: currentPositionOfCamera)
print("Distance from raycast:",distanceFromCamera)
if (distanceFromCamera < 0.5) {
print("Too Close")
}
}
}
I am just learning ARKit and RealityKit as well, but wouldn't your code be:
let currentPositionOfCamera = self.arView.cameraTransform.translation
if currentPositionOfCamera != .zero {
// distance is defined in simd as the distance between 2 points
let distanceFromCamera = distance(raycastResult.worldTransform.position, currentPositionOfCamera)
print("Distance from raycast:",distanceFromCamera)
if (distanceFromCamera < 0.5) {
print("Too Close")
let rayDirection = normalize(raycastResult.worldTransform.position - self.arView.cameraTransform.translation)
// This pulls the text back toward the camera from the plane
let textPositionInWorldCoordinates = raycastResult.worldTransform.position - (rayDirection * 0.1)
let textEntity = self.tooCloseModel()
// This scales the text so it is of a consistent size
textEntity.scale = .one * distanceFromCamera
var textPositionWithCameraOrientation = self.arView.cameraTransform
textPositionWithCameraOrientation.translation = textPositionInWorldCoordinates
// self.textAnchor is defined somewhere in the class as an optional
let textAnchor = AnchorEntity(world: textPositionWithCameraOrientation.matrix)
self.textAnchor = textAnchor
textAnchor.addChild(textEntity)
self.arView.scene.addAnchor(textAnchor)
} else {
guard let textAnchor = self.textAnchor else { return }
self.arView.scene.removeAnchor(textAnchor)
}
}
// Creates a text ModelEntity
func tooCloseModel() -> ModelEntity {
let lineHeight: CGFloat = 0.05
let font = MeshResource.Font.systemFont(ofSize: lineHeight)
let textMesh = MeshResource.generateText("Too Close", extrusionDepth: Float(lineHeight * 0.1), font: font)
// Apple's sample derives this color from the mesh classification; a fixed color is used here.
let textMaterial = SimpleMaterial(color: .red, isMetallic: true)
let model = ModelEntity(mesh: textMesh, materials: [textMaterial])
// Center the text
model.position.x -= model.visualBounds(relativeTo: nil).extents.x / 2
return model
}
This code is adapted from Apple's Visualizing Scene Semantics.
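Note that worldTransform.position, as used above, is not a built-in simd property; a small extension along these lines is assumed (returning SIMD3<Float> so it works with normalize and the vector math above):
import simd

extension simd_float4x4 {
    /// The translation component of the transform.
    var position: SIMD3<Float> {
        return SIMD3<Float>(columns.3.x, columns.3.y, columns.3.z)
    }
}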

Place node in front of the camera and rotate

I want to show a pointer when a user double-taps on a sceneView. For this, I want to use SCNTorus, because viewed from a certain angle it appears as a circle.
let geometry = SCNTorus(ringRadius: 0.01, pipeRadius: 0.001)
let node = SCNNode(geometry: geometry)
I already get a vector from hitTest that I use as the position for the node:
private func getVector(for point: CGPoint) -> SCNVector3? {
guard let hitTest = self.sceneView.hitTest(point, types: .featurePoint).first else {
return nil
}
let transform = SCNMatrix4.init(hitTest.worldTransform)
let vector = SCNVector3Make(transform.m41, transform.m42, transform.m43)
return vector
}
How can I transform it so that it always appears as a circle when the node is added?
For a vertical device position, rotating by .pi/2 along the X-axis works great, but it breaks when the device moves in space, so I cannot hardcode the angle:
node.eulerAngles.x = .pi / 2
sceneView.scene.rootNode.addChildNode(node)
I believe I need to use sceneView.pointOfView somehow to apply the transformation, but I'm stuck here.
Here is the full code
@objc private func didDoubleTap(_ sender: UITapGestureRecognizer) {
let point = sender.location(in: sceneView)
guard let vector = getVector(for: point) else { return }
guard let pov = sceneView.pointOfView else { return }
guard let camera = sceneView.session.currentFrame?.camera else { return }
let geometry = SCNTorus(ringRadius: 0.01, pipeRadius: 0.001)
let node = SCNNode(geometry: geometry)
node.position = vector
node.eulerAngles.x = .pi / 2
sceneView.scene.rootNode.addChildNode(node)
animatePointingNode(node)
}
I was able to solve the issue by setting the node's eulerAngles.y equal to the camera's Y, and its eulerAngles.x to the camera's X plus .pi/2:
guard let camera = sceneView.session.currentFrame?.camera else { return }
pointerNode.position = vector
pointerNode.eulerAngles.x = camera.eulerAngles.x + .pi / 2
pointerNode.eulerAngles.y = camera.eulerAngles.y
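Alternatively (just a sketch, assuming you only need the torus to face the camera regardless of device orientation), you can copy the camera's full orientation onto the node and then tilt it by .pi/2 about its local X-axis:
if let pov = sceneView.pointOfView {
    pointerNode.position = vector
    // Start from the camera's orientation...
    pointerNode.simdOrientation = pov.simdWorldOrientation
    // ...then tilt the torus so its flat side faces the camera.
    pointerNode.simdLocalRotate(by: simd_quatf(angle: .pi / 2, axis: SIMD3<Float>(1, 0, 0)))
}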

How to draw a line between two points in SceneKit?

If I have two points in SceneKit (e.g. (1,2,3) and (-1,-1,-1)). How do I draw a line between the two?
I see that there is an SCNBox object I may be able to use, but that only allows me to specify the center (e.g. via simdPosition). The other ways to modify it are the transform (which I don't know how to use) or the Euler angles (which I'm not sure how to calculate).
You can draw a line between two points using the following approach:
import SceneKit
extension SCNGeometry {
class func line(vector1: SCNVector3,
vector2: SCNVector3) -> SCNGeometry {
let sources = SCNGeometrySource(vertices: [vector1,
vector2])
let index: [Int32] = [0,1]
let elements = SCNGeometryElement(indices: index,
primitiveType: .line)
return SCNGeometry(sources: [sources],
elements: [elements])
}
}
...and then feed it to addLine function in ViewController:
class ViewController: UIViewController {
// Some code...
func addLine(start: SCNVector3, end: SCNVector3) {
let lineGeo = SCNGeometry.line(vector1: start,
vector2: end)
let lineNode = SCNNode(geometry: lineGeo)
sceneView.scene.rootNode.addChildNode(lineNode)
}
}
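For example, with the two points from the question:
addLine(start: SCNVector3(1, 2, 3), end: SCNVector3(-1, -1, -1))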
As we all know, a line's width can't be changed (because there's no property for it), so you can use a cylinder primitive instead of a line:
extension SCNGeometry {
class func cylinderLine(from: SCNVector3,
to: SCNVector3,
segments: Int) -> SCNNode {
let x1 = from.x
let x2 = to.x
let y1 = from.y
let y2 = to.y
let z1 = from.z
let z2 = to.z
let distance = sqrtf( (x2-x1) * (x2-x1) +
(y2-y1) * (y2-y1) +
(z2-z1) * (z2-z1) )
let cylinder = SCNCylinder(radius: 0.005,
height: CGFloat(distance))
cylinder.radialSegmentCount = segments
cylinder.firstMaterial?.diffuse.contents = UIColor.green
let lineNode = SCNNode(geometry: cylinder)
lineNode.position = SCNVector3(x: (from.x + to.x) / 2,
y: (from.y + to.y) / 2,
z: (from.z + to.z) / 2)
lineNode.eulerAngles = SCNVector3(Float.pi / 2,
acos((to.z-from.z)/distance),
atan2((to.y-from.y),(to.x-from.x)))
return lineNode
}
}
...then feed it the same way to ViewController:
class ViewController: UIViewController {
// Some code...
func addLine(start: SCNVector3, end: SCNVector3) {
let cylinderLineNode = SCNGeometry.cylinderLine(from: start,
to: end,
segments: 3)
sceneView.scene.rootNode.addChildNode(cylinderLineNode)
}
}
First you'll need to calculate the heading and pitch between the two points. Full post is here and this answer explains how to do it between any arbitrary two points.
Once you have your two angles, if you attempt to use the Euler angles on an SCNBox, you'll notice that when you only modify the pitch (eulerAngles.x), or only modify the heading (eulerAngles.y), everything works fine. However, the moment you try to modify both, you'll run into issues. One solution is to wrap one node inside another.
This seemed like such a useful suggestion that I created a handy wrapper node that should handle rotations about all 3 axes:
import Foundation
import SceneKit
struct HeadingPitchBank {
let heading: Float
let pitch: Float
let bank: Float
/// returns the heading and pitch (bank is 0) represented by the vector
static func from(vector: simd_float3) -> HeadingPitchBank {
let heading = atan2f(vector.x, vector.z)
let pitch = atan2f(sqrt(vector.x*vector.x + vector.z*vector.z), vector.y) - Float.pi / 2.0
return HeadingPitchBank(heading: heading, pitch: pitch, bank: 0)
}
}
class HeadingPitchBankWrapper: SCNNode {
init(wrappedNode: SCNNode) {
headingNode = SCNNode()
pitchNode = SCNNode()
bankNode = SCNNode()
_wrappedNode = wrappedNode
super.init()
addChildNode(headingNode)
headingNode.addChildNode(pitchNode)
pitchNode.addChildNode(bankNode)
bankNode.addChildNode(wrappedNode)
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
var heading: Float {
get {
return headingNode.eulerAngles.y
}
set {
headingNode.eulerAngles.y = newValue
}
}
var pitch: Float {
get {
return pitchNode.eulerAngles.x
}
set {
pitchNode.eulerAngles.x = newValue
}
}
var bank: Float {
get {
return bankNode.eulerAngles.z
}
set {
bankNode.eulerAngles.z = newValue
}
}
var headingPitchBank: HeadingPitchBank {
get {
return HeadingPitchBank(heading: heading, pitch: pitch, bank: bank)
}
set {
heading = newValue.heading
pitch = newValue.pitch
bank = newValue.bank
}
}
var wrappedNode: SCNNode {
return _wrappedNode
}
private var headingNode: SCNNode
private var pitchNode: SCNNode
private var bankNode: SCNNode
private var _wrappedNode: SCNNode
}
You could then use this to easily draw a line between two points:
func createLine(start: simd_float3 = simd_float3(), end: simd_float3, color: UIColor, opacity: CGFloat? = nil, radius: CGFloat = 0.005) -> SCNNode {
let length = CGFloat(simd_length(end-start))
let box = SCNNode(geometry: SCNBox(width: radius, height: radius, length: length, chamferRadius: 0))
box.geometry!.firstMaterial!.diffuse.contents = color
let wrapper = HeadingPitchBankWrapper(wrappedNode: box)
wrapper.headingPitchBank = HeadingPitchBank.from(vector: end - start)
wrapper.simdPosition = midpoint(start, end)
if let opacity = opacity {
wrapper.opacity = opacity
}
return wrapper
}
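The midpoint helper used above isn't defined in the snippet; a minimal version (assuming plain component-wise averaging) could be:
import simd

func midpoint(_ a: simd_float3, _ b: simd_float3) -> simd_float3 {
    return (a + b) / 2
}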
Just build a custom geometry using SCNGeometryPrimitiveType.line:
let vertices: [SCNVector3] = [
SCNVector3(1, 2, 3),
SCNVector3(-1, -1, -1)
]
let linesGeometry = SCNGeometry(
sources: [
SCNGeometrySource(vertices: vertices)
],
elements: [
SCNGeometryElement(
indices: [Int32]([0, 1]),
primitiveType: .line
)
]
)
let line = SCNNode(geometry: linesGeometry)
scene.rootNode.addChildNode(line)

Collision between two nodes not detected ARKit

I created two nodes: a sphere and a box:
var sphere = SCNNode(geometry: SCNSphere(radius: 0.005))
//I get the box node from scn file
let boxScene = SCNScene(named: "art.scnassets/world.scn")!
var boxNode: SCNNode?
I want the two nodes (physics bodies) to interact, so I created categories for categoryBitMask and contactTestBitMask:
struct CollisionCategory: OptionSet {
let rawValue: Int
static let box = CollisionCategory(rawValue: 1)
static let sphere = CollisionCategory(rawValue: 2)
}
Here I set the box node as a physics body:
self.boxScene.rootNode.enumerateChildNodes { (node, _) in
if node.name == "box" {
boxNode = node
let boxBodyShape = SCNPhysicsShape(geometry: SCNBox(width: 0.1, height: 0.1, length: 0.1, chamferRadius: 0.1), options: nil)
let physicsBody = SCNPhysicsBody(type: .static, shape: boxBodyShape)
boxNode!.physicsBody = physicsBody
boxNode!.physicsBody?.categoryBitMask = CollisionCategory.box.rawValue
boxNode!.physicsBody?.contactTestBitMask = CollisionCategory.sphere.rawValue
boxNode!.physicsBody?.collisionBitMask = boxNode!.physicsBody!.contactTestBitMask
}
}
Here I set up the sphere node, which can be moved around the view in the render function:
func setUpSphere() {
let sphereBodySphere = SCNPhysicsShape(geometry: SCNSphere(radius: 0.005))
let physicsBody = SCNPhysicsBody(type: .kinematic, shape: sphereBodySphere)
sphere.physicsBody = physicsBody
sphere.physicsBody?.categoryBitMask = CollisionCategory.sphere.rawValue
sphere.physicsBody?.contactTestBitMask = CollisionCategory.box.rawValue
sphere.geometry?.firstMaterial?.diffuse.contents = UIColor.blue
sphere.physicsBody?.collisionBitMask = sphere.physicsBody!.contactTestBitMask
previousPoint = currentPosition
}
/// Adds the sphere and updates its position
func renderer(_ renderer: SCNSceneRenderer, willRenderScene scene: SCNScene, atTime time: TimeInterval) {
guard let pointOfView = sceneView.pointOfView else { return }
let mat = pointOfView.transform
let dir = SCNVector3(-1 * mat.m31, -1 * mat.m32, -1 * mat.m33)
let currentPosition = pointOfView.position + (dir * 0.185)
if buttonPressed {
if let previousPoint = previousPoint {
sphere.position = currentPosition
sceneView.scene.rootNode.addChildNode(sphere)
}
}
}
I added the SCNPhysicsContactDelegate protocol to the ViewController, and in viewDidLoad() I set:
override func viewDidLoad() {
super.viewDidLoad()
sceneView.delegate = self
sceneView.scene.physicsWorld.contactDelegate = self
///I correctly see the shapes of the sphere and the box physics bodies using
sceneView.debugOptions = .showPhysicsShapes
createBox()
setUpSphere()
sceneView.scene = boxScene
sceneView.scene.physicsWorld.contactDelegate = self
}
Then I added that function:
func physicsWorld(_ world: SCNPhysicsWorld, didEnd contact: SCNPhysicsContact) {
print("Collision!")
}
This is what happens: when the two nodes collide, nothing happens, so I can't tell whether the two bodies are touching. Could the problem be the .kinematic/.static body types, or the render function?
I followed step by step different tutorials about collisions in ARKit: Tutorial 1, Tutorial 2.
I have no idea why it doesn't work like expected.
Is there something wrong in my code?
Download file code link: https://ufile.io/20sla
willRenderScene is called up to 60 times a second, every time the scene is about to be rendered. Since you're recreating the physics body every time, it's probably interfering with the physics engine's collision detection.
Try changing your code to only create the physics body once during setup.
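A minimal sketch of that change (assuming the SCNVector3 + and * operators from the question are defined, and the sphere keeps the physics body configured once in setUpSphere()):
func renderer(_ renderer: SCNSceneRenderer, willRenderScene scene: SCNScene, atTime time: TimeInterval) {
    guard let pointOfView = sceneView.pointOfView else { return }
    let mat = pointOfView.transform
    let dir = SCNVector3(-1 * mat.m31, -1 * mat.m32, -1 * mat.m33)
    let currentPosition = pointOfView.position + (dir * 0.185)
    if buttonPressed {
        // Add the sphere (and its physics body) to the scene only once...
        if sphere.parent == nil {
            sceneView.scene.rootNode.addChildNode(sphere)
        }
        // ...and only update its position every frame.
        sphere.position = currentPosition
        previousPoint = currentPosition
    }
}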

ARKit Calculate distance from a wall to the camera

I'm developing a project with ARKit. I want to calculate the distance from a wall to the camera, and have it update as I move closer to or farther from the wall.
I have enabled detection of horizontal and vertical surfaces. When I get a surface, I calculate the distance between the camera position and the center of the surface, applying the formula for the distance between two points in 3D space (Euclidean distance):
https://math.stackexchange.com/questions/42640/calculate-distance-in-3d-space
Is it correct? Can you help me?
class ViewController: UIViewController, ARSCNViewDelegate, ARSessionDelegate {
let configuration = ARWorldTrackingConfiguration()
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
configuration.planeDetection = [.horizontal, .vertical]
sceneView.session.run(configuration)
......
}
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
guard let planeAnchor = anchor as? ARPlaneAnchor else { return }
let plane = SCNPlane(width: CGFloat(planeAnchor.extent.x), height:
CGFloat(planeAnchor.extent.z))
let planeNode = SCNNode(geometry: plane)
planeNode.simdPosition = float3(planeAnchor.center.x, 0,
planeAnchor.center.z)
planeNode.eulerAngles.x = -.pi / 2
node.addChildNode(planeNode)
let distance = distanceFromCamera(x: planeAnchor.center.x, y: 0, z: planeAnchor.center.z)
let formatted = String(format: "Distance: %.2f", distance)
print(formatted)
}
private func distanceFromCamera(x: Float, y:Float, z:Float) -> Float {
let cameraPosition = self.sceneView.session.currentFrame!.camera.transform.columns.3
print("Camera: \(cameraPosition)")
let vector = SCNVector3Make(cameraPosition.x - x, cameraPosition.y - y, cameraPosition.z - z)
// Scene units map to meters in ARKit.
return sqrtf(vector.x * vector.x + vector.y * vector.y + vector.z * vector.z)
}
}
Add the following method:
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
guard let currentBall = self.currentBall else {return}
DispatchQueue.main.async {
if let centerPosition = self.hitTestCenterVector() {
let startPositionOfBall = currentBall.position
let distance = self.getDistanceBetween(vector1: centerPosition, vector2: startPositionOfBall)
self.lblDistance.text = String(format: "%.1f", distance) //meter
}
}
}
Just replace self.currentBall in the guard statement with the SCNNode from which you want to calculate the distance.
Now these are the methods for the calculations:
func hitTestCenterVector () -> SCNVector3? {
let results = self.sceneView.hitTest(self.sceneView.center, types: .existingPlane)
if let firstObject = results.first {
return SCNVector3(firstObject.worldTransform.columns.3.x, firstObject.worldTransform.columns.3.y, firstObject.worldTransform.columns.3.z)
}
return nil
}
func getDistanceBetween(vector1:SCNVector3, vector2:SCNVector3) -> CGFloat {
return CGFloat(sqrt((vector1.x - vector2.x) * (vector1.x - vector2.x)
+ (vector1.y - vector2.y) * (vector1.y - vector2.y)
+ (vector1.z - vector2.z) * (vector1.z - vector2.z)))
}
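As a side note, the same Euclidean distance can also be computed with simd's built-in distance function; a minimal sketch (using a hypothetical helper, not part of the original code):
import ARKit
import simd

func cameraDistance(to worldPosition: SIMD3<Float>, in frame: ARFrame) -> Float {
    // The camera position is the translation component (column 3) of the camera transform.
    let cameraPosition = SIMD3<Float>(frame.camera.transform.columns.3.x,
                                      frame.camera.transform.columns.3.y,
                                      frame.camera.transform.columns.3.z)
    // Scene units map to meters in ARKit.
    return simd_distance(cameraPosition, worldPosition)
}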
Hope it is helpful