Add SCNNode or UIView to the center of SCNView in order to detect other SCNNodes - swift

I am trying to add a UIView at the center of my SCNView in order to detect the other SCNTorus nodes added to my scene.
I added a focus point at the center of my sceneView like below:
var focusPoint: CGPoint {
return CGPoint(
x: sceneView.bounds.size.width / 2,
y: sceneView.bounds.size.height - (sceneView.bounds.size.height / 1.618))
}
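For reference, the on-screen marker itself can be any small UIView pinned at that point; a minimal sketch (the addFocusMarker name and the sizes are my own assumptions, not from the original post):
// Assumed helper: adds a small circular marker view at focusPoint so the user
// can see where the selection test happens.
func addFocusMarker() {
    let crosshairView = UIView(frame: CGRect(x: 0, y: 0, width: 20, height: 20))
    crosshairView.center = focusPoint
    crosshairView.layer.borderColor = UIColor.white.cgColor
    crosshairView.layer.borderWidth = 2
    crosshairView.layer.cornerRadius = 10
    crosshairView.isUserInteractionEnabled = false
    sceneView.addSubview(crosshairView)
}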
Then I tried two ways:
1 -
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
DispatchQueue.main.async { [weak self] in
guard let strongSelf = self else { return }
if !strongSelf.inEditMode { return }
for node in strongSelf.selectionRingsNodes {
let projectedPoint = renderer.projectPoint(node.position)
let projectedCGPoint = CGPoint(x: CGFloat(projectedPoint.x), y: CGFloat(projectedPoint.y))
let distance = projectedCGPoint.distance(to: strongSelf.focusPoint)
if distance < 20 {
print(node.name)
}
}
}
}
2 -
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
DispatchQueue.main.async { [weak self] in
guard let strongSelf = self else { return }
if !strongSelf.inEditMode { return }
for node in strongSelf.selectionRingsNodes {
let (min, max) = node.boundingBox
let projectedMinPoint = renderer.projectPoint(min)
let projectedMinCGPoint = CGPoint(x: CGFloat(projectedMinPoint.x), y: CGFloat(projectedMinPoint.y))
let projectedMaxPoint = renderer.projectPoint(max)
let projectedMaxCGPoint = CGPoint(x: CGFloat(projectedMaxPoint.x), y: CGFloat(projectedMaxPoint.y))
let minX = CGFloat(projectedMinCGPoint.x)
let maxX = CGFloat(projectedMaxCGPoint.x)
let minY = CGFloat(projectedMinCGPoint.y)
let maxY = CGFloat(projectedMaxCGPoint.y)
let nodeRect = CGRect(x: minX, y: minY, width: maxX - minX, height: maxY - minY)
if nodeRect.contains(strongSelf.focusPoint) {
print(node.name)
}
}
}
}
Both methods return the wrong results: a very large distance, and very large x and y values.

Finally, I got the solution!
It turned out that I had to convert the node's position to the scene's world coordinate space, using convertPosition(_:to:) (passing nil as the destination converts to world space).
Here is the complete code:
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
DispatchQueue.main.async { [weak self] in
guard let strongSelf = self else { return }
if !strongSelf.inEditMode { return }
for node in strongSelf.selectionRingsNodes {
let position = node.convertPosition(SCNVector3Zero, to: nil)
let projectedPoint = renderer.projectPoint(position)
let projectedCGPoint = CGPoint(x: CGFloat(projectedPoint.x), y: CGFloat(projectedPoint.y))
let distance = projectedCGPoint.distance(to: strongSelf.focusPoint)
if distance < 50 {
strongSelf.showToast(message: node.getTopMostParentNode().name!, font: .systemFont(ofSize: 30))
}
}
}
}
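Note that distance(to:) is not a built-in CGPoint API; the snippets above assume a small extension along these lines (a sketch, not from the original post):
import CoreGraphics

extension CGPoint {
    /// Straight-line (Euclidean) distance to another point, as used above.
    func distance(to point: CGPoint) -> CGFloat {
        let dx = point.x - x
        let dy = point.y - y
        return (dx * dx + dy * dy).squareRoot()
    }
}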

Related

Face position using visionKit in ARKit

I added Vision face detection on top of an ARSCNView, and it can detect the face. Here is how I did it:
public func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
let faceDetectionRequest = VNDetectFaceRectanglesRequest(completionHandler: { (request: VNRequest, error: Error?) in
DispatchQueue.main.async {
self.faceLayers.forEach { drawing in
drawing.removeFromSuperlayer()
}
if let observations = request.results as? [VNFaceObservation] {
self.handleFaceDetectionObservations(observations: observations)
}
}
})
guard let capturedImage = sceneView.session.currentFrame?.capturedImage else { return }
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: capturedImage, orientation: .leftMirrored, options: [:])
do {
try imageRequestHandler.perform([faceDetectionRequest])
} catch {
print("perform fail, error: ", error.localizedDescription)
}
}
fileprivate func handleFaceDetectionObservations(observations: [VNFaceObservation]) {
for observation in observations {
let newWidth = sceneView.bounds.width * observation.boundingBox.width
let newHeight = sceneView.bounds.height * observation.boundingBox.height
let newX = sceneView.bounds.width * observation.boundingBox.origin.x
let newY = sceneView.bounds.height * observation.boundingBox.origin.y
let faceRectConverted = CGRect(x: newX, y: newY, width: newWidth, height: newHeight)
let faceRectanglePath = CGPath(rect: faceRectConverted, transform: nil)
let faceLayer = CAShapeLayer()
faceLayer.path = faceRectanglePath
faceLayer.fillColor = UIColor.black.cgColor
self.faceLayers.append(faceLayer)
self.sceneView.layer.addSublayer(faceLayer)
}
}
The only issue I have is that the face position in the view is calculated wrong. It looks like the problem comes from camera mirroring: when the face goes right, the face rectangle goes left, and when the face goes up, the rectangle goes down. I don't know how to do the right calculation to tie the observation rect to the correct place in the sceneView. Could anyone help me with that?
I also have the same problem in landscape; the rectangle's height is more compressed there...
Thanks
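One likely culprit, for what it's worth: Vision bounding boxes are normalized with the origin in the bottom-left corner, while UIKit puts the origin at the top-left, so the y axis has to be flipped when converting. A minimal sketch of that conversion (the helper name is an assumption, and it ignores the aspect-ratio difference between the captured image and the view, which ARFrame's displayTransform(for:viewportSize:) accounts for):
import UIKit
import Vision

// Assumed helper: maps a normalized Vision bounding box into view coordinates,
// flipping the y axis because Vision's origin is at the bottom-left.
func viewRect(for observation: VNFaceObservation, in viewSize: CGSize) -> CGRect {
    let box = observation.boundingBox
    return CGRect(x: box.origin.x * viewSize.width,
                  y: (1 - box.origin.y - box.height) * viewSize.height,
                  width: box.width * viewSize.width,
                  height: box.height * viewSize.height)
}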

How to play a local video when an image is recognized using ARKit in Swift?

I have image recognition working using ARKit. When an image is detected, I need to show and play a video in the presented scene (for example, above the detected image).
lazy var fadeAndSpinAction: SCNAction = {
return .sequence([
.fadeIn(duration: fadeDuration),
.rotateBy(x: 0, y: 0, z: CGFloat.pi * 360 / 180, duration: rotateDuration),
.wait(duration: waitDuration),
.fadeOut(duration: fadeDuration)
])
}()
lazy var fadeAction: SCNAction = {
return .sequence([
.fadeOpacity(by: 0.8, duration: fadeDuration),
.wait(duration: waitDuration),
.fadeOut(duration: fadeDuration)
])
}()
lazy var fishNode: SCNNode = {
guard let scene = SCNScene(named: "Catfish1.scn"),
let node = scene.rootNode.childNode(withName: "Catfish1", recursively: false) else { return SCNNode() }
let scaleFactor = 0.005
node.scale = SCNVector3(scaleFactor, scaleFactor, scaleFactor)
node.eulerAngles.x = -.pi / 2
return node
}()
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
sceneView.delegate = self
configureLighting()
}
func configureLighting() {
sceneView.autoenablesDefaultLighting = true
sceneView.automaticallyUpdatesLighting = true
}
override func viewWillAppear(_ animated: Bool) {
resetTrackingConfiguration()
}
func resetTrackingConfiguration() {
guard let referenceImages = ARReferenceImage.referenceImages(inGroupNamed: "AR Resources", bundle: nil) else { return }
let configuration = ARWorldTrackingConfiguration()
configuration.detectionImages = referenceImages
let options: ARSession.RunOptions = [.resetTracking, .removeExistingAnchors]
sceneView.session.run(configuration, options: options)
statusLabel.text = "Move camera around to detect images"
}
extension ViewController: ARSCNViewDelegate {
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
DispatchQueue.main.async {
guard let imageAnchor = anchor as? ARImageAnchor,
let imageName = imageAnchor.referenceImage.name else { return }
// TODO: Overlay 3D Object
let overlayNode = self.getNode(withImageName: imageName)
overlayNode.opacity = 0
overlayNode.position.y = 0.2
overlayNode.runAction(self.fadeAndSpinAction)
node.addChildNode(overlayNode)
self.statusLabel.text = "Image detected: \"\(imageName)\""
self.videoNode.geometry = SCNPlane(width: 1276.0 / 2.0, height: 712.0 / 2.0)
self.spriteKitScene.scaleMode = .aspectFit
self.videoSpriteKitNode?.position = CGPoint(x: self.spriteKitScene.size.width / 2.0, y: self.spriteKitScene.size.height / 2.0)
self.videoSpriteKitNode?.size = self.spriteKitScene.size
self.spriteKitScene.addChild(self.videoSpriteKitNode!)
self.videoNode.geometry?.firstMaterial?.diffuse.contents = self.spriteKitScene
var transform = SCNMatrix4MakeRotation(Float(M_PI), 0.0, 0.0, 1.0)
transform = SCNMatrix4Translate(transform, 1.0, 1.0, 0)
self.videoNode.geometry?.firstMaterial?.diffuse.contentsTransform = transform
self.videoNode.position = SCNVector3(x: 0, y: 30, z: 7)
node.addChildNode(self.videoNode)
self.videoSpriteKitNode?.play()
}
}
func getPlaneNode(withReferenceImage image: ARReferenceImage) -> SCNNode {
let plane = SCNPlane(width: image.physicalSize.width,
height: image.physicalSize.height)
let node = SCNNode(geometry: plane)
return node
}
Looking at your code, firstly you are setting your SCNPlane to be 638 metres wide and 356 metres tall; I'm sure that's not what you actually want ^________^.
Anyway, here is an example of playing a local video using an SKScene & SKVideoNode which works well:
//--------------------------
// MARK: - ARSCNViewDelegate
//--------------------------
extension ViewController: ARSCNViewDelegate {
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
//1. Check We Have An ARImageAnchor And Have Detected Our Reference Image
guard let imageAnchor = anchor as? ARImageAnchor else { return }
let referenceImage = imageAnchor.referenceImage
//2. Get The Physical Width & Height Of Our Reference Image
let width = CGFloat(referenceImage.physicalSize.width)
let height = CGFloat(referenceImage.physicalSize.height)
//3. Create An SCNNode To Hold Our Video Player With The Same Size As The Image Target
let videoHolder = SCNNode()
let videoHolderGeometry = SCNPlane(width: width, height: height)
videoHolder.transform = SCNMatrix4MakeRotation(-Float.pi / 2, 1, 0, 0)
videoHolder.geometry = videoHolderGeometry
//4. Create Our Video Player
if let videoURL = Bundle.main.url(forResource: "BlackMirrorz", withExtension: "mp4"){
setupVideoOnNode(videoHolder, fromURL: videoURL)
}
//5. Add It To The Hierarchy
node.addChildNode(videoHolder)
}
/// Creates A Video Player As An SCNGeometry's Diffuse Contents
///
/// - Parameters:
/// - node: SCNNode
/// - url: URL
func setupVideoOnNode(_ node: SCNNode, fromURL url: URL){
//1. Create An SKVideoNode
var videoPlayerNode: SKVideoNode!
//2. Create An AVPlayer With Our Video URL
let videoPlayer = AVPlayer(url: url)
//3. Initialize The Video Node With Our Video Player
videoPlayerNode = SKVideoNode(avPlayer: videoPlayer)
videoPlayerNode.yScale = -1
//4. Create A SpriteKit Scene & Position It
let spriteKitScene = SKScene(size: CGSize(width: 600, height: 300))
spriteKitScene.scaleMode = .aspectFit
videoPlayerNode.position = CGPoint(x: spriteKitScene.size.width/2, y: spriteKitScene.size.height/2)
videoPlayerNode.size = spriteKitScene.size
spriteKitScene.addChild(videoPlayerNode)
//5. Set The Node's Geometry Diffuse Contents To Our SpriteKit Scene
node.geometry?.firstMaterial?.diffuse.contents = spriteKitScene
//6. Play The Video
videoPlayerNode.play()
videoPlayer.volume = 0
}
}
Update:
If you want to place the video above the target you can do something like the following:
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
//1. Check We Have An ARImageAnchor And Have Detected Our Reference Image
guard let imageAnchor = anchor as? ARImageAnchor else { return }
let referenceImage = imageAnchor.referenceImage
//2. Get The Physical Width & Height Of Our Reference Image
let width = CGFloat(referenceImage.physicalSize.width)
let height = CGFloat(referenceImage.physicalSize.height)
//3. Create An SCNNode To Hold Our Video Player
let videoHolder = SCNNode()
let planeHeight = height/2
let videoHolderGeometry = SCNPlane(width: width, height: planeHeight)
videoHolder.transform = SCNMatrix4MakeRotation(-Float.pi / 2, 1, 0, 0)
videoHolder.geometry = videoHolderGeometry
//4. Place It Above The Target
let zPosition = height - (planeHeight/2)
videoHolder.position = SCNVector3(0, 0, -zPosition)
//5. Create Our Video Player
if let videoURL = Bundle.main.url(forResource: "BlackMirrorz", withExtension: "mp4"){
setupVideoOnNode(videoHolder, fromURL: videoURL)
}
//6. Add It To The Hierarchy
node.addChildNode(videoHolder)
}
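If the video should loop rather than stop after one pass, one common approach (an assumption on my side, not part of the answer above) is to observe AVPlayerItemDidPlayToEndTime and seek back to the start; this would live inside setupVideoOnNode(_:fromURL:), where videoPlayer is in scope:
import AVFoundation

// Assumes videoPlayer is the AVPlayer created in setupVideoOnNode(_:fromURL:).
NotificationCenter.default.addObserver(forName: .AVPlayerItemDidPlayToEndTime,
                                       object: videoPlayer.currentItem,
                                       queue: .main) { _ in
    videoPlayer.seek(to: .zero)
    videoPlayer.play()
}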
Hope it helps...

ARKit Calculate distance from a wall to the camera

I’m developing a project with ARKit. I want to calculate the distance from a wall to the camera, and have it update as I move closer or further away.
Now I have enabled detection of horizontal and vertical surfaces. When I get a surface, I calculate the distance between the camera position and the center of the surface, applying the formula for the distance between two points in 3D space (Euclidean distance):
https://math.stackexchange.com/questions/42640/calculate-distance-in-3d-space
Is it correct? Can you help me?
class ViewController: UIViewController, ARSCNViewDelegate, ARSessionDelegate {
let configuration = ARWorldTrackingConfiguration()
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
configuration.planeDetection = [.horizontal, .vertical]
sceneView.session.run(configuration)
......
}
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
guard let planeAnchor = anchor as? ARPlaneAnchor else { return }
let plane = SCNPlane(width: CGFloat(planeAnchor.extent.x), height: CGFloat(planeAnchor.extent.z))
let planeNode = SCNNode(geometry: plane)
planeNode.simdPosition = float3(planeAnchor.center.x, 0, planeAnchor.center.z)
planeNode.eulerAngles.x = -.pi / 2
node.addChildNode(planeNode)
let distance = distanceFromCamera(x: planeAnchor.center.x, y: 0, z: planeAnchor.center.z)
let formatted = String(format: "Distance: %.2f", distance)
print(formatted)
}
private func distanceFromCamera(x: Float, y:Float, z:Float) -> Float {
let cameraPosition = self.sceneView.session.currentFrame!.camera.transform.columns.3
print("Camera: \(cameraPosition)")
let vector = SCNVector3Make(cameraPosition.x - x, cameraPosition.y - y, cameraPosition.z - z)
// Scene units map to meters in ARKit.
return sqrtf(vector.x * vector.x + vector.y * vector.y + vector.z * vector.z)
}
}
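As a side note, planeAnchor.center is expressed in the anchor's own coordinate space, so strictly it should be transformed into world space before measuring. A compact simd version of the same Euclidean distance (a sketch, not part of the original code):
import ARKit
import simd

func distance(from planeAnchor: ARPlaneAnchor, toCameraOf frame: ARFrame) -> Float {
    // Transform the plane's centre from anchor space into world space.
    let centerWorld = planeAnchor.transform * simd_float4(planeAnchor.center, 1)
    // Column 3 of the camera transform holds the camera's world-space translation.
    let cameraWorld = frame.camera.transform.columns.3
    return simd_distance(simd_float3(centerWorld.x, centerWorld.y, centerWorld.z),
                         simd_float3(cameraWorld.x, cameraWorld.y, cameraWorld.z))
}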
Add the following method:
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
guard let currentBall = self.currentBall else {return}
DispatchQueue.main.async {
if let centerPosition = self.hitTestCenterVector() {
let startPositionOfBall = currentBall.position
let distance = self.getDistanceBetween(vector1: centerPosition, vector2: startPositionOfBall)
self.lblDistance.text = String(format: "%.1f", distance) //meter
}
}
}
Just replace self.currentBall in the guard statement with the SCNNode you want to calculate the distance from.
Now here are the methods for the calculations:
func hitTestCenterVector () -> SCNVector3? {
let results = self.sceneView.hitTest(self.sceneView.center, types: .existingPlane)
if let firstObject = results.first {
return SCNVector3(firstObject.worldTransform.columns.3.x, firstObject.worldTransform.columns.3.y, firstObject.worldTransform.columns.3.z)
}
return nil
}
func getDistanceBetween(vector1:SCNVector3, vector2:SCNVector3) -> CGFloat {
return CGFloat(sqrt((vector1.x - vector2.x) * (vector1.x - vector2.x)
+ (vector1.y - vector2.y) * (vector1.y - vector2.y)
+ (vector1.z - vector2.z) * (vector1.z - vector2.z)))
}
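As an aside, hitTest(_:types:) has since been deprecated in favour of raycasting; a rough equivalent of hitTestCenterVector using the raycast API (a sketch, not from the answer above) would be:
import ARKit

func raycastCenterVector() -> SCNVector3? {
    // Raycast from the centre of the view against existing plane geometry.
    guard let query = sceneView.raycastQuery(from: sceneView.center,
                                             allowing: .existingPlaneGeometry,
                                             alignment: .any),
          let result = sceneView.session.raycast(query).first else { return nil }
    let translation = result.worldTransform.columns.3
    return SCNVector3(translation.x, translation.y, translation.z)
}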
Hope it is helpful

ARKit anchor or node visible in camera and sitting to left or right of frustum

How can I detect whether an ARAnchor is currently visible in the camera? I need to test this when the camera view changes.
I want to put arrows on the edge of the screen that point in the direction of the anchor when not on screen. I need to know if the node sits to the left or right of the frustum.
I am now doing this, but it says the pin is visible when it is not, and the X values don't seem right. Maybe the renderer frustum does not match the screen camera?
var deltaTime = TimeInterval()
public func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
deltaTime = time - lastUpdateTime
if deltaTime>1{
if let annotation = annotationsByNode.first {
let node = annotation.key.childNodes[0]
if !renderer.isNode(node, insideFrustumOf: renderer.pointOfView!)
{
print("Pin is not visible");
}else {
print("Pin is visible");
}
let pnt = renderer.projectPoint(node.position)
print("pos ", pnt.x, " ", renderer.pointOfView!.position)
}
lastUpdateTime = time
}
}
Update: the code works to show whether a node is visible or not. How can I tell in which direction, left or right, a node sits in relation to the camera frustum?
Update 2: as suggested in the answer from Bhanu Birani:
let screenWidth = UIScreen.main.bounds.width
let screenHeight = UIScreen.main.bounds.height
let leftPoint = CGPoint(x: 0, y: screenHeight/2)
let rightPoint = CGPoint(x: screenWidth,y: screenHeight/2)
let leftWorldPos = renderer.unprojectPoint(SCNVector3(leftPoint.x,leftPoint.y,0))
let rightWorldPos = renderer.unprojectPoint(SCNVector3(rightPoint.x,rightPoint.y,0))
let distanceLeft = node.position - leftWorldPos
let distanceRight = node.position - rightWorldPos
let dir = (isVisible) ? "visible" : ( (distanceLeft.x<distanceRight.x) ? "left" : "right")
I finally got it working. It uses Bhanu Birani's idea of comparing against the left and right of the screen, but I get the world positions differently (with unprojectPoint) and compute a scalar distance from each, which I compare to get the left/right direction. Maybe there is a better way of doing it, but it worked for me:
public func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
deltaTime = time - lastUpdateTime
if deltaTime>0.25{
if let annotation = annotationsByNode.first {
guard let pointOfView = renderer.pointOfView else {return}
let node = annotation.key.childNodes[0]
let isVisible = renderer.isNode(node, insideFrustumOf: pointOfView)
let screenWidth = UIScreen.main.bounds.width
let screenHeight = UIScreen.main.bounds.height
let leftPoint = CGPoint(x: 0, y: screenHeight/2)
let rightPoint = CGPoint(x: screenWidth,y: screenHeight/2)
let leftWorldPos = renderer.unprojectPoint(SCNVector3(leftPoint.x, leftPoint.y,0))
let rightWorldPos = renderer.unprojectPoint(SCNVector3(rightPoint.x, rightPoint.y,0))
let distanceLeft = node.worldPosition.distance(vector: leftWorldPos)
let distanceRight = node.worldPosition.distance(vector: rightWorldPos)
//let pnt = renderer.projectPoint(node.worldPosition)
//guard let pnt = renderer.pointOfView!.convertPosition(node.position, to: nil) else {return}
let dir = (isVisible) ? "visible" : ( (distanceLeft<distanceRight) ? "left" : "right")
print("dir" , dir, " ", leftWorldPos , " ", rightWorldPos)
lastDir=dir
delegate?.nodePosition?(node:node, pos: dir)
}else {
delegate?.nodePosition?(node:nil, pos: lastDir )
}
lastUpdateTime = time
}
}
extension SCNVector3
{
/**
* Returns the length (magnitude) of the vector described by the SCNVector3
*/
func length() -> Float {
return sqrtf(x*x + y*y + z*z)
}
/**
* Calculates the distance between two SCNVector3. Pythagoras!
*/
func distance(vector: SCNVector3) -> Float {
return (self - vector).length()
}
}
Project the ray from the following screen positions:
leftPoint = CGPoint(0, screenHeight/2) (centre left of the screen)
rightPoint = CGPoint(screenWidth, screenHeight/2) (centre right of the screen)
Convert each CGPoint to a world position (a sketch of such a helper is given after this list):
leftWorldPos = convertCGPointToWorldPosition(leftPoint)
rightWorldPos = convertCGPointToWorldPosition(rightPoint)
Calculate the distance of the node from both world positions:
distanceLeft = node.position - leftWorldPos
distanceRight = node.position - rightWorldPos
Compare the two distances to find which is shorter. Use the shortest-distance vector to position the direction arrow for the object.
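A minimal sketch of the convertCGPointToWorldPosition helper referenced above, using unprojectPoint (the fixed depth of 0 is an assumption; any constant depth works for comparing which side of the screen is closer):
func convertCGPointToWorldPosition(_ point: CGPoint, using renderer: SCNSceneRenderer) -> SCNVector3 {
    // z = 0 unprojects onto the near clipping plane.
    return renderer.unprojectPoint(SCNVector3Make(Float(point.x), Float(point.y), 0))
}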
Here is the code from tsukimi to check whether the object is on the right side of the screen or on the left:
public func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
deltaTime = time - lastUpdateTime
if deltaTime>0.25{
if let annotation = annotationsByNode.first {
guard let pointOfView = renderer.pointOfView else {return}
let node = annotation.key.childNodes[0]
let isVisible = renderer.isNode(node, insideFrustumOf: pointOfView)
let screenWidth = UIScreen.main.bounds.width
let screenHeight = UIScreen.main.bounds.height
let leftPoint = CGPoint(x: 0, y: screenHeight/2)
let rightPoint = CGPoint(x: screenWidth,y: screenHeight/2)
let leftWorldPos = renderer.unprojectPoint(SCNVector3(leftPoint.x, leftPoint.y,0))
let rightWorldPos = renderer.unprojectPoint(SCNVector3(rightPoint.x, rightPoint.y,0))
let distanceLeft = node.worldPosition.distance(vector: leftWorldPos)
let distanceRight = node.worldPosition.distance(vector: rightWorldPos)
//let pnt = renderer.projectPoint(node.worldPosition)
//guard let pnt = renderer.pointOfView!.convertPosition(node.position, to: nil) else {return}
let dir = (isVisible) ? "visible" : ( (distanceLeft<distanceRight) ? "left" : "right")
print("dir" , dir, " ", leftWorldPos , " ", rightWorldPos)
lastDir=dir
delegate?.nodePosition?(node:node, pos: dir)
}else {
delegate?.nodePosition?(node:nil, pos: lastDir )
}
lastUpdateTime = time
}
}
Following is an extension to help perform operations on vectors:
extension SCNVector3 {
init(_ vec: vector_float3) {
self.x = vec.x
self.y = vec.y
self.z = vec.z
}
func length() -> Float {
return sqrtf(x * x + y * y + z * z)
}
mutating func setLength(_ length: Float) {
self.normalize()
self *= length
}
mutating func setMaximumLength(_ maxLength: Float) {
if self.length() <= maxLength {
return
} else {
self.normalize()
self *= maxLength
}
}
mutating func normalize() {
self = self.normalized()
}
func normalized() -> SCNVector3 {
if self.length() == 0 {
return self
}
return self / self.length()
}
static func positionFromTransform(_ transform: matrix_float4x4) -> SCNVector3 {
return SCNVector3Make(transform.columns.3.x, transform.columns.3.y, transform.columns.3.z)
}
func friendlyString() -> String {
return "(\(String(format: "%.2f", x)), \(String(format: "%.2f", y)), \(String(format: "%.2f", z)))"
}
func dot(_ vec: SCNVector3) -> Float {
return (self.x * vec.x) + (self.y * vec.y) + (self.z * vec.z)
}
func cross(_ vec: SCNVector3) -> SCNVector3 {
return SCNVector3(self.y * vec.z - self.z * vec.y, self.z * vec.x - self.x * vec.z, self.x * vec.y - self.y * vec.x)
}
}
extension SCNVector3{
func distance(receiver:SCNVector3) -> Float{
let xd = receiver.x - self.x
let yd = receiver.y - self.y
let zd = receiver.z - self.z
let distance = Float(sqrt(xd * xd + yd * yd + zd * zd))
if (distance < 0){
return (distance * -1)
} else {
return (distance)
}
}
}
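Note that the SCNVector3 extensions above rely on -, / and *= operators that SceneKit does not define out of the box; a minimal set of the assumed overloads:
import SceneKit

func - (left: SCNVector3, right: SCNVector3) -> SCNVector3 {
    return SCNVector3Make(left.x - right.x, left.y - right.y, left.z - right.z)
}

func / (vector: SCNVector3, scalar: Float) -> SCNVector3 {
    return SCNVector3Make(vector.x / scalar, vector.y / scalar, vector.z / scalar)
}

func *= (vector: inout SCNVector3, scalar: Float) {
    vector = SCNVector3Make(vector.x * scalar, vector.y * scalar, vector.z * scalar)
}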
Here is a code snippet to convert a tap location, or any CGPoint, to a world transform:
@objc func handleTap(_ sender: UITapGestureRecognizer) {
// Take the screen space tap coordinates and pass them to the hitTest method on the ARSCNView instance
let tapPoint = sender.location(in: sceneView)
let result = sceneView.hitTest(tapPoint, types: ARHitTestResult.ResultType.existingPlaneUsingExtent)
// If the intersection ray passes through any plane geometry they will be returned, with the planes
// ordered by distance from the camera
if (result.count > 0) {
// If there are multiple hits, just pick the closest plane
if let hitResult = result.first {
let finalPosition = SCNVector3Make(hitResult.worldTransform.columns.3.x + insertionXOffset,
hitResult.worldTransform.columns.3.y + insertionYOffset,
hitResult.worldTransform.columns.3.z + insertionZOffset
);
}
}
}
Following is the code to get hit-test results when no plane is found:
// check what nodes are tapped
let p = gestureRecognize.location(in: scnView)
let hitResults = scnView.hitTest(p, options: [:])
// check that we clicked on at least one object
if hitResults.count > 0 {
// retrieved the first clicked object
let result = hitResults[0]
}
This answer is a bit late, but it can be useful for someone who needs to know where a node is in camera space relative to the center (e.g. top-left corner, centered, ...).
You can get your node position in camera space using scene.rootNode.convertPosition(node.position, to: pointOfView).
In camera space,
(isVisible && (x ≈ 0, y ≈ 0)) means that your node is in front of the camera.
(isVisible && (x ≈ 0.1)) means that the node is a little bit to the right.
Some sample code:
public func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
deltaTime = time - lastUpdateTime
if deltaTime>0.25{
if let annotation = annotationsByNode.first {
guard let pointOfView = renderer.pointOfView else {return}
let node = annotation.key.childNodes[0]
let isVisible = renderer.isNode(node, insideFrustumOf: pointOfView)
// Translate node to camera space
let nodeInCameraSpace = scene.rootNode.convertPosition(node.position, to: pointOfView)
let isCentered = isVisible && (nodeInCameraSpace.x < 0.1) && (nodeInCameraSpace.y < 0.1)
let isOnTheRight = isVisible && (nodeInCameraSpace.x > 0.1)
// ...
delegate?.nodePosition?(node:node, pos: dir)
}else {
delegate?.nodePosition?(node:nil, pos: lastDir )
}
lastUpdateTime = time
}
}
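Building on the camera-space idea, the signed horizontal angle also distinguishes left from right even when the node is behind the camera. A small helper along these lines (an assumption, not part of the answer above):
import SceneKit

// In camera space the camera looks down the negative z axis, so the angle around
// the y axis says how far left (negative) or right (positive) of the view
// direction the node sits, in radians.
func horizontalAngle(toNodeInCameraSpace position: SCNVector3) -> Float {
    return atan2f(position.x, -position.z)
}
A negative angle means the off-screen arrow should point left, a positive one right.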

How to make the shape not go to GameOver? - SpriteKit

I am creating a game where there is a square shape, and every time the player taps on the square it goes to the GameOver scene. All I want is that when the square shape is tapped, it is given a different position on the screen, so the player can keep tapping squares.
Here is my code:
let touch:UITouch = touches.first!
let positionInScene = touch.location(in: self)
let touchedNode = self.atPoint(positionInScene)
if let name = touchedNode.name
{
//The first ball that shows up
if name == "startball"
{
print("Touched", terminator: "")
addBall(ballSize)
self.addChild(score)
}
else if name == "shape"{
scoreCount += 1
addBall(ballSize)
audioPlayer.play()
}
}
else {
let scene = GameOver(size: self.size)
scene.setMyScore(0)
let skView = self.view! as SKView
skView.ignoresSiblingOrder = true
scene.scaleMode = .resizeFill
scene.size = skView.bounds.size
scene.setMessage("You Lost!")
scene.setEndGameMode(va)
gameTimer.invalidate()
shownTimer.invalidate()
print(" timers invalidated ", terminator: "")
ran = true
skView.presentScene(scene, transition: SKTransition.crossFade(withDuration: 0.25))
}
if firstTouch {
shownTimer = Timer.scheduledTimer(timeInterval: 1, target: self, selector: #selector(GameScene.decTimer), userInfo: nil, repeats: true)
gameTimer = Timer.scheduledTimer(timeInterval: TIME_INCREMENT, target:self, selector: Selector("endGame"), userInfo: nil, repeats: false)
firstTouch = false
}
if touchCount > 5 {
for touch: AnyObject in touches {
let skView = self.view! as SKView
skView.ignoresSiblingOrder = true
var scene: Congratz!
scene = Congratz(size: skView.bounds.size)
scene.scaleMode = .aspectFill
skView.presentScene(scene, transition: SKTransition.doorsOpenHorizontal(withDuration: 1.0))
}
}
touchCount += 1
}
override func update(_ currentTime: TimeInterval) {
super.update(currentTime)
}
func endGame(){
shownTimer.invalidate()
gameTimer.invalidate()
let scene = GameOver(size: self.size)
scene.setMyScore(scoreCount)
if let skView = self.view {
skView.ignoresSiblingOrder = false
scene.scaleMode = .resizeFill
scene.size = skView.bounds.size
scene.setMessage("Times up")
skView.presentScene(scene, transition: SKTransition.crossFade(withDuration: 0.25))
}
}
override func addBall(_ size: Int) {
// Add the ball
let currentBall = SKShapeNode(circleOfRadius: CGFloat(size))
let viewMidX = view!.bounds.midX
let viewMidY = view!.bounds.midY
currentBall.fillColor = pickColor()
currentBall.position = randomBallPosition()
if scoreCount != 0{
if scoreCount == 1{
self.addChild(score)
self.addChild(timeLeftLabel)
self.childNode(withName: "welcome")?.removeFromParent()
}
self.childNode(withName: "ball")?.run(getSmaller)
self.childNode(withName: "ball")?.removeFromParent()
}
currentBall.name = "ball"
self.addChild(currentBall)
}
func addSquare(_ size: Int) {
// Add the square
let shape = SKShapeNode(rectOf: CGSize(width:CGFloat(size), height:CGFloat(size)))
shape.path = UIBezierPath(roundedRect: CGRect(x: 64, y: 64, width: 160, height: 160), cornerRadius: 50).cgPath
shape.fillColor = pickColor()
shape.position = randomBallPosition()
shape.name = "shape"
self.addChild(shape)
}
func randomBallPosition() -> CGPoint {
let xPosition = CGFloat(arc4random_uniform(UInt32((view?.bounds.maxX)! + 1)))
let yPosition = CGFloat(arc4random_uniform(UInt32((view?.bounds.maxY)! + 1)))
return CGPoint(x: xPosition, y: yPosition)
}
What I would suggest is making something like an enum for the different shapes, so you can keep track of which shape you're using.
enum GameShape: Int {
case circle = 0
case square = 1
}
Then create a GameShape property at the top of your GameScene:
var currentShape: GameShape = .circle
Then you could create some sort of updateShape method, which you could call in your touchesBegan method instead of just addBall
func updateShape(shapeSize: CGSize) {
switch currentShape {
case .circle:
addBall(shapeSize)
case .square:
addSquare(shapeSize)
}
// However you want to set up the condition for changing shape
if condition {
currentShape = .square
}
}
func addBall(_ size: CGSize) {
// Add the ball
}
func addSquare(_ size: CGSize) {
// Add the square
}
Now in your touchesBegan method, instead of calling addBall(size), you could call updateShape:
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
// Setup your code for detecting position for shape origin
updateShape(shapeSize)
}
EDIT - Your code is a mess. You really should take the time to make sure it's properly formatted when you submit it, otherwise it's really hard to help you. Indentation helps to see where a closure begins and ends. From what I can tell, it looks like you have two or more functions nested within your addBall method. This is not good. I tried my best to clean it up for you. You'll still need to write the code to make the shape a square, but I've led you in the right direction to start making that happen:
func addBall(_ size: CGSize) {
// Add the ball
let currentBall = SKShapeNode(circleOfRadius: size.width / 2)
let viewMidX = view!.bounds.midX
let viewMidY = view!.bounds.midY
currentBall.fillColor = pickColor()
shape.path = UIBezierPath(roundedRect: CGRect(x: 64, y: 64, width: 160, height: 160), cornerRadius: 50).cgPath
shape.fillColor = pickColor()
currentBall.position = randomBallPosition()
shape.position = randomBallPosition()
self.addChild(shape)
if scoreCount != 0{
if scoreCount == 1{
self.addChild(score)
self.addChild(timeLeftLabel)
self.childNode(withName: "welcome")?.removeFromParent()
}
self.childNode(withName: "ball")?.run(getSmaller)
self.childNode(withName: "ball")?.removeFromParent()
}
currentBall.name = "ball"
shape.name = "ball"
self.addChild(currentBall)
}
func addSquare(_ size: CGSize) {
// Add the square
}
func randomBallPosition() -> CGPoint {
let xPosition = CGFloat(arc4random_uniform(UInt32((view?.bounds.maxX)! + 1)))
let yPosition = CGFloat(arc4random_uniform(UInt32((view?.bounds.maxY)! + 1)))
return CGPoint(x: xPosition, y: yPosition)
}
Note that the above code assumes you have some property named shape: from what I could tell, you never explicitly instantiate it, yet you start manipulating it.
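For completeness, a minimal declaration of that shape property could look like this (the size, corner radius and name are placeholders):
// Assumed property; the original code never shows where `shape` is created.
let shape: SKShapeNode = {
    let node = SKShapeNode(rectOf: CGSize(width: 160, height: 160), cornerRadius: 50)
    node.name = "shape"
    return node
}()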