SpriteKit: detect if UILongPressGestureRecognizer tapped child node - swift

I want to detect whether my node was tapped or not. I am using a UILongPressGestureRecognizer:
let longPress = UILongPressGestureRecognizer()
longPress.minimumPressDuration = CFTimeInterval(0.0)
longPress.addTarget(self, action: #selector(self.longPressGesture(longpressGest:)))
self.view?.addGestureRecognizer(longPress)
And the function that is called:
@objc func longPressGesture(longpressGest: UIGestureRecognizer) {
    let touchPos = longpressGest.location(in: self.view)
    if atPoint(touchPos).name == "jump" {
        print("jump")
    }
}
My button, which I want to be detected when it is tapped:
let jump = SKSpriteNode(imageNamed: "Any")
jump.size = CGSize(width: self.size.width*0.06, height: self.size.height*0.08)
jump.position = CGPoint(x: self.size.width*0.05 - self.size.width/2, y: self.size.height*0.1 - self.size.height/2)
jump.zPosition = 2
jump.name = "jump"
cameraNode.addChild(jump)
Important: jump is a child node of my cameraNode.
My cameraNode:
let cameraNode = SKCameraNode()
self.camera = cameraNode
self.addChild(cameraNode)
let range = SKRange(constantValue: 0)
let cameraConstraint = SKConstraint.distance(range, to: player)
cameraNode.constraints = [cameraConstraint]
With this code, "jump" is never printed. I think I have to convert touchPos into the same coordinate system as the cameraNode's (or the jump button's). My question: how can I convert view coordinates into my cameraNode's coordinate system?
P.S. I already tried the various convert functions, but they didn't work. Maybe I just used them incorrectly.

SKScene has a method called convertPoint(fromView:) that converts a point from view coordinates into scene coordinates:
@objc func longPressGesture(longpressGest: UIGestureRecognizer) {
    let touchPosInView = longpressGest.location(in: self.view)
    let touchPos = self.convertPoint(fromView: touchPosInView)
    if atPoint(touchPos).name == "jump" {
        print("jump")
    }
}
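Note that convertPoint(fromView:) gives you scene coordinates, and atPoint(_:) on the scene searches the whole node tree, including the camera's children, so the above should be enough. If you specifically want the point in the camera's own coordinate space, a further conversion is possible. A minimal sketch, assuming cameraNode is stored as a property on the scene:
@objc func longPressGesture(longpressGest: UIGestureRecognizer) {
    let viewPoint = longpressGest.location(in: self.view)
    // view -> scene coordinates
    let scenePoint = convertPoint(fromView: viewPoint)
    // scene -> camera coordinates, so the camera's children can be hit-tested directly
    let cameraPoint = convert(scenePoint, to: cameraNode)
    if cameraNode.atPoint(cameraPoint).name == "jump" {
        print("jump")
    }
}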

Related

Create a moving Button for a Game in Swift

I have an assignment where I need to create a game with SpriteKit.
It should have a button moving randomly within a view and if I click on it I get points.
The issue is that I have no idea how to create a button in SpriteKit.
Do I need a workaround using an SKSpriteNode? If so, how would I make it look like a standard button? Or is there a way to create an actual button for this?
SpriteKit has no built-in SKButton class, but we can build some basic button functionality ourselves. As sangony said, you need three parts: draw the graphics (I'm using SKShapeNode for simplicity, but you could use SKSpriteNode), move the node, and add picking functionality. Here is some code to illustrate.
Draw
Add an SKShapeNode or SKSpriteNode to your SKNode subclass:
shape = SKShapeNode(circleOfRadius: 40)
shape.fillColor = .green
addChild(shape)
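If you want it to look more like a standard button, here are a couple of sketches (the texture name below is made up, not from the original code): either use a textured SKSpriteNode instead of the shape, or put an SKLabelNode on top of it.
// Option A: a textured sprite (asset name "buttonTexture" is hypothetical)
let sprite = SKSpriteNode(imageNamed: "buttonTexture")
sprite.size = CGSize(width: 120, height: 44)
addChild(sprite)
// Option B: keep the shape and add a caption on top
let label = SKLabelNode(text: "Tap me")
label.fontSize = 20
label.verticalAlignmentMode = .center
shape.addChild(label)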
Move
SKAction is very useful. Here is an example that moves the node to a random position and then recursively calls itself; stop/go is regulated by a Boolean flag.
func movement() {
    print("movement")
    let DURATION: CGFloat = 2.0
    let random_x = CGFloat.random(in: -200...200)
    let random_y = CGFloat.random(in: -200...200)
    let random_point = CGPoint(x: random_x, y: random_y)
    let move = SKAction.move(to: random_point, duration: DURATION)
    move.timingMode = .easeInEaseOut
    let wait = SKAction.wait(forDuration: DURATION)
    let parallel = SKAction.group([move, wait])
    let recursion = SKAction.run {
        if self.isInMotion { self.movement() }
    }
    let serial = SKAction.sequence([parallel, recursion])
    self.run(serial)
}
Pick
Testing for user clicks in a scene is called picking. I use a PickableNode protocol, which helps filter which SKNodes you want to be clickable.
extension GameScene {
    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
        for touch in touches {
            // call `pick` on any `PickableNode` that exists at the touch location
            let location = touch.location(in: self)
            let _ = self.nodes(at: location).map { ($0 as? PickableNode)?.pick() }
        }
    }
}
Here is the whole completed class
protocol PickableNode {
    func pick()
}

class Button: SKNode, PickableNode {
    let shape: SKShapeNode
    var isInMotion: Bool = true

    override init() {
        shape = SKShapeNode(circleOfRadius: 40)
        shape.fillColor = .green
        super.init()
        addChild(shape)
        movement()
    }

    required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    func movement() {
        print("movement")
        let DURATION: CGFloat = 2.0
        let random_x = CGFloat.random(in: -200...200)
        let random_y = CGFloat.random(in: -200...200)
        let random_point = CGPoint(x: random_x, y: random_y)
        let move = SKAction.move(to: random_point, duration: DURATION)
        move.timingMode = .easeInEaseOut
        let wait = SKAction.wait(forDuration: DURATION)
        let parallel = SKAction.group([move, wait])
        let recursion = SKAction.run {
            if self.isInMotion { self.movement() }
        }
        let serial = SKAction.sequence([parallel, recursion])
        self.run(serial)
    }

    func pick() {
        print("i got picked")
        if !isInMotion {
            isInMotion = true
            shape.fillColor = .green
            movement()
        } else {
            isInMotion = false
            shape.fillColor = .red
            self.removeAllActions()
        }
    }
}
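A minimal usage sketch (assuming a plain GameScene that also contains the touchesBegan extension above):
class GameScene: SKScene {
    override func didMove(to view: SKView) {
        let button = Button()
        button.position = CGPoint(x: frame.midX, y: frame.midY)
        addChild(button)
    }
}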

Move SCNNode forward when long pressing

I've been trying to move a cube forward when the user long presses the screen. I can't get it to move smoothly. Is there any way I can move it only forward (in the negative z direction) and update the position on each frame of the scene?
Here is what I have so far:
var carLocation = SCNVector3(x: 0, y: 0, z: -0.01)

func setupScene() {
    sceneView = self.view as? SCNView
    scene = SCNScene(named: "art.scnassets/Map.scn")
    sceneView.scene = scene
    let holdRecognizer = UILongPressGestureRecognizer()
    holdRecognizer.addTarget(self, action: #selector(GameViewController.hold(recognizer:)))
    sceneView.addGestureRecognizer(holdRecognizer)
}

@objc func hold(recognizer: UILongPressGestureRecognizer) {
    print("Held down!")
    let position = recognizer.location(in: sceneView)
    carLocation = SCNVector3(x: 0, y: 0, z: -0.01)
    carNode.physicsBody?.velocity += carLocation // assumes a custom += operator for SCNVector3 defined elsewhere
}
I put a count in your print, just so you could see that LongPress is going to repeat itself... 'a lot', which is probably not what you want long term. So, assuming you created something like:
let p = SCNPhysicsBody(type: .dynamic, shape: nil)
p.isAffectedByGravity = false
shipNode.physicsBody = p
Then this will move your object forward and backward smoothly.
@objc func hold(recognizer: UILongPressGestureRecognizer) {
    print("Held down! \(count)")
    count += 1
    shipNode.physicsBody?.velocity = SCNVector3Make(0.0, 0, 0.51)
}

@objc func handleTap(_ gestureRecognize: UIGestureRecognizer) {
    shipNode.physicsBody?.velocity = SCNVector3Make(0.0, 0, -0.51)
}
Unless you have a ton of objects in there, I can't see any reason for it to be slow. Set scnView.showsStatistics = true to see your FPS. Hopefully, you are not trying to run SceneKit in the simulator?
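As a variation (not from the original answer): since a long press reports repeatedly while it is active, you could key the velocity off the recognizer's state so the ship only moves while the finger is actually down, for example:
@objc func hold(recognizer: UILongPressGestureRecognizer) {
    switch recognizer.state {
    case .began:
        // start moving forward while the press is held
        shipNode.physicsBody?.velocity = SCNVector3Make(0, 0, -0.51)
    case .ended, .cancelled:
        // stop when the finger lifts
        shipNode.physicsBody?.velocity = SCNVector3Make(0, 0, 0)
    default:
        break
    }
}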

SCNNode Plane not being correctly detected after rotation

I have a horizontal plane being detected. Once it's detected, I want that plane, which is a rectangle, to always appear horizontal (width > height).
This is the code I'm using to create the SCNNode:
func createFloorNode(anchor: ARPlaneAnchor) -> SCNNode {
    let geometry = SCNPlane(width: CGFloat(anchor.extent.x), height: CGFloat(anchor.extent.z))
    let floorNode = SCNNode(geometry: geometry)
    floorNode.position = SCNVector3(anchor.center.x, 0, anchor.center.z)
    floorNode.geometry?.firstMaterial?.diffuse.contents = UIColor.red
    floorNode.geometry?.firstMaterial?.isDoubleSided = true
    floorNode.eulerAngles = SCNVector3(-Double.pi/2, 0, 0)
    if geometry.width < geometry.height {
        floorNode.eulerAngles = SCNVector3(-Double.pi/2, -Double.pi/2, 0)
    }
    floorNode.name = floorNodeName
    return floorNode
}
After this, I have a tap gesture function added, which should let me know whether I have tapped inside my SCNNode:
@objc func tapped(sender: UITapGestureRecognizer) {
    guard let sceneView = sender.view as? ARSCNView else { return }
    let tapLocation = sender.location(in: sceneView)
    let hitTest = sceneView.hitTest(tapLocation, types: .existingPlaneUsingExtent)
    guard !hitTest.isEmpty else {
        print("Not a plane")
        return
    }
    print("Touched on the plane")
}
The problem is that after I rotate my SCNNode, that function keeps detecting the plane as the old one (before the rotation). If I don't rotate it, it works fine.
Any ideas?

How to detect touch and show new SCNPlane using ARKit?

I am now able to show different SCNPlanes when a card is detected. After the planes are displayed, the user should be able to touch any plane to show a new SCNPlane. Right now the touch is detected properly, but the new SCNPlane is not showing.
Here is the code I've tried:
var cake_1_PlaneNode: SCNNode? = nil

func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
    guard let imageAnchor = anchor as? ARImageAnchor else { return }
    if let imageName = imageAnchor.referenceImage.name {
        print(imageName)
        if imageName == "menu" {
            // Check the detected size of our menu card (should be 5cm * 3cm)
            let menuCardWidth = imageAnchor.referenceImage.physicalSize.width
            let menuCardHeight = imageAnchor.referenceImage.physicalSize.height
            print(
                """
                We Have Detected menu Card With Name \(imageName)
                \(imageName)'s Width Is \(menuCardWidth)
                \(imageName)'s Height Is \(menuCardHeight)
                """)
            // raspberry
            // cake 1
            let cake_1_Plane = SCNPlane(width: 0.045, height: 0.045)
            cake_1_Plane.firstMaterial?.diffuse.contents = UIImage(named: "france")
            cake_1_Plane.cornerRadius = 0.01
            let cake_1_PlaneNode = SCNNode(geometry: cake_1_Plane)
            self.cake_1_PlaneNode = cake_1_PlaneNode
            cake_1_PlaneNode.eulerAngles.x = -.pi/2
            cake_1_PlaneNode.runAction(SCNAction.moveBy(x: 0.15, y: 0, z: -0.125, duration: 0.75))
            node.addChildNode(cake_1_PlaneNode)
            self.sceneView.scene.rootNode.addChildNode(node)
        }
    }
}

override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
    let touch = touches.first as! UITouch
    if touch.view == self.sceneView {
        //print("touch working")
        let viewTouchLocation: CGPoint = touch.location(in: sceneView)
        guard let result = sceneView.hitTest(viewTouchLocation, options: nil).first else {
            return
        }
        if let planeNode = cake_1_PlaneNode, planeNode == result.node {
            print("match")
            cake_1()
        }
    }
}

func cake_1() {
    let plane = SCNPlane(width: 0.15, height: 0.15)
    plane.firstMaterial?.diffuse.contents = UIColor.black.withAlphaComponent(0.75)
    let planeNodee = SCNNode(geometry: plane)
    planeNodee.eulerAngles.x = -.pi / 2
    planeNodee.runAction(SCNAction.moveBy(x: 0.21, y: 0, z: 0, duration: 0))
} // cake_1
Follow this link: Detect touch on SCNNode in ARKit.
Looking at your code I can see several issues (not to mention the naming conventions for your variables and methods).
Firstly, you are creating a Global Variable which you have declared like so:
var cake_1_PlaneNode : SCNNode? = nil
However you use both a Local and a Global Variable for your cake_1_PlaneNode in your delegate callback:
let cake_1_PlaneNode = SCNNode(geometry: cake_1_Plane)
self.cake_1_PlaneNode = cake_1_PlaneNode
Which should simply read like so:
self.cake_1_PlaneNode = SCNNode(geometry: cake_1_Plane)
Secondly, you are adding your cake_1_PlaneNode to the rootNode of your ARSCNView rather than to your detected ARImageAnchor's node, which is probably not what you want to do, since when an ARAnchor is detected:
You can provide visual content for the anchor by attaching geometry
(or other SceneKit features) to this node or by adding child nodes.
As such, this method (unless you actually want to do it like this) is unnecessary.
The remaining issues lie within your cake_1 function itself.
Firstly you are not actually adding your planeNodee to your sceneHierachy.
Since you haven't specified whether the newly initialised planeNode should be added directly to your ARSCNView or as a childNode of your cake_1_PlaneNode, your function should include one of the following:
self.sceneView.scene.rootNode.addChildNode(planeNodee)
self.cake_1_PlaneNode?.addChildNode(planeNodee)
In addition there is probably also no need to rotate your planeNodee since by default an SCNPlane is rendered vertically.
Since you haven't stipulated where you will be placing your content, it could be that using -.pi / 2 is unnecessary, since this could make it virtually invisible to the naked eye.
One other issue which could also account for you not seeing your node, when you actually add it, is the Z position.
If you set 2 nodes at the same position you will likely experience an issue known as Z-fighting (which you can read more about here). As such, you should probably move your added node forward slightly when adding it, e.g. SCNVector3(0, 0, 0.001), to account for this.
Based on all of these points, I have provided a fully working and commented example below:
import UIKit
import ARKit

//-------------------------
//MARK: - ARSCNViewDelegate
//-------------------------

extension ViewController: ARSCNViewDelegate {

    func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {

        //1. Check We Have An ARImageAnchor, Then Get Its Reference Image & Name
        guard let imageAnchor = anchor as? ARImageAnchor else { return }
        let detectedTarget = imageAnchor.referenceImage
        guard let detectedTargetName = detectedTarget.name else { return }

        //2. If We Have Detected Our Virtual Menu Then Add The CakeOnePlane
        if detectedTargetName == "cakeMenu" {
            let cakeOnePlaneGeometry = SCNPlane(width: 0.045, height: 0.045)
            cakeOnePlaneGeometry.firstMaterial?.diffuse.contents = UIColor.cyan
            cakeOnePlaneGeometry.cornerRadius = 0.01
            let cakeOnPlaneNode = SCNNode(geometry: cakeOnePlaneGeometry)
            cakeOnPlaneNode.eulerAngles.x = -.pi/2

            //3. To Allow Us To Easily Keep Track Of Our Currently Added Node We Will Assign It A Unique Name
            cakeOnPlaneNode.name = "Strawberry Cake"

            node.addChildNode(cakeOnPlaneNode)
            cakeOnPlaneNode.runAction(SCNAction.moveBy(x: 0.15, y: 0, z: 0, duration: 0.75))
        }
    }
}

class ViewController: UIViewController {

    @IBOutlet var augmentedRealityView: ARSCNView!
    let augmentedRealitySession = ARSession()
    let configuration = ARWorldTrackingConfiguration()

    //------------------
    //MARK: - Life Cycle
    //------------------

    override func viewDidLoad() {
        super.viewDidLoad()
        setupARSession()
    }

    //-----------------
    //MARK: - ARSession
    //-----------------

    /// Runs The ARSession
    func setupARSession() {

        //1. Load Our Detection Images
        guard let detectionImages = ARReferenceImage.referenceImages(inGroupNamed: "AR Resources", bundle: nil) else { return }

        //2. Configure & Run Our ARSession
        augmentedRealityView.session = augmentedRealitySession
        augmentedRealityView.delegate = self
        configuration.detectionImages = detectionImages
        augmentedRealitySession.run(configuration, options: [.resetTracking, .removeExistingAnchors])
    }

    //--------------------
    //MARK: - Interaction
    //--------------------

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {

        //1. Get The Current Touch Location & Perform An ARSCNHitTest To Check For Any Hit SCNNode's
        guard let currentTouchLocation = touches.first?.location(in: self.augmentedRealityView),
              let hitTestNode = self.augmentedRealityView.hitTest(currentTouchLocation, options: nil).first?.node else { return }

        //2. If We Have Hit Our Strawberry Cake Then We Call Our makeCakeOnNode Function
        if let cakeID = hitTestNode.name, cakeID == "Strawberry Cake" {
            makeCakeOnNode(hitTestNode)
        }
    }

    /// Adds An SCNPlane To A Detected Cake Target
    ///
    /// - Parameter node: SCNNode
    func makeCakeOnNode(_ node: SCNNode) {
        let planeGeometry = SCNPlane(width: 0.15, height: 0.15)
        planeGeometry.firstMaterial?.diffuse.contents = UIColor.black.withAlphaComponent(0.75)
        let planeNode = SCNNode(geometry: planeGeometry)
        planeNode.position = SCNVector3(0, 0, 0.001)
        planeNode.runAction(SCNAction.moveBy(x: 0.21, y: 0, z: 0, duration: 0))
        node.addChildNode(planeNode)
    }
}
Which yields the following on my device:
For your information, this seems to show that your calculations for placing your content are off (unless of course this is the desired result).
As you can see, all of the content rendered correctly; however, the spacing was quite large, and as such you will likely need to pan your device somewhat to see it all when testing and developing further.
Hope it helps...
Please use descriptive and clear names for your variables and functions; it is very hard to read and understand your code otherwise. You can read more about Swift styling guidelines here: https://github.com/raywenderlich/swift-style-guide#naming
You are creating a new plane when the user touches the screen, but you are not adding that plane to the scene, therefore your "cake_1()" function only creates a new plane.
When ARKit detects an image, it automatically creates an empty node and adds it to our scene, at the center of the detected image. We must first keep a reference to the node ARKit has added for us when the image is detected.
Add this variable to the top of your class:
var detectedImageNode: SCNNode?
Then in func renderer(_:didAdd:for:) add the following line:
detectedImageNode = node
Now that we have a reference to the node, we can easily add and remove other nodes.
Add the following lines at the end of cake_1():
if let detectedImageNode = detectedImageNode {
    cake_1_PlaneNode?.removeFromParentNode()
    detectedImageNode.addChildNode(planeNodee)
}
Your final code should look like this:
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
    guard let imageAnchor = anchor as? ARImageAnchor else { return }
    if let imageName = imageAnchor.referenceImage.name {
        print(imageName)
        if imageName == "menu" {
            let cake_1_Plane = SCNPlane(width: 0.045, height: 0.045)
            cake_1_Plane.firstMaterial?.diffuse.contents = UIImage(named: "france")
            cake_1_Plane.cornerRadius = 0.01
            let cake_1_PlaneNode = SCNNode(geometry: cake_1_Plane)
            self.cake_1_PlaneNode = cake_1_PlaneNode
            cake_1_PlaneNode.eulerAngles.x = -.pi/2
            cake_1_PlaneNode.runAction(SCNAction.moveBy(x: 0.15, y: 0, z: -0.125, duration: 0.75))
            node.addChildNode(cake_1_PlaneNode)
            // No need to add the following line. The node is already added to the scene.
            //self.sceneView.scene.rootNode.addChildNode(node)
            detectedImageNode = node
        }
    }
}

func cake_1() {
    let plane = SCNPlane(width: 0.15, height: 0.15)
    plane.firstMaterial?.diffuse.contents = UIColor.black.withAlphaComponent(0.75)
    let planeNodee = SCNNode(geometry: plane)
    planeNodee.eulerAngles.x = -.pi / 2
    if let detectedImageNode = detectedImageNode {
        cake_1_PlaneNode?.removeFromParentNode()
        detectedImageNode.addChildNode(planeNodee)
    }
}
Alternative solution
If you are just trying to change the image of the plane then an easier way to approach this is to just change the texture of the plane.
Replace the contents of cake_1() with:
if let planeGeometry = cake_1_PlaneNode?.geometry {
    planeGeometry.firstMaterial?.diffuse.contents = UIImage(named: "newImage")
}

SKShapeNode coordinates seem reversed when using UIPanGestureRecognizer

I've run into a problem with Swift when creating an SKShapeNode where it seems as though my y coordinate is inverted.
Initially I place my node at the bottom of the screen and add a physics body (which is placed correctly, given that I've set skView.showsPhysics to true and verified this), but whenever I try to interact with the node with a UIPanGestureRecognizer it will not move unless I interact with the very upper, opposite side of the screen. I've found that it will react no matter where the node is, but I must invert the y location of my input.
Furthermore, if I drag downward on the inverted location, the node will move downward, so in some sense it is not completely inverted, in that it still reacts in the correct direction.
At this point I believe it has something to do with the different coordinate systems, but I've been unable to isolate which part of my code is causing the problem. I've included my code below.
override func didMoveToView(view: SKView) {
    let gestureRecognizer = UIPanGestureRecognizer(target: self, action: Selector("handlePan:"))
    self.view?.addGestureRecognizer(gestureRecognizer)
    physicsWorld.gravity = CGVector(dx: 0.0, dy: -2.0)
    physicsWorld.contactDelegate = self
    let userShape = SKShapeNode(rectOfSize: CGSize(width: 30, height: 30))
    userShape.position = CGPointMake(self.frame.size.width/2, userShape.frame.size.height/2)
    userShape.name = "userShape"
    userShape.fillColor = SKColor.blueColor()
    userShape.physicsBody = SKPhysicsBody(rectangleOfSize: CGSize(width: 30, height: 30))
    userShape.physicsBody?.dynamic = true
    userShape.physicsBody?.affectedByGravity = false
    userShape.physicsBody?.mass = 0.02
    userShape.physicsBody?.friction = 0.0
    userShape.physicsBody?.restitution = 0.5
    addChild(userShape)
}
My handler:
func handlePan(recognizer: UIPanGestureRecognizer!) {
    switch recognizer.state {
    case .Began:
        var touchLocation = recognizer.locationInView(recognizer.view!)
        selectNodeForTouch(touchLocation)
    case .Changed:
        var translation = recognizer.translationInView(recognizer.view!)
        translation = CGPointMake(translation.x, translation.y)
        self.panForTranslation(translation)
        recognizer.setTranslation(CGPointZero, inView: recognizer.view!)
    case .Ended:
        var velocity = recognizer.velocityInView(recognizer.view)
        _selectedNode.physicsBody!.applyImpulse(CGVectorMake(velocity.x * velocityController, velocity.y * velocityController * -1.0))
    default:
        break
    }
}

func panForTranslation(translation: CGPoint) {
    var position = _selectedNode.position
    print(_selectedNode.position)
    _selectedNode.position = CGPointMake(position.x + translation.x, position.y - translation.y)
}

func selectNodeForTouch(touchLocation: CGPoint) { // NODE SELECTOR
    var touchedNode = nodeAtPoint(touchLocation)
    _selectedNode = touchedNode
}
I've figured out that it does have to do with a conversion between coordinate systems. In handlePan, within the .Began case, I placed this line directly after creating the touchLocation variable:
touchLocation = self.convertPointFromView(touchLocation)
and it seems to have fixed the problem.