With ARKit I create boxes in my room, and then I want to be able to exit the AR scene, see my boxes, and rotate the camera around them.
I tried allowsCameraControl = true, and now I'm able to zoom and drag my objects, but I would like the camera to orbit around them; for now the objects orbit around me. I've watched WWDC session 604, which explains that the option is self.sceneView.defaultCameraController.interactionMode = .orbitTurntable, but I can't manage to make it work...
Basically, all my "box" nodes are on the horizontal plane, and I have a topBox at my ceiling.
What I've tried:
func getCenterBox() -> SCNNode? {
    guard let _ = self.sceneView.scene.rootNode.childNode(withName: "box", recursively: true) else {
        return nil
    }
    let edges = self.sceneView.scene.rootNode.childNodes.filter { $0.name == "box" }
    let edgesByX = edges.sorted { $0.position.x < $1.position.x }
    let minX = edgesByX.first?.position.x
    let maxX = edgesByX.last?.position.x
    let edgesByZ = edges.sorted { $0.position.z < $1.position.z }
    let minZ = edgesByZ.first?.position.z
    let maxZ = edgesByZ.last?.position.z
    let centerBox = SCNBox(width: 1, height: 1, length: 1, chamferRadius: 0.5)
    let centerNode = SCNNode(geometry: centerBox)
    let centerNodePosition = SCNVector3Make((maxX! - minX!)/2 + minX!, ((topBox?.position.y)! - (edges.first?.position.y)!)/2 + (edges.first?.position.y)!, (maxZ! - minZ!)/2 + minZ!)
    centerNode.position = centerNodePosition
    // Return the node we just positioned (previously this returned a throwaway empty node).
    return centerNode
}
@IBAction func stopARTapped(_ sender: UIButton) {
    self.sceneView.allowsCameraControl = true
    self.sceneView.defaultCameraController.interactionMode = .orbitTurntable
    self.sceneView.defaultCameraController.inertiaEnabled = true
    self.sceneView.defaultCameraController.maximumHorizontalAngle = 0
    self.sceneView.defaultCameraController.maximumVerticalAngle = 0
    self.sceneView.defaultCameraController.minimumHorizontalAngle = 0
    self.sceneView.defaultCameraController.minimumVerticalAngle = 0
    guard let centerNode = getCenterBox() else {
        return
    }
    self.sceneView.scene.rootNode.addChildNode(centerNode)
    let lookAtConstraint = SCNLookAtConstraint(target: centerNode)
    if self.sceneView.pointOfView?.constraints == nil {
        self.sceneView.pointOfView?.constraints = [lookAtConstraint]
    } else {
        self.sceneView.pointOfView?.constraints?.append(lookAtConstraint)
    }
}
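For reference, my understanding of SCNCameraController is that .orbitTurntable orbits around the controller's target, so a minimal, untested variant of the action above would set that target instead of adding a look-at constraint to pointOfView (centerNode comes from getCenterBox() above):
// Untested sketch: let the default camera controller orbit around the
// computed center by giving it a target, instead of constraining pointOfView.
guard let centerNode = getCenterBox() else { return }
sceneView.scene.rootNode.addChildNode(centerNode)

sceneView.allowsCameraControl = true
let controller = sceneView.defaultCameraController
controller.interactionMode = .orbitTurntable
controller.inertiaEnabled = true
// Orbit around the center of the boxes rather than around the camera.
controller.target = centerNode.worldPosition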
I use AVCaptureSession to set up a camera view, and Vision to detect faces and draw a rectangle over each face.
Here is how I do it:
override func viewDidLoad() {
super.viewDidLoad()
self.prepareVisionRequest()
}
fileprivate func prepareVisionRequest() {
//self.trackingRequests = []
var requests = [VNTrackObjectRequest]()
let faceDetectionRequest = VNDetectFaceRectanglesRequest(completionHandler: { (request, error) in
if error != nil {
print("FaceDetection error: \(String(describing: error)).")
}
guard let faceDetectionRequest = request as? VNDetectFaceRectanglesRequest,
let results = faceDetectionRequest.results else {
return
}
DispatchQueue.main.async {
// Add the observations to the tracking list
for observation in results {
let faceTrackingRequest = VNTrackObjectRequest(detectedObjectObservation: observation)
faceTrackingRequest.trackingLevel = .fast
requests.append(faceTrackingRequest)
}
self.trackingRequests = requests
}
})
// Start with detection. Find face, then track it.
self.detectionRequests = [faceDetectionRequest]
self.sequenceRequestHandler = VNSequenceRequestHandler()
self.setupVisionDrawingLayers()
}
// MARK: Drawing Vision Observations
fileprivate func setupVisionDrawingLayers() {
let captureDeviceResolution = self.captureDeviceResolution
let captureDeviceBounds = CGRect(x: 0,
y: 0,
width: captureDeviceResolution.width,
height: captureDeviceResolution.height)
let captureDeviceBoundsCenterPoint = CGPoint(x: captureDeviceBounds.midX,
y: captureDeviceBounds.midY)
let normalizedCenterPoint = CGPoint(x: 0.5, y: 0.5)
guard let rootLayer = self.rootLayer else {
self.presentErrorAlert(message: "view was not properly initialized")
return
}
let overlayLayer = CALayer()
overlayLayer.name = "DetectionOverlay"
overlayLayer.masksToBounds = true
overlayLayer.anchorPoint = normalizedCenterPoint
overlayLayer.bounds = captureDeviceBounds
overlayLayer.position = CGPoint(x: rootLayer.bounds.midX, y: rootLayer.bounds.midY)
let faceRectangleShapeLayer = CAShapeLayer()
faceRectangleShapeLayer.name = "RectangleOutlineLayer"
faceRectangleShapeLayer.bounds = captureDeviceBounds
faceRectangleShapeLayer.anchorPoint = normalizedCenterPoint
faceRectangleShapeLayer.position = captureDeviceBoundsCenterPoint
faceRectangleShapeLayer.fillColor = UIColor.white.withAlphaComponent(0.9).cgColor
// faceLandmarksShapeLayer.strokeColor = UIColor.white.withAlphaComponent(0.7).cgColor
faceRectangleShapeLayer.lineWidth = 5
faceRectangleShapeLayer.shadowOpacity = 0.7
faceRectangleShapeLayer.shadowRadius = 5
let faceLandmarksShapeLayer = CAShapeLayer()
faceLandmarksShapeLayer.name = "FaceLandmarksLayer"
faceLandmarksShapeLayer.bounds = captureDeviceBounds
faceLandmarksShapeLayer.anchorPoint = normalizedCenterPoint
faceLandmarksShapeLayer.position = captureDeviceBoundsCenterPoint
faceLandmarksShapeLayer.fillColor = nil
faceLandmarksShapeLayer.strokeColor = nil
//
overlayLayer.addSublayer(faceRectangleShapeLayer)
faceRectangleShapeLayer.addSublayer(faceLandmarksShapeLayer)
rootLayer.addSublayer(overlayLayer)
self.detectionOverlayLayer = overlayLayer
self.detectedFaceRectangleShapeLayer = faceRectangleShapeLayer
self.detectedFaceLandmarksShapeLayer = faceLandmarksShapeLayer
self.updateLayerGeometry()
}
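Not shown above: the requests are driven from the capture delegate. A hedged sketch of that part, reusing the properties set up in prepareVisionRequest() and assuming detectionRequests and trackingRequests are optional arrays as in Apple's face-tracking sample:
// Hedged sketch of the capture callback that feeds frames to Vision,
// using the detectionRequests / trackingRequests / sequenceRequestHandler
// properties set up above.
func captureOutput(_ output: AVCaptureOutput,
                   didOutput sampleBuffer: CMSampleBuffer,
                   from connection: AVCaptureConnection) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

    // No faces tracked yet: run the one-shot face detection request.
    guard let trackingRequests = self.trackingRequests, !trackingRequests.isEmpty else {
        let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:])
        try? handler.perform(self.detectionRequests ?? [])
        return
    }

    // Faces were found earlier: keep tracking them frame to frame.
    try? self.sequenceRequestHandler.perform(trackingRequests, on: pixelBuffer)
}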
Now, I'm trying three ways to take snapshots:
1. Using UIGraphicsImageRenderer: it shows only the rectangle on the face, and the camera view is not visible; it's black.
2. Taking the image from captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection): the image from the buffer shows only the camera view, no rectangle.
3. Using AVCapturePhotoCaptureDelegate to capture a photo from the AVCaptureSession: the captured photo shows only the camera view, no rectangle.
Could you please help me take a snapshot that contains both the camera view and the rectangle? Thanks!
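For what it's worth, here is a rough, untested sketch that combines approaches 1 and 2: take the frame from the sample buffer, then flatten the overlay layer on top of it with UIGraphicsImageRenderer. detectionOverlayLayer is the layer from the code above; scaling between the buffer size and the overlay bounds is ignored, and CALayer rendering should happen on the main thread.
// Rough sketch: camera frame from the sample buffer + overlay layer on top.
func snapshot(from sampleBuffer: CMSampleBuffer) -> UIImage? {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return nil }
    let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
    let context = CIContext()
    guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return nil }
    let frameImage = UIImage(cgImage: cgImage)

    let renderer = UIGraphicsImageRenderer(size: frameImage.size)
    return renderer.image { ctx in
        // Draw the camera frame first...
        frameImage.draw(in: CGRect(origin: .zero, size: frameImage.size))
        // ...then render the rectangle overlay layer on top of it.
        self.detectionOverlayLayer?.render(in: ctx.cgContext)
    }
}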
I create a sphere node. I need the user to be able only to rotate (left/right, up/down) and zoom in/out the node, but by default they can also move the node away from the center (with two fingers). Is it possible to prohibit the user from moving the node away from the center? Thanks for any help.
sceneView.scene = scene
cameraOrbit = SCNNode()
cameraNode = SCNNode()
camera = SCNCamera()
// camera stuff
camera.usesOrthographicProjection = true
camera.orthographicScale = 5
camera.zNear = 1
camera.zFar = 100
cameraNode.position = SCNVector3(x: 0, y: 0, z: 70)
cameraNode.camera = camera
cameraOrbit.addChildNode(cameraNode)
// add the orbit node (which holds the camera) to the scene
scene.rootNode.addChildNode(cameraOrbit)
let sphere = SCNSphere(radius: 2)
sphere.firstMaterial?.diffuse.contents = UIColor.red
let earthNode = SCNNode(geometry: sphere)
earthNode.name = "sphere"
earthNode.geometry?.materials = [blueMaterial]
scene.rootNode.addChildNode(earthNode)
earthNode.rotation = SCNVector4(0, 1, 0, 0)
let lightNode = SCNNode()
let light = SCNLight()
light.type = .ambient
light.intensity = 200
lightNode.light = light
scene.rootNode.addChildNode(lightNode)
sceneView.allowsCameraControl = true
sceneView.backgroundColor = UIColor.clear
sceneView.cameraControlConfiguration.allowsTranslation = true
sceneView.cameraControlConfiguration.rotationSensitivity = 0.4
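For what it's worth, if the goal is only to block the two-finger translation while keeping rotation and zoom, the cameraControlConfiguration shown above seems to be the relevant switch; a small untested sketch:
// Untested sketch: keep the built-in rotate/zoom gestures but turn off
// the two-finger translation that moves the camera (and so the node)
// away from the center.
sceneView.allowsCameraControl = true
sceneView.cameraControlConfiguration.allowsTranslation = false
sceneView.cameraControlConfiguration.rotationSensitivity = 0.4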
You can put similar code into your UIViewController:
//**************************************************************************
// Gesture Recognizers
// MARK: Gesture Recognizers
//**************************************************************************
@objc func handleTap(recognizer: UITapGestureRecognizer)
{
if(data.isNavigationOff == true) { return } // No panel select if Add, Update, EndWave, or EndGame
if(gameMenuTableView.isHidden == false) { return } // No panel if game menu is showing
let location: CGPoint = recognizer.location(in: gameScene)
if(data.isAirStrikeModeOn == true)
{
let projectedPoint = gameScene.projectPoint(SCNVector3(0, 0, 0))
let scenePoint = gameScene.unprojectPoint(SCNVector3(location.x, location.y, CGFloat(projectedPoint.z)))
gameControl.airStrike(position: scenePoint)
}
else
{
let hitResults = gameScene.hitTest(location, options: hitTestOptions)
for vHit in hitResults
{
if(vHit.node.name?.prefix(5) == "Panel")
{
// May have selected an invalid panel or auto upgrade was on
if(gameControl.selectPanel(vPanel: vHit.node.name!) == false) { return }
return
}
}
}
}
//**************************************************************************
@objc func handlePan(recognizer: UIPanGestureRecognizer)
{
if(data.gameState != .run || data.isGamePaused == true) { return }
currentLocation = recognizer.location(in: gameScene)
switch recognizer.state
{
case UIGestureRecognizer.State.began:
beginLocation = recognizer.location(in: gameScene)
break
case UIGestureRecognizer.State.changed:
if(currentLocation.x > beginLocation.x * 1.1)
{
beginLocation.x = currentLocation.x
gNodes.camera.strafeLeft()
}
if(currentLocation.x < beginLocation.x * 0.9)
{
beginLocation.x = currentLocation.x
gNodes.camera.strafeRight()
}
break
case UIGestureRecognizer.State.ended:
break
default:
break
}
}
Yes, all of that is doable. First, create your own camera class and turn off allowsCameraControl. Then you can implement zoom/strafe/whatever.
Here are some examples that may help; just search for these numbers in the Stack Overflow search bar to find my answers/examples.
57018359 - this post tells you how to take a tap on the 2D screen and translate it to 3D coordinates, with you deciding the depth (z), for example if you wanted to tap the screen and place an object in 3D space.
57003908 - this post tells you how to select an object with a hitTest (tap). For example, if you showed the front of a house with a door and tapped it, the function would return your door node, provided you named the node "door" and took some kind of action when it's touched. Then you could reposition your camera based on that position. You'll want to iterate through all the results because there might be overlapping nodes or nodes further along Z.
55129224 - this post gives you a quick example of creating a camera class. You can use this to reposition your camera or move it forward and back, etc.
Two-finger drag:
func dragBegins(vRecognizer: UIPanGestureRecognizer)
{
    if(data.gameState == .run)
    {
        if(vRecognizer.numberOfTouches == 2) { dragMode = .strafe }
    }
}
class Camera
{
var data = Data.sharedInstance
var util = Util.sharedInstance
var gameDefaults = Defaults()
var cameraEye = SCNNode()
var cameraFocus = SCNNode()
var centerX: Int = 100
var strafeDelta: Float = 0.8
var zoomLevel: Int = 35
var zoomLevelMax: Int = 35 // Max number of zoom levels
//********************************************************************
init()
{
cameraEye.name = "Camera Eye"
cameraFocus.name = "Camera Focus"
cameraFocus.isHidden = true
cameraFocus.position = SCNVector3(x: 0, y: 0, z: 0)
cameraEye.camera = SCNCamera()
cameraEye.constraints = []
cameraEye.position = SCNVector3(x: 0, y: 15, z: 0.1)
let vConstraint = SCNLookAtConstraint(target: cameraFocus)
vConstraint.isGimbalLockEnabled = true
cameraEye.constraints = [vConstraint]
}
//********************************************************************
func reset()
{
centerX = 100
cameraFocus.position = SCNVector3(x: 0, y: 0, z: 0)
cameraEye.constraints = []
cameraEye.position = SCNVector3(x: 0, y: 32, z: 0.1)
cameraFocus.position = SCNVector3Make(0, 0, 0)
let vConstraint = SCNLookAtConstraint(target: cameraFocus)
vConstraint.isGimbalLockEnabled = true
cameraEye.constraints = [vConstraint]
}
//********************************************************************
func strafeRight()
{
if(centerX + 1 < 112)
{
centerX += 1
cameraEye.position.x += strafeDelta
cameraFocus.position.x += strafeDelta
}
}
//********************************************************************
func strafeLeft()
{
if(centerX - 1 > 90)
{
centerX -= 1
cameraEye.position.x -= strafeDelta
cameraFocus.position.x -= strafeDelta
}
}
//********************************************************************
}
//********************************************************************
func lerp(start: SCNVector3, end: SCNVector3, percent: Float) -> SCNVector3
{
    let v3 = cgVecSub(v1: end, v2: start)
    let v4 = cgVecScalarMult(v: v3, s: percent)
    return cgVecAdd(v1: start, v2: v4)
}
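cgVecSub, cgVecScalarMult, and cgVecAdd aren't shown in the post; assuming they are plain component-wise SCNVector3 helpers, they would look roughly like this:
// Assumed implementations of the vector helpers used by lerp() above
// (simple component-wise SCNVector3 math).
func cgVecSub(v1: SCNVector3, v2: SCNVector3) -> SCNVector3
{
    return SCNVector3Make(v1.x - v2.x, v1.y - v2.y, v1.z - v2.z)
}
func cgVecAdd(v1: SCNVector3, v2: SCNVector3) -> SCNVector3
{
    return SCNVector3Make(v1.x + v2.x, v1.y + v2.y, v1.z + v2.z)
}
func cgVecScalarMult(v: SCNVector3, s: Float) -> SCNVector3
{
    return SCNVector3Make(v.x * s, v.y * s, v.z * s)
}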
Good Morning,
How can I get the location of a contact between two physics bodies via allContactedBodies()?
Because of the structure of my app I can't use the contact delegate's didBegin(_:) method, but have to check manually via allContactedBodies() from SpriteKit. But is there a way to also get the point where the contact occurred?
This is how I check for a contact, but now I also need the position of the contact:
if let unwrapped_allContactedBodies = spriteObject.spriteNode.physicsBody?.allContactedBodies() {
    if spriteObject.spriteNode.physicsBody?.allContactedBodies().count ?? 0 > 0 {
        return checkForContact(contactedBodies: unwrapped_allContactedBodies, parameter: value)
    } else {
        return 0.0
    }
} else {
    return 0.0
}
Any Ideas?
You can use the contact.contactPoint property. You will have to implement the SKPhysicsContactDelegate.
Example:
enum CollisionTypes: UInt32{
case nodeAColliding = 1
case nodeBColliding = 2
}
class GameScene: SKScene, SKPhysicsContactDelegate {
override func didMove(to view: SKView) {
physicsWorld.contactDelegate = self
}
func createNodes(){
let nodeA = SKSpriteNode(imageNamed: "nodeAImage")
nodeA.name = "nodeA"
nodeA.zPosition = 2
nodeA.physicsBody = SKPhysicsBody(rectangleOf: CGSize(width: 50 , height: 50))
nodeA.position = //some CGpoint
nodeA.physicsBody?.categoryBitMask = CollisionTypes.nodeAColliding.rawValue
nodeA.physicsBody?.contactTestBitMask = CollisionTypes.nodeBColliding.rawValue
nodeA.physicsBody?.collisionBitMask = CollisionTypes.nodeBColliding.rawValue
addChild(nodeA)
let nodeB = SKSpriteNode(imageNamed: "nodeBImage")
nodeB.name = "nodeB"
nodeB.zPosition = 2
nodeB.physicsBody = SKPhysicsBody(rectangleOf: CGSize(width: 50 , height: 50))
nodeB.position = //some CGpoint
nodeB.physicsBody?.categoryBitMask = CollisionTypes.nodeBColliding.rawValue
nodeB.physicsBody?.contactTestBitMask = CollisionTypes.nodeAColliding.rawValue
nodeB.physicsBody?.collisionBitMask = CollisionTypes.nodeAColliding.rawValue
addChild(nodeB)
}
func didBegin(_ contact: SKPhysicsContact){
guard let contactedNodeA = contact.bodyA.node else {return}
guard let contactedNodeB = contact.bodyB.node else {return}
print(contact.contactPoint)
if contactedNodeA.name == "nodeA"{
//Do something
}
if contactedNodeA.name == "nodeB"{
//Do something
}
}
}
note: I have not tested this code in the compiler.
Some background about my app: I am drawing a map. When the user moves the map I perform a database query. I first do an rTree query to find the features that would be drawn in the current viewport. Once I have those IDs I perform a second database query to extract the features (GeoJSON) from the database. I do a quick check to see if the item has already been drawn; if not, I call addChild to render the feature on the map. I want to do these database lookups in the background via GCD so the user can move the map smoothly. I've implemented this, but the memory usage quickly grows to 1 GB, whereas if I do all the work on the main thread it uses around 250 MB (acceptable for me). I'm assuming something is not being cleaned up because of the closure use. Any insight into the cause of the memory leak is appreciated.
public func drawItemsInBox(boundingBox: [Double]) {
DispatchQueue.global(qos: .background).async { [weak self] in
guard let self = self else {
return
}
var drawItems: [Int64] = []
let table = Table("LNDARE_XS")
let tableRTree = Table("LNDARE_XS_virtual")
let coords = Expression<String?>("coords")
let foid = Expression<String>("foid")
let rTree = Expression<Int64>("rTree")
let minX = Expression<Double>("minX")
let maxX = Expression<Double>("maxX")
let minY = Expression<Double>("minY")
let maxY = Expression<Double>("maxY")
let id = Expression<Int64>("id")
// find all the features to draw via an rTree query
for row in try! self.db.prepare(tableRTree.filter(maxX >= boundingBox[0] && minX <= boundingBox[1] && maxY >= boundingBox[2] && minY <= boundingBox[3])) {
drawItems.append(row[id])
}
do {
// get all the features geojson data
let query = table.filter(drawItems.contains(rTree))
for row in try self.db.prepare(query) {
// skip drawing if the feature already exists on the map
if self.featureTracking["LNDARE_XS"]?[Int64(row[foid])!] == nil {
// convert the database string to an array of coords
var toBeRendered:[CGPoint] = []
let coordsArray = row[coords]!.components(separatedBy: ",")
for i in 0...(coordsArray.count / 2) - 1 {
toBeRendered.append(CGPoint(x: (Double(coordsArray[i*2])!), y: (Double(coordsArray[(i*2)+1])!)))
}
let linearShapeNode = SKShapeNode(points: &toBeRendered, count: toBeRendered.count)
linearShapeNode.position = CGPoint(x: self.frame.midX, y: self.frame.midY)
linearShapeNode.lineWidth = 0
linearShapeNode.fillColor = NSColor.black
// append the featureId for tracking and call addChild to draw
self.scaleLayer.addChild(linearShapeNode)
self.featureTracking["LNDARE_XS"]?[Int64(row[foid])!] = linearShapeNode
}
}
} catch {
// catch
}
}
}
Maybe changing toBeRendered so it is declared once and reused can save some memory:
var toBeRendered:[CGPoint] = []
for row in try self.db.prepare(query) {
// skip drawing if the feature already exists on the map
if self.featureTracking["LNDARE_XS"]?[Int64(row[foid])!] == nil {
// convert the database string to an array of coords
toBeRendered.removeAll()
let coordsArray = row[coords]!.components(separatedBy: ",")
for i in 0...(coordsArray.count / 2) - 1 {
toBeRendered.append(CGPoint(x: (Double(coordsArray[i*2])!), y: (Double(coordsArray[(i*2)+1])!)))
}
Maybe try using an autorelease pool, since you are not on the main thread:
public func drawItemsInBox(boundingBox: [Double]) {
DispatchQueue.global(qos: .background).async { [weak self] in
guard let self = self else {
return
}
var drawItems: [Int64] = []
let table = Table("LNDARE_XS")
let tableRTree = Table("LNDARE_XS_virtual")
let coords = Expression<String?>("coords")
let foid = Expression<String>("foid")
let rTree = Expression<Int64>("rTree")
let minX = Expression<Double>("minX")
let maxX = Expression<Double>("maxX")
let minY = Expression<Double>("minY")
let maxY = Expression<Double>("maxY")
let id = Expression<Int64>("id")
// find all the features to draw via an rTree query
for row in try! self.db.prepare(tableRTree.filter(maxX >= boundingBox[0] && minX <= boundingBox[1] && maxY >= boundingBox[2] && minY <= boundingBox[3])) {
drawItems.append(row[id])
}
do {
// get all the features geojson data
let query = table.filter(drawItems.contains(rTree))
for row in try self.db.prepare(query) {
autoreleasepool{
// skip drawing if the feature already exists on the map
if self.featureTracking["LNDARE_XS"]?[Int64(row[foid])!] == nil {
// convert the database string to an array of coords
var toBeRendered:[CGPoint] = []
let coordsArray = row[coords]!.components(separatedBy: ",")
for i in 0...(coordsArray.count / 2) - 1 {
toBeRendered.append(CGPoint(x: (Double(coordsArray[i*2])!), y: (Double(coordsArray[(i*2)+1])!)))
}
let linearShapeNode = SKShapeNode(points: &toBeRendered, count: toBeRendered.count)
linearShapeNode.position = CGPoint(x: self.frame.midX, y: self.frame.midY)
linearShapeNode.lineWidth = 0
linearShapeNode.fillColor = NSColor.black
// append the featureId for tracking and call addChild to draw
self.scaleLayer.addChild(linearShapeNode)
self.featureTracking["LNDARE_XS"]?[Int64(row[foid])!] = linearShapeNode
}
}
}
} catch {
// catch
}
}
}
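One more thing that might be worth checking (untested against this project): the SKShapeNode creation and scaleLayer.addChild calls run on the background queue, and mutating a presented SpriteKit scene is generally safer on the main thread. Keeping the database work in the background but hopping to the main queue for just the node creation would look roughly like this, inside the loop above:
// Sketch of the inner part of the loop: build the point data on the
// background queue, then create and attach the node on the main queue.
let points = toBeRendered                  // value copy captured by the closure
let featureId = Int64(row[foid])!
DispatchQueue.main.async {
    var pts = points
    let linearShapeNode = SKShapeNode(points: &pts, count: pts.count)
    linearShapeNode.position = CGPoint(x: self.frame.midX, y: self.frame.midY)
    linearShapeNode.lineWidth = 0
    linearShapeNode.fillColor = NSColor.black
    self.scaleLayer.addChild(linearShapeNode)
    self.featureTracking["LNDARE_XS"]?[featureId] = linearShapeNode
}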
The app I'm working on is supposed to show a 3D object, and the user can pick a color to paint it with. I have an SCNScene with multiple meshes making up a 3D model. I need to build an interactive side panel with colors the user can use to color the 3D model. The code is here on GitHub.
Here is my code (for now it's all in one class; that's bad, I know):
import UIKit
import QuartzCore
import SceneKit
import SpriteKit
class GameViewController: UIViewController {
var cameraOrbit = SCNNode()
let cameraNode = SCNNode()
let camera = SCNCamera()
let floorNode = SCNNode()
var wallNode = SCNNode()
var lateralWallRight = SCNNode()
var lateralWallLeft = SCNNode()
var spotLightNode = SCNNode()
//HANDLE PAN CAMERA
var initialPositionCamera = SCNVector3(x: -25, y: 70, z: 1450)
var translateEnabled = false
var lastXPos:Float = 0.0
var lastYPos:Float = 0.0
var xPos:Float = 0.0
var yPos:Float = 0.0
var lastWidthRatio: Float = 0
var lastHeightRatio: Float = 0.1
var widthRatio: Float = 0
var heightRatio: Float = 0.1
var fingersNeededToPan = 1 //change this from GUI
var panAttenuation: Float = 10 //5.0: very fast ---- 40.0 very slow
let maxWidthRatioRight: Float = 0.2
let maxWidthRatioLeft: Float = -0.2
let maxHeightRatioXDown: Float = 0.065
let maxHeightRatioXUp: Float = 0.4
//HANDLE PINCH CAMERA
var pinchAttenuation = 1.0 //1.0: very fast ---- 100.0 very slow
var lastFingersNumber = 0
let maxPinch = 146.0
let minPinch = 40.0
//OVERLAY
var colorPanelScene = SKScene()
var pickedColor: UIColor = UIColor.whiteColor()
var NodesToColors = [SKSpriteNode: UIColor]()
var didPickColor = false
var OverlayBackground: SKSpriteNode = SKSpriteNode()
func setColors() {
//Color Setup
let ColorWhite = colorPanelScene.childNodeWithName("ColorWhite") as! SKSpriteNode
let ColorRed = colorPanelScene.childNodeWithName("ColorRed") as! SKSpriteNode
let ColorBrown = colorPanelScene.childNodeWithName("ColorBrown")as! SKSpriteNode
let ColorDarkBrown = colorPanelScene.childNodeWithName("ColorDarkBrown")as! SKSpriteNode
let white = UIColor(red:1, green:0.95, blue:0.71, alpha:1)
let brown = UIColor(red:0.49, green:0.26, blue:0.17, alpha:1)
let red = UIColor(red:0.67, green:0.32, blue:0.21, alpha:1)
let darkBrown = UIColor(red:0.27, green:0.25, blue:0.21, alpha:1)
NodesToColors = [
ColorWhite: white,
ColorRed: red,
ColorBrown: brown,
ColorDarkBrown: darkBrown
]
OverlayBackground = colorPanelScene.childNodeWithName("OverlayBackground")as! SKSpriteNode
}
func blur(image image: UIImage) -> UIImage {
let radius: CGFloat = 20;
let context = CIContext(options: nil);
let inputImage = CIImage(CGImage: image.CGImage!);
let filter = CIFilter(name: "CIGaussianBlur");
filter?.setValue(inputImage, forKey: kCIInputImageKey);
filter?.setValue("\(radius)", forKey:kCIInputRadiusKey);
let result = filter?.valueForKey(kCIOutputImageKey) as! CIImage;
let rect = CGRectMake(radius * 2, radius * 2, image.size.width - radius * 4, image.size.height - radius * 4)
let cgImage = context.createCGImage(result, fromRect: rect);
let returnImage = UIImage(CGImage: cgImage);
return returnImage;
}
override func viewDidLoad() {
super.viewDidLoad()
// create a new scene
let scene = SCNScene(named: "art.scnassets/Figure.scn")!
// MARK: Lights
//create and add a light to the scene
let lightNode = SCNNode()
lightNode.light = SCNLight()
lightNode.light!.type = SCNLightTypeOmni
lightNode.position = SCNVector3(x: 0, y: 1000, z: 1000)
scene.rootNode.addChildNode(lightNode)
// create and add an ambient light to the scene
let ambientLightNode = SCNNode()
ambientLightNode.light = SCNLight()
ambientLightNode.light!.type = SCNLightTypeAmbient
ambientLightNode.light!.color = UIColor.darkGrayColor()
scene.rootNode.addChildNode(ambientLightNode)
//MARK: Camera
camera.usesOrthographicProjection = true
camera.orthographicScale = 100
camera.zNear = 10
camera.zFar = 3000
cameraNode.position = initialPositionCamera
cameraNode.camera = camera
cameraOrbit = SCNNode()
cameraOrbit.addChildNode(cameraNode)
scene.rootNode.addChildNode(cameraOrbit)
//initial camera setup
self.cameraOrbit.eulerAngles.y = Float(-2 * M_PI) * lastWidthRatio
self.cameraOrbit.eulerAngles.x = Float(-M_PI) * lastHeightRatio
lastXPos = self.cameraNode.position.x
lastYPos = self.cameraNode.position.y
//MARK: Floor
let floor = SCNFloor()
floor.reflectionFalloffEnd = 0
floor.reflectivity = 0
floorNode.geometry = floor
floorNode.name = "Floor"
floorNode.geometry!.firstMaterial!.diffuse.contents = "art.scnassets/floor.png"
floorNode.geometry!.firstMaterial!.locksAmbientWithDiffuse = true
floorNode.geometry!.firstMaterial!.diffuse.wrapS = SCNWrapMode.Repeat
floorNode.geometry!.firstMaterial!.diffuse.wrapT = SCNWrapMode.Repeat
floorNode.geometry!.firstMaterial!.diffuse.mipFilter = SCNFilterMode.Nearest
floorNode.geometry!.firstMaterial!.doubleSided = false
floorNode.castsShadow = true
scene.rootNode.addChildNode(floorNode)
//MARK: Walls
// create the wall geometry
let wallGeometry = SCNPlane.init(width: 500.0, height: 300.0)
wallGeometry.firstMaterial!.diffuse.contents = "art.scnassets/background.jpg"
wallGeometry.firstMaterial!.diffuse.mipFilter = SCNFilterMode.Nearest
wallGeometry.firstMaterial!.diffuse.wrapS = SCNWrapMode.Repeat
wallGeometry.firstMaterial!.diffuse.wrapT = SCNWrapMode.Repeat
wallGeometry.firstMaterial!.doubleSided = false
wallGeometry.firstMaterial!.locksAmbientWithDiffuse = true
wallNode = SCNNode.init(geometry: wallGeometry)
wallNode.name = "FrontWall"
wallNode.position = SCNVector3Make(0, 120, -300) //this moves all 3 walls
wallNode.castsShadow = true
// RIGHT LATERAL WALL
lateralWallRight = SCNNode.init(geometry: wallGeometry)
lateralWallRight.name = "lateralWallRight"
lateralWallRight.position = SCNVector3Make(-300, -20, 150);
lateralWallRight.rotation = SCNVector4(x: 0, y: 1, z: 0, w: Float(M_PI/3))
lateralWallRight.castsShadow = true
wallNode.addChildNode(lateralWallRight)
// LEFT LATERAL WALL
lateralWallLeft = SCNNode.init(geometry: wallGeometry)
lateralWallLeft.name = "lateralWallLeft"
lateralWallLeft.position = SCNVector3Make(300, -20, 150);
lateralWallLeft.rotation = SCNVector4(x: 0, y: -1, z: 0, w: Float(M_PI/3))
lateralWallLeft.castsShadow = true
wallNode.addChildNode(lateralWallLeft)
//front walls
scene.rootNode.addChildNode(wallNode)
// retrieve the SCNView
let scnView = self.view as! SCNView
// set the scene to the view
scnView.scene = scene
// allows the user to manipulate the camera
scnView.allowsCameraControl = false //not needed
// configure the view
scnView.backgroundColor = UIColor.grayColor()
//MARK: Gesture Recognizer in SceneView
// add a pan gesture recognizer
let panGesture = UIPanGestureRecognizer(target: self, action: #selector(GameViewController.handlePan(_:)))
scnView.addGestureRecognizer(panGesture)
// add a tap gesture recognizer
let tapGesture = UITapGestureRecognizer(target: self, action: #selector(GameViewController.handleTap(_:)))
scnView.addGestureRecognizer(tapGesture)
// add a pinch gesture recognizer
let pinchGesture = UIPinchGestureRecognizer(target: self, action: #selector(GameViewController.handlePinch(_:)))
scnView.addGestureRecognizer(pinchGesture)
//MARK: OverLay
colorPanelScene = SKScene(fileNamed: "art.scnassets/ColorPanelScene")!
scnView.overlaySKScene = colorPanelScene
scnView.overlaySKScene!.userInteractionEnabled = true;
didPickColor = false
setColors()
//let OverlayBackground = colorPanelScene.childNodeWithName("OverlayBackground")as! SKSpriteNode
}
func handlePan(gestureRecognize: UIPanGestureRecognizer) {
let numberOfTouches = gestureRecognize.numberOfTouches()
let translation = gestureRecognize.translationInView(gestureRecognize.view!)
if (numberOfTouches==fingersNeededToPan) {
widthRatio = Float(translation.x) / Float(gestureRecognize.view!.frame.size.width) + lastWidthRatio
heightRatio = Float(translation.y) / Float(gestureRecognize.view!.frame.size.height) + lastHeightRatio
// HEIGHT constraints
if (heightRatio >= maxHeightRatioXUp ) {
heightRatio = maxHeightRatioXUp
}
if (heightRatio <= maxHeightRatioXDown ) {
heightRatio = maxHeightRatioXDown
}
// WIDTH constraints
if(widthRatio >= maxWidthRatioRight) {
widthRatio = maxWidthRatioRight
}
if(widthRatio <= maxWidthRatioLeft) {
widthRatio = maxWidthRatioLeft
}
self.cameraOrbit.eulerAngles.y = Float(-2 * M_PI) * widthRatio
self.cameraOrbit.eulerAngles.x = Float(-M_PI) * heightRatio
lastFingersNumber = fingersNeededToPan
//TRANSLATION pan
} else if numberOfTouches == (fingersNeededToPan+1) {
if translateEnabled {
xPos = (lastXPos + Float(-translation.x))/(panAttenuation)
yPos = (lastYPos + Float(translation.y))/(panAttenuation)
self.cameraNode.position.x = xPos
self.cameraNode.position.y = yPos
}
lastFingersNumber = fingersNeededToPan+1
}
if (lastFingersNumber == fingersNeededToPan && numberOfTouches != fingersNeededToPan) {
lastWidthRatio = widthRatio
lastHeightRatio = heightRatio
}
if lastFingersNumber != (fingersNeededToPan+1) && numberOfTouches != (fingersNeededToPan+1) {
lastXPos = xPos
lastYPos = yPos
}
if (gestureRecognize.state == .Ended) {
if (lastFingersNumber==fingersNeededToPan) {
lastWidthRatio = widthRatio
lastHeightRatio = heightRatio
//print("lastHeight: \(round(lastHeightRatio*100))")
//print("lastWidth: \(round(lastWidthRatio*100))")
}
if lastFingersNumber==(fingersNeededToPan+1) {
lastXPos = xPos
lastYPos = yPos
print("lastX: \(xPos)")
print("lastY: \(yPos)")
}
print("Pan with \(lastFingersNumber) finger\(lastFingersNumber>1 ? "s" : "")")
}
}
func handlePinch(gestureRecognize: UIPinchGestureRecognizer) {
let pinchVelocity = Double.init(gestureRecognize.velocity)
//print("PinchVelocity \(pinchVelocity)")
camera.orthographicScale -= (pinchVelocity/pinchAttenuation)
if camera.orthographicScale <= minPinch {
camera.orthographicScale = minPinch
}
if camera.orthographicScale >= maxPinch {
camera.orthographicScale = maxPinch
}
if (gestureRecognize.state == .Ended) {
print("\nPinch: \(round(camera.orthographicScale))\n")
}
}
func handleTap(gestureRecognize: UIGestureRecognizer) {
print("---------------TAP-----------------")
// retrieve the SCNView
let scnView = self.view as! SCNView
let touchedPointInScene = gestureRecognize.locationInView(scnView)
let hitResults = scnView.hitTest(touchedPointInScene, options: nil)
let OverlayView = colorPanelScene.view! as SKView
let touchedPointInOverlay = gestureRecognize.locationInView(OverlayView)
// if button color are touched
if OverlayBackground.containsPoint(touchedPointInOverlay) {
print("OVERLAY: tap in \(touchedPointInOverlay)")
for (node, color) in NodesToColors {
// Check if the location of the touch is within the button's bounds
if node.containsPoint(touchedPointInOverlay) {
print("\(node.name!) -> color picked \(color.description)")
pickedColor = color
didPickColor = true
}
}
} else {//if figure is touched
// check that we clicked on at least one object
if hitResults.count > 0 && didPickColor {
// retrieved the first clicked object
let result: AnyObject! = hitResults[0]
print("OBJECT tap: \(result.node.name!)")
//Exclude floor and wall from color
if result.node! != floorNode && result.node! != wallNode && result.node! != lateralWallRight && result.node! != lateralWallLeft {
// get its material
let material = result.node!.geometry!.firstMaterial!
print("material: \(material.name!)")
// begin coloration
SCNTransaction.begin()
SCNTransaction.setAnimationDuration(0.5)
// on completion - keep color
SCNTransaction.setCompletionBlock {
SCNTransaction.begin()
SCNTransaction.setAnimationDuration(0.3)
material.diffuse.contents = self.pickedColor
SCNTransaction.commit()
}
SCNTransaction.commit()
material.diffuse.contents = pickedColor
}
}
}
print("-----------------------------------\n")
}
override func prefersStatusBarHidden() -> Bool {
return true
}
override func supportedInterfaceOrientations() -> UIInterfaceOrientationMask {
return .Landscape
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Release any cached data, images, etc that aren't in use.
}
The code starts with a setColors function that fetches the color sprite nodes from ColorPanelScene.sks (this SKScene has a strange y-axis movement, I don't know why).
func setColors() {
//Color Setup
let ColorWhite = colorPanelScene.childNodeWithName("ColorWhite") as! SKSpriteNode
let ColorRed = colorPanelScene.childNodeWithName("ColorRed") as! SKSpriteNode
let ColorBrown = colorPanelScene.childNodeWithName("ColorBrown")as! SKSpriteNode
let ColorDarkBrown = colorPanelScene.childNodeWithName("ColorDarkBrown")as! SKSpriteNode
let white = UIColor(red:1, green:0.95, blue:0.71, alpha:1)
let brown = UIColor(red:0.49, green:0.26, blue:0.17, alpha:1)
let red = UIColor(red:0.67, green:0.32, blue:0.21, alpha:1)
let darkBrown = UIColor(red:0.27, green:0.25, blue:0.21, alpha:1)
NodesToColors = [
ColorWhite: white,
ColorRed: red,
ColorBrown: brown,
ColorDarkBrown: darkBrown
]
OverlayBackground = colorPanelScene.childNodeWithName("OverlayBackground")as! SKSpriteNode
}
Then there is a blur-effect function that I would like to apply to the panel background. Do you know how to do that for an SKNode? It would be easy if I used a UIView instead, but I don't know how to layer views behind the overlay.
func blur(image image: UIImage) -> UIImage {
let radius: CGFloat = 20;
let context = CIContext(options: nil);
let inputImage = CIImage(CGImage: image.CGImage!);
let filter = CIFilter(name: "CIGaussianBlur");
filter?.setValue(inputImage, forKey: kCIInputImageKey);
filter?.setValue("\(radius)", forKey:kCIInputRadiusKey);
let result = filter?.valueForKey(kCIOutputImageKey) as! CIImage;
let rect = CGRectMake(radius * 2, radius * 2, image.size.width - radius * 4, image.size.height - radius * 4)
let cgImage = context.createCGImage(result, fromRect: rect);
let returnImage = UIImage(CGImage: cgImage);
return returnImage;
}
If you look at the buttons in ColorPanelScene.sks, they have wrong names because I used a workaround to make that panel work. It seems to match color nodes, textures, and node names in an inverted way.
That's obviously a bad implementation of a side panel. Please, can you help me build a better interactive panel? Thank you.
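Regarding the question above about blurring the panel background without going through UIImage/UIView: one option is SKEffectNode, which applies a Core Image filter to everything rendered beneath it. A rough sketch in current Swift syntax, assuming OverlayBackground can simply be re-parented under the effect node:
// Rough sketch: SKEffectNode applies a CIGaussianBlur to its children,
// so the panel background gets a blurred backdrop without UIKit layering.
let blurNode = SKEffectNode()
let blurFilter = CIFilter(name: "CIGaussianBlur")
blurFilter?.setValue(10, forKey: kCIInputRadiusKey)
blurNode.filter = blurFilter
blurNode.shouldRasterize = true      // render the blur once, not every frame
blurNode.shouldEnableEffects = true

// Re-parent the existing background sprite under the effect node.
OverlayBackground.removeFromParent()
blurNode.addChild(OverlayBackground)
colorPanelScene.addChild(blurNode)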