How to make Google Maps tiles tappable in Swift

I'm working with the Google Maps SDK on iOS 9, and I'm currently displaying tile layers over the map in different colors that each represent a value. I want to add the ability to tap a tile and display information about it, such as its actual value. The only approach I found online was to place a transparent marker on each tile and write a handler for each marker to display the value, but I think there has to be a more efficient way to do this. Can someone point me in the right direction? Thanks!
class MyMapView: UIView {
    var myMap: GMSMapView!
    var closestPoint: CLLocationCoordinate2D!

    required init?(coder aDecoder: NSCoder) {
        fatalError("This class does not support NSCoding")
    }

    init(closestPoint: CLLocationCoordinate2D!) {
        super.init(frame: CGRectZero)
        self.backgroundColor = UIVariables.blackOpaque
        self.closestPoint = closestPoint
        var camera = GMSCameraPosition.cameraWithLatitude(AppVariables.location2D!.latitude, longitude: AppVariables.location2D!.longitude, zoom: 6)
        self.myMap = GMSMapView.mapWithFrame(CGRectZero, camera: camera)
        self.myMap.mapType = GoogleMaps.kGMSTypeSatellite
        delay(seconds: 0.5) { () -> () in
            let path = GMSMutablePath()
            path.addCoordinate(self.closestPoint!)
            path.addCoordinate(AppVariables.location2D!)
            let bounds = GMSCoordinateBounds(path: path)
            self.myMap.moveCamera(GMSCameraUpdate.fitBounds(bounds, withPadding: 50.0))
            let layer = MyTileLayer()
            layer.map = self.myMap
        }
        self.myMap.translatesAutoresizingMaskIntoConstraints = false
        //self.frame = CGRectMake(0, 0, UIScreen.mainScreen().bounds.width, 400)
        self.addSubview(self.myMap)
        let viewsDictionary = ["map": self.myMap]
        let metricDictionary = ["width": self.frame.size.width/2]
        let view_constraint_V1: Array = NSLayoutConstraint.constraintsWithVisualFormat("V:|[map]-15-|", options: NSLayoutFormatOptions.AlignAllLeading, metrics: metricDictionary, views: viewsDictionary)
        let view_constraint_H1: Array = NSLayoutConstraint.constraintsWithVisualFormat("H:|-10-[map]-10-|", options: NSLayoutFormatOptions(rawValue: 0), metrics: metricDictionary, views: viewsDictionary)
        self.addConstraints(view_constraint_V1)
        self.addConstraints(view_constraint_H1)
    }

    func delay(seconds seconds: Double, completion: () -> ()) {
        let popTime = dispatch_time(DISPATCH_TIME_NOW, Int64(Double(NSEC_PER_SEC) * seconds))
        dispatch_after(popTime, dispatch_get_main_queue()) {
            completion()
        }
    }

    override func awakeFromNib() {
        super.awakeFromNib()
        // Initialization code
    }
}

class MyTileLayer: GMSTileLayer {
    var webService = WebService()

    override func requestTileForX(x: UInt, y: UInt, zoom: UInt, receiver: GMSTileReceiver!) {
        webService.getTiles(x, y: y, z: zoom, completion: { (result) -> Void in
            receiver.receiveTileWithX(x, y: y, zoom: zoom, image: UIImage(data: result))
        })
    }
}
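GMSTileLayer doesn't expose a tap callback of its own, so rather than covering the map with transparent markers, one lighter-weight direction is to listen for taps on the GMSMapView (set myMap.delegate) and convert the tapped coordinate back into the same x/y/zoom indices your tile layer renders, using standard Web Mercator tile math, then look the value up however WebService fetched the tile. A rough sketch only: it assumes this SDK version's delegate callback is mapView(_:didTapAtCoordinate:), and the value lookup/print at the end stands in for whatever your backend provides; neither comes from the code above.

extension MyMapView: GMSMapViewDelegate {

    // Standard Web Mercator math: convert a tapped coordinate into tile x/y for a zoom level.
    func tileIndicesForCoordinate(coordinate: CLLocationCoordinate2D, zoom: UInt) -> (x: UInt, y: UInt) {
        let n = pow(2.0, Double(zoom))
        let x = UInt((coordinate.longitude + 180.0) / 360.0 * n)
        let latRad = coordinate.latitude * M_PI / 180.0
        let y = UInt((1.0 - log(tan(latRad) + 1.0 / cos(latRad)) / M_PI) / 2.0 * n)
        return (x: x, y: y)
    }

    func mapView(mapView: GMSMapView, didTapAtCoordinate coordinate: CLLocationCoordinate2D) {
        // The camera zoom is fractional; tiles are requested at whole zoom levels.
        let zoom = UInt(floor(mapView.camera.zoom))
        let tile = tileIndicesForCoordinate(coordinate, zoom: zoom)
        // Hypothetical lookup: fetch/display whatever value backs this tile.
        print("Tapped tile x:\(tile.x) y:\(tile.y) zoom:\(zoom)")
    }
}

For this to fire you also need to assign self.myMap.delegate = self after the map view is created.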

Related

CGBitmapContext 2x as slow compared to CALayer's draw()

I have some code I can't change that expects to be able to draw at any time. It's the main() function in BackgroundThread below - pretend it can't be modified in any way. Running this will use 70-80% CPU.
If instead of running the thread I replicate what it is doing in View::draw() (i.e. draw 5000 white rectangles at random positions), this will use about 30% CPU.
Where's the difference coming from? Looking at Instruments, although the call stack is the same starting from CGContextFillRect, the View::draw() version only spends 16% of the time doing memset() whereas the threaded version spends 80% of the time.
The code below is the FAST version. Comment out the FAST lines and uncomment the SLOW lines to switch to the SLOW (threaded) version. Compile with swiftc test.swift -otest && ./test. I'm on macOS 10.13, integrated graphics, if that matters.
Is there anything I can do to make the threaded version as fast as the View::draw() version?
import Cocoa

let NSApp = NSApplication.shared,
    vwaitSem = DispatchSemaphore(value: 0)
var
    mainWindow: NSWindow?,
    screen: CGContext?,
    link: CVDisplayLink?

class View: NSView, CALayerDelegate {
    var lastTime: CFTimeInterval = 0
    override var acceptsFirstResponder: Bool {return true}
    required init(coder aDecoder: NSCoder) {fatalError("This class does not support NSCoding")}
    override func makeBackingLayer() -> CALayer {return CALayer()}

    override init(frame: CGRect) {
        super.init(frame: frame)
        self.wantsLayer = true
        self.layer?.contentsScale = 2.0
        self.layer?.backgroundColor = CGColor(red:0, green:0, blue:0, alpha: 1)
        self.layerContentsRedrawPolicy = NSView.LayerContentsRedrawPolicy.onSetNeedsDisplay // FAST
    }

    func draw(_ layer: CALayer, in ctx: CGContext) {
        let now = CACurrentMediaTime(), timePassed = ((now-lastTime)*1000).rounded()
        // NSLog("\(timePassed)")
        lastTime = now
        ctx.setFillColor(CGColor.white)
        ctx.setStrokeColor(CGColor.white)
        for _ in 0...5000 {
            let rect = CGRect(x: CGFloat(arc4random_uniform(640)+1), y: CGFloat(arc4random_uniform(480)+1), width:6, height:6)
            ctx.setFillColor(CGColor.white)
            ctx.fill(rect)
        }
    }
}

func displayLinkOutputCallback(_ displayLink: CVDisplayLink, _ nowPtr: UnsafePointer<CVTimeStamp>,
                               _ outputTimePtr: UnsafePointer<CVTimeStamp>, _ flagsIn: CVOptionFlags, _ flagsOut: UnsafeMutablePointer<CVOptionFlags>,
                               _ displayLinkContext: UnsafeMutableRawPointer?) -> CVReturn {
    DispatchQueue.main.async {
        // mainWindow!.contentView!.layer!.contents = screen!.makeImage() // SLOW
        mainWindow!.contentView!.display() // FAST
        vwaitSem.signal()
    }
    return kCVReturnSuccess
}

class BackgroundThread: Thread {
    var lastTime: CFTimeInterval = 0

    override func main() {
        while true {
            let now = CACurrentMediaTime(), timePassed = ((now-lastTime)*1000).rounded()
            // NSLog("\(timePassed)")
            lastTime = now
            screen?.clear(CGRect(x:0, y:0, width:640*2, height:480*2))
            for _ in 0...5000 {
                screen?.setFillColor(CGColor.white)
                screen?.setStrokeColor(CGColor.white)
                screen?.fill(CGRect(x: CGFloat(arc4random_uniform(640*2)+1), y: CGFloat(arc4random_uniform(480*2)+1), width: 6*2, height: 6*2))
            }
            vwaitSem.wait()
        }
    }
}

let width = 640, height = 480,
    appMenuItem = NSMenuItem(),
    quitMenuItem = NSMenuItem(title:"Quit",
        action:#selector(NSApplication.terminate), keyEquivalent:"q"),
    window = NSWindow(contentRect:NSMakeRect(0,0, CGFloat(width), CGFloat(height)),
        styleMask:[.closable,.titled], backing:.buffered, defer:false),
    colorProfile = ColorSyncProfileCreateWithDisplayID(0),
    colorSpace = CGColorSpace(platformColorSpaceRef: colorProfile!.toOpaque()),
    screen_ = CGContext(data: nil, width: Int(width)*2, height:Int(height)*2, bitsPerComponent:8, bytesPerRow: 0,
        space: colorSpace!, bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue),
    backgroundThread = BackgroundThread()

NSApp.setActivationPolicy(NSApplication.ActivationPolicy.regular)
NSApp.mainMenu = NSMenu()
NSApp.mainMenu?.addItem(appMenuItem)
appMenuItem.submenu = NSMenu()
appMenuItem.submenu?.addItem(quitMenuItem)
window.cascadeTopLeft(from:NSMakePoint(20,20))
window.makeKeyAndOrderFront(nil)
window.contentView = View()
window.makeFirstResponder(window.contentView)
NSApp.activate(ignoringOtherApps:true)
mainWindow = window
screen = screen_
CVDisplayLinkCreateWithCGDisplay(CGMainDisplayID(), &link)
CVDisplayLinkSetOutputCallback(link!, displayLinkOutputCallback, UnsafeMutableRawPointer(Unmanaged.passUnretained(window).toOpaque()))
CVDisplayLinkStart(link!)
// backgroundThread.start() // SLOW
NSApp.run()
I misread the note in the documentation for makeImage() and thought it would not copy the data unless it really had to. Well, Instruments shows it does copy the data. Every single frame.
So I switched to Metal and now I can draw from the background thread with the same performance/CPU usage as with CGContext alone, with no copies as far as I can tell.
Here's some working code:
import Cocoa
import MetalKit

class View: MTKView {
    var screen: CGContext?
    var commandQueue: MTLCommandQueue?
    var buffer: MTLBuffer?
    var texture: MTLTexture?
    var vwaitSem = DispatchSemaphore(value: 0)
    var backgroundThread: Thread?
    var allocationSize = 0

    func alignUp(size: Int, align: Int) -> Int {return (size+(align-1)) & ~(align-1)}

    override var acceptsFirstResponder: Bool {return true}
    required init(coder aDecoder: NSCoder) {fatalError("This class does not support NSCoding")}
    init() {super.init(frame: CGRect(x:0, y:0, width:0, height: 0), device: MTLCreateSystemDefaultDevice())}

    override func viewDidMoveToWindow() {
        layer?.contentsScale = NSScreen.main!.backingScaleFactor
        let metalLayer = layer as! CAMetalLayer
        let pixelRowAlignment = metalLayer.device!.minimumLinearTextureAlignment(for: metalLayer.pixelFormat)
        let bytesPerRow = alignUp(size: Int(layer!.frame.width)*Int(layer!.contentsScale)*4, align: pixelRowAlignment)
        let pagesize = Int(getpagesize())
        var data: UnsafeMutableRawPointer? = nil
        allocationSize = alignUp(size: bytesPerRow*Int(layer!.frame.height)*Int(layer!.contentsScale), align: pagesize)
        posix_memalign(&data, pagesize, allocationSize)
        let colorProfile = ColorSyncProfileCreateWithDisplayID(0),
            colorSpace = CGColorSpace(platformColorSpaceRef: colorProfile!.toOpaque()),
            screen_ = CGContext(data: data,
                width: Int(layer!.frame.width)*Int(layer!.contentsScale),
                height: Int(layer!.frame.height)*Int(layer!.contentsScale),
                bitsPerComponent:8, bytesPerRow: bytesPerRow,
                space: colorSpace!, bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)!,
            buffer_ = metalLayer.device!.makeBuffer(bytesNoCopy: data!, length: allocationSize, options: .storageModeManaged,
                deallocator: { pointer, length in free(self.screen!.data!) })!,
            textureDescriptor = MTLTextureDescriptor()
        textureDescriptor.pixelFormat = metalLayer.pixelFormat
        textureDescriptor.width = screen_.width
        textureDescriptor.height = screen_.height
        textureDescriptor.storageMode = buffer_.storageMode
        textureDescriptor.usage = MTLTextureUsage(rawValue: MTLTextureUsage.shaderRead.rawValue)
        texture = buffer_.makeTexture(descriptor: textureDescriptor, offset: 0, bytesPerRow: screen_.bytesPerRow)
        commandQueue = device?.makeCommandQueue()
        screen = screen_
        buffer = buffer_
        backgroundThread = BackgroundThread(screen: screen!, vwaitSem: vwaitSem)
        backgroundThread!.start()
    }

    override func draw(_ dirtyRect: NSRect) {
        if let drawable = currentDrawable {
            buffer!.didModifyRange(0..<allocationSize)
            texture!.replace(region: MTLRegionMake2D(0,0, screen!.width, screen!.height),
                mipmapLevel:0, slice:0, withBytes: screen!.data!, bytesPerRow: screen!.bytesPerRow, bytesPerImage: 0)
            let commandBuffer = commandQueue!.makeCommandBuffer()!
            let blitPass = commandBuffer.makeBlitCommandEncoder()!
            blitPass.copy(from: texture!, sourceSlice:0, sourceLevel:0, sourceOrigin: MTLOrigin(x:0,y:0,z:0),
                sourceSize: MTLSize(width:screen!.width, height:screen!.height, depth: 1),
                to: drawable.texture, destinationSlice:0, destinationLevel:0, destinationOrigin: MTLOrigin(x:0,y:0,z:0))
            blitPass.endEncoding()
            if let renderPass = currentRenderPassDescriptor {
                renderPass.colorAttachments[0].texture = drawable.texture
                renderPass.colorAttachments[0].loadAction = .load
                commandBuffer.makeRenderCommandEncoder(descriptor: renderPass)!.endEncoding()
                commandBuffer.addCompletedHandler {cb in self.vwaitSem.signal()}
                commandBuffer.present(drawable)
                commandBuffer.commit()
            }
        }
    }
}

class BackgroundThread: Thread {
    var screen: CGContext
    var vwaitSem: DispatchSemaphore
    var x = 0

    init(screen: CGContext, vwaitSem: DispatchSemaphore) {
        self.screen = screen
        self.vwaitSem = vwaitSem
    }

    override func main() {
        while true {
            // screen.clear(CGRect(x:0,y:0, width:screen.width, height:screen.height))
            // screen.setFillColor(CGColor.white)
            // screen.fill(CGRect(x:x, y:0, width:100, height:100))
            // x += 1
            screen.clear(CGRect(x:0,y:0, width:screen.width, height:screen.height))
            screen.setFillColor(CGColor.white)
            let screenWidth = UInt32(screen.width), screenHeight = UInt32(screen.height)
            for _ in 0...5000 {
                let rect = CGRect(x: CGFloat(arc4random_uniform(screenWidth+1)),
                    y: CGFloat(arc4random_uniform(screenHeight+1)), width:6, height:6)
                screen.fill(rect)
            }
            vwaitSem.wait()
        }
    }
}

let width = 640, height = 480,
    appMenuItem = NSMenuItem(),
    quitMenuItem = NSMenuItem(title:"Quit",
        action:#selector(NSApplication.terminate), keyEquivalent:"q"),
    window = NSWindow(contentRect:NSMakeRect(0,0, CGFloat(width), CGFloat(height)),
        styleMask:[.closable,.titled], backing:.buffered, defer:false)

NSApp.setActivationPolicy(NSApplication.ActivationPolicy.regular)
NSApp.mainMenu = NSMenu()
NSApp.mainMenu?.addItem(appMenuItem)
appMenuItem.submenu = NSMenu()
appMenuItem.submenu?.addItem(quitMenuItem)
window.cascadeTopLeft(from:NSMakePoint(20,20))
window.makeKeyAndOrderFront(nil)
window.contentView = View()
window.makeFirstResponder(window.contentView)
NSApp.activate(ignoringOtherApps:true)
NSApp.run()

Navigation view transition full screen to a view with corner radius

I am trying to create a home screen animation that runs after the splash/launch screen completes: the full-screen color transforms into the app logo's background color. The code below roughly achieves what I expect, but the CAShapeLayer transformation doesn't work with a corner radius. Without a corner radius it works as normal; when I try a circle/oval/corner-radius animation it looks like the gif below.
I tried a few other Stack Overflow answers that create a circle animation, but they don't work either. Here is one of those.
weak var viewTransitionContext: UIViewControllerContextTransitioning!

func animateTransition(using transitionContext: UIViewControllerContextTransitioning) {
    viewTransitionContext = transitionContext
    guard let fromVC = viewTransitionContext.viewController(forKey: .from) else { return }
    guard let toVC = viewTransitionContext.viewController(forKey: .to) else { return }
    if fromVC.isKind(of: SOGSplashViewController.self) && toVC.isKind(of: SOGHomeViewController.self) {
        guard let toVCView = transitionContext.view(forKey: .to) else { return }
        guard let fromVCView = transitionContext.view(forKey: .from) else { return }
        let containerView = transitionContext.containerView
        let labelWidth = UIDevice.width() * 0.75
        let labelHeight = labelWidth * 0.7
        let xAxis = (UIDevice.width() - labelWidth)/2.0
        let yAxis = ((UIDevice.height()/2.0) - labelHeight)/2.0
        let labelRect = CGRect(x: xAxis, y: yAxis, width: labelWidth, height: labelHeight)
        let radius = (UIDevice.height()/2.0)*0.1
        let fromFrame = fromVCView.bounds
        let animationTime = transitionDuration(using: transitionContext)
        let maskLayer = CAShapeLayer()
        maskLayer.isOpaque = false
        maskLayer.fillColor = fromVCView.backgroundColor?.cgColor
        maskLayer.backgroundColor = UIColor.clear.cgColor
        maskLayer.path = toPathValue.cgPath
        let maskAnimationLayer = CABasicAnimation(keyPath: "path")
        maskAnimationLayer.fromValue = (UIBezierPath(rect: fromFrame)).cgPath
        maskAnimationLayer.toValue = toPathValue.cgPath
        maskAnimationLayer.duration = animationTime
        maskAnimationLayer.delegate = self as? CAAnimationDelegate
        containerView.addSubview(fromVCView)
        containerView.addSubview(toVCView)
        fromVCView.layer.add(maskAnimationLayer, forKey: nil)
        maskLayer.add(maskAnimationLayer, forKey: "path")
        containerView.layer.addSublayer(maskLayer)
        let deadLineTime = DispatchTime.now() + .seconds(1)
        DispatchQueue.main.asyncAfter(deadline: deadLineTime) {
            UIView.animate(withDuration: 0.2, animations: {
                maskLayer.opacity = 0
            }, completion: { (isSuccess) in
                self.viewTransitionContext.completeTransition(true)
            })
        }
    }
}
Transforming a rectangular path into a rounded rectangular path is a very complex operation if you do it in a generic way through Core Animation. You are better off using the cornerRadius property of CALayer, which is animatable.
Here is a working example with a constraint-based animation:
class ViewController: UIViewController {
    @IBOutlet var constraints: [NSLayoutConstraint]!
    @IBOutlet var contentView: UIView!

    override func viewDidLoad() {
        super.viewDidLoad()
    }

    override func viewDidAppear(_ animated: Bool) {
        self.contentView.layer.cornerRadius = 10.0
        self.animate(nil)
    }

    @IBAction func animate(_ sender: Any?) {
        for c in constraints {
            c.constant = 40.0
        }
        UIView.animate(withDuration: 4.0) {
            self.view.layoutIfNeeded()
            self.contentView.layer.cornerRadius = 40.0
        }
    }
}
contentView points to the inner view which should be animated, and constraints refer to the four layout constraints defining the distances from the view controller's view to the content view.
This is just a simple, rough example, which can certainly be improved.
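If you want to stay at the layer level inside animateTransition(using:), as the original code does, the same idea can be expressed with an explicit CABasicAnimation on the cornerRadius key path instead of morphing a CAShapeLayer path. A minimal sketch; the view, radius, and duration are placeholders rather than values from the question:

import UIKit

// Animate a layer's cornerRadius directly instead of animating a CAShapeLayer path.
func animateCornerRadius(of view: UIView, to radius: CGFloat, duration: CFTimeInterval) {
    let animation = CABasicAnimation(keyPath: "cornerRadius")
    animation.fromValue = view.layer.cornerRadius
    animation.toValue = radius
    animation.duration = duration
    view.layer.add(animation, forKey: "cornerRadius")
    // Update the model value as well, so the layer keeps the final radius when the animation ends.
    view.layer.cornerRadius = radius
}

Combined with a bounds or constraint animation on the same view, this produces the shrinking rounded-rectangle effect without interpolating between paths.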

UIProgressView setProgress issue

I've encountered an issue using a UIProgressView where low values (1% - about 10%) look off. You can see with the example above that 97% looks accurate while 2% does not.
Here's the code for setting colors:
self.progressView.trackTintColor = UIColor.green.withAlphaComponent(0.3)
self.progressView.tintColor = UIColor.green.withAlphaComponent(1.0)
But if I comment out either the trackTintColor or the tintColor, then the 2% looks correct. Why does using them together cause this issue? Is it just an Xcode bug? Has anyone resolved this before?
I've experienced the same issue in my project. For me it's fixed by using progressTintColor instead of tintColor.
progressView.progressTintColor = UIColor.green.withAlphaComponent(1.0)
progressView.trackTintColor = UIColor.green.withAlphaComponent(0.3)
You need to create a color image for the track.
Swift 3 example:
class ViewController: UIViewController {
    @IBOutlet weak var progressView: UIProgressView!

    @IBAction func lessButton(_ sender: UIButton) {
        let percentage = 20
        let invertedValue = Float(100 - percentage) / 100
        progressView.setProgress(invertedValue, animated: true)
    }

    @IBAction func moreButton(_ sender: UIButton) {
        let percentage = 80
        let invertedValue = Float(100 - percentage) / 100
        progressView.setProgress(invertedValue, animated: true)
    }

    override func viewDidLoad() {
        super.viewDidLoad()
        //create gradient view the size of the progress view
        let gradientView = GradientView(frame: progressView.bounds)
        //convert gradient view to image, flip horizontally and assign as the track image
        progressView.trackImage = UIImage(view: gradientView).withHorizontallyFlippedOrientation()
        //invert the progress view
        progressView.transform = CGAffineTransform(scaleX: -1.0, y: -1.0)
        progressView.progressTintColor = UIColor.black
        progressView.progress = 1
    }
}

extension UIImage {
    convenience init(view: UIView) {
        UIGraphicsBeginImageContext(view.frame.size)
        view.layer.render(in: UIGraphicsGetCurrentContext()!)
        let image = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        self.init(cgImage: (image?.cgImage)!)
    }
}

@IBDesignable
class GradientView: UIView {
    private var gradientLayer = CAGradientLayer()
    private var vertical: Bool = false

    override func draw(_ rect: CGRect) {
        super.draw(rect)
        // Drawing code
        //fill view with gradient layer
        gradientLayer.frame = self.bounds
        //style and insert layer if not already inserted
        if gradientLayer.superlayer == nil {
            gradientLayer.startPoint = CGPoint(x: 0, y: 0)
            gradientLayer.endPoint = vertical ? CGPoint(x: 0, y: 1) : CGPoint(x: 1, y: 0)
            gradientLayer.colors = [UIColor.green.cgColor, UIColor.red.cgColor]
            gradientLayer.locations = [0.0, 1.0]
            self.layer.insertSublayer(gradientLayer, at: 0)
        }
    }
}

I can't move around a loaded SceneKit model

Below is the function that loads my object. Whenever I run the app, the object loads perfectly, but when I try to get close or move around, it moves along with the camera. I tried loading using a hit test; that works, but then collisions don't work when I use the hit results and world positioning.
func addBackboard() {
    guard let bucketScene = SCNScene(named: "art.scnassets/BucketBlue.scn") else {
        return
    }
    guard let bucketNode = bucketScene.rootNode.childNode(withName: "tube", recursively: false) else {
        return
    }
    bucketNode.scale = SCNVector3Make(0.5, 0.5, 0.5)
    bucketNode.position = SCNVector3(x: 0, y: -3.5, z: -5)
    let physicsShape = SCNPhysicsShape(node: bucketNode, options: [SCNPhysicsShape.Option.type: SCNPhysicsShape.ShapeType.concavePolyhedron])
    let physicsBody = SCNPhysicsBody(type: .static, shape: physicsShape)
    bucketNode.physicsBody = physicsBody
    sceneView.scene.rootNode.addChildNode(bucketNode)
}
I found some Apple documentation that might help. Summary: "To track the static positions and orientations of real or virtual objects relative to the camera, create anchor objects and use the add(anchor:) method to add them to your AR session."
And then, did you implement the ARSCNViewDelegate methods?
I suppose your code should look something like the following:
class ViewController: UIViewController, ARSCNViewDelegate {
    @IBOutlet var sceneView: ARSCNView!
    //.................................

    func addBackboard() {
        sceneView.delegate = self
        guard let scene = SCNScene(named: "art.scnassets/BucketBlue.scn") else {
            return
        }
        sceneView.scene = scene
        let bucketNode = SCNNode()
        bucketNode.geometry = SCNTube()
        bucketNode.scale = SCNVector3(0.5, 0.5, 0.5)
        bucketNode.position = SCNVector3(x: 0, y: -3.5, z: -5)
        let physicsShape = SCNPhysicsShape(
            node: bucketNode,
            options: [SCNPhysicsShape.Option.type: SCNPhysicsShape.ShapeType.concavePolyhedron])
        let physicsBody = SCNPhysicsBody(type: .static, shape: physicsShape)
        bucketNode.physicsBody = physicsBody
        scene.rootNode.addChildNode(bucketNode)
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        let configuration = ARWorldTrackingConfiguration()
        sceneView.session.run(configuration)
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        sceneView.session.pause()
    }
}
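The code above still parents the bucket to the scene's root node. If you want to follow the anchor approach from the quoted documentation, the sketch below extends that ViewController: it adds an ARAnchor at a fixed world transform and attaches the geometry in the ARSCNViewDelegate callback, so the node is pinned to that world position. The method name addBucketAnchor and the offsets are illustrative, not part of the question's code.

import ARKit
import SceneKit

extension ViewController {

    func addBucketAnchor() {
        // Place the anchor 5 m in front of and 3.5 m below the session's starting position.
        var transform = matrix_identity_float4x4
        transform.columns.3.y = -3.5
        transform.columns.3.z = -5
        sceneView.session.add(anchor: ARAnchor(transform: transform))
    }

    // ARSCNViewDelegate: SceneKit creates a node for each anchor; attach the bucket to it.
    func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
        guard let bucketScene = SCNScene(named: "art.scnassets/BucketBlue.scn"),
              let bucketNode = bucketScene.rootNode.childNode(withName: "tube", recursively: false) else { return }
        bucketNode.scale = SCNVector3(0.5, 0.5, 0.5)
        node.addChildNode(bucketNode)
    }
}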

Access a geometry child node in SceneKit

I am trying to access the child node boxNode and then rotate it using the input from a pan gesture recognizer. How should I access this node so I can manipulate it? Should I declare the cube node in a global scope and modify it that way? I am new to Swift, so I apologize if this code looks sloppy. I would like to add the rotation actions inside my panGesture() function. Thanks!
import UIKit
import SceneKit

class GameViewController: UIViewController {
    var scnView: SCNView!
    var scnScene: SCNScene!

    override func viewDidLoad() {
        super.viewDidLoad()
        setupView()
        setupScene()
    }

    // override var shouldAutorotate: Bool {
    //     return true
    // }
    //
    // override var prefersStatusBarHidden: Bool {
    //     return true
    // }

    func setupView() {
        scnView = self.view as! SCNView
    }

    func setupScene() {
        scnScene = SCNScene()
        scnView.scene = scnScene
        scnView.backgroundColor = UIColor.blueColor()
        initCube()
        initLight()
        initCamera()
    }

    func initCube() {
        var cubeGeometry: SCNGeometry
        cubeGeometry = SCNBox(width: 1.0, height: 1.0, length: 1.0, chamferRadius: 0.0)
        let boxNode = SCNNode(geometry: cubeGeometry)
        scnScene.rootNode.addChildNode(boxNode)
    }

    func initLight() {
        let light = SCNLight()
        light.type = SCNLightTypeAmbient
        let lightNode = SCNNode()
        lightNode.light = light
        light.castsShadow = false
        lightNode.position = SCNVector3(x: 1.5, y: 1.5, z: 1.5)
        scnScene.rootNode.addChildNode(lightNode)
    }

    func initCamera() {
        let camera = SCNCamera()
        let cameraNode = SCNNode()
        cameraNode.camera = camera
        cameraNode.position = SCNVector3(x: 0.0, y: 0.0, z: 5.0)
        scnScene.rootNode.addChildNode(cameraNode)
    }

    func initRecognizer() {
        let panTestRecognizer = UIPanGestureRecognizer(target: self, action: #selector(GameViewController.panGesture(_:)))
        scnView.addGestureRecognizer(panTestRecognizer)
    }

    //TAKE GESTURE INPUT AND ROTATE CUBE ACCORDINGLY
    func panGesture(sender: UIPanGestureRecognizer) {
        //let translation = sender.translationInView(sender.view!)
    }
}
First of all, you should update your Xcode; it looks like you are using an older Swift version (SCNLightTypeAmbient is .ambient in Swift 3).
The way to go is to give your nodes names so you can identify them:
myChildNode.name = "FooChildBar"
and then call this on your parent node:
let theNodeYouAreLookingFor = parentNode.childNode(withName: "FooChildBar", recursively: true)
You can also use this on your scene's root node
let theNodeYouAreLookingFor = scene.rootNode.childNode(withName: "FooChildBar", recursively: true)
which will look at all nodes in your scene and return the one that you gave the name.
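Tying that back to the question: give the box a name when you create it (for example boxNode.name = "box" in initCube()), then look it up inside the pan handler and rotate it from the gesture's translation. A rough sketch in Swift 3+ naming; the name "box" and the 0.01 sensitivity factor are arbitrary choices, not something from the question:

// Inside GameViewController: rotate the named box from the pan gesture.
@objc func panGesture(_ sender: UIPanGestureRecognizer) {
    guard let boxNode = scnScene.rootNode.childNode(withName: "box", recursively: true) else { return }
    let translation = sender.translation(in: sender.view)
    // Horizontal drag spins the cube around Y, vertical drag around X.
    boxNode.eulerAngles.y += Float(translation.x) * 0.01
    boxNode.eulerAngles.x += Float(translation.y) * 0.01
    // Reset so each callback delivers only the incremental movement.
    sender.setTranslation(.zero, in: sender.view)
}

Note that in the question's code initRecognizer() is never called from setupScene(), so the gesture recognizer also needs to be added for any of this to fire.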