MetalKit - Drawable.texture assertion error - swift

I am new to MetalKit and am trying to convert this tutorial from a playground back to an OSX app:
import MetalKit

public class MetalView: MTKView {
    var queue: MTLCommandQueue! = nil
    var cps: MTLComputePipelineState! = nil

    required public init(coder: NSCoder) {
        super.init(coder: coder)
        device = MTLCreateSystemDefaultDevice()
        registerShaders()
    }

    override public func drawRect(dirtyRect: NSRect) {
        super.drawRect(dirtyRect)
        if let drawable = currentDrawable {
            let command_buffer = queue.commandBuffer()
            let command_encoder = command_buffer.computeCommandEncoder()
            command_encoder.setComputePipelineState(cps)
            command_encoder.setTexture(drawable.texture, atIndex: 0)
            let threadGroupCount = MTLSizeMake(8, 8, 1)
            let threadGroups = MTLSizeMake(drawable.texture.width / threadGroupCount.width, drawable.texture.height / threadGroupCount.height, 1)
            command_encoder.dispatchThreadgroups(threadGroups, threadsPerThreadgroup: threadGroupCount)
            command_encoder.endEncoding()
            command_buffer.presentDrawable(drawable)
            command_buffer.commit()
        }
    }

    func registerShaders() {
        queue = device!.newCommandQueue()
        do {
            let library = device!.newDefaultLibrary()!
            let kernel = library.newFunctionWithName("compute")!
            cps = try device!.newComputePipelineStateWithFunction(kernel)
        } catch let e {
            Swift.print("\(e)")
        }
    }
}
I got an error at the line:
command_encoder.setTexture(drawable.texture, atIndex: 0)
failed assertion `frameBufferOnly texture not supported for compute.'
How can I resolve this?

If you want to write to a drawable's texture from a compute function, you'll need to tell the MTKView that it should configure its layer not to be framebuffer-only:
metalView.framebufferOnly = false
With this value set to false, your drawable will give you a texture with the shaderWrite usage flag set, which is required when writing a texture from a shader function.
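For example, a minimal sketch based on the question's MetalView subclass (setting the flag in the initializer is just one place that works; anywhere before the first draw is fine):
required public init(coder: NSCoder) {
    super.init(coder: coder)
    device = MTLCreateSystemDefaultDevice()
    // opting out of framebuffer-only so every drawable texture is created
    // with the .shaderWrite usage the compute pass needs
    framebufferOnly = false
    registerShaders()
}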

I had the same issue even after setting framebufferOnly to false (and printing the boolean right before using the drawable, to double-check).
But when I turned off GPU frame capture, the assertion no longer occurred (for now at least). I still don't know why.
Menu > Scheme > Edit Scheme > Run > Options > GPU Frame Capture: Disabled


Use MetalView with SwiftUI? How do I get it to display something?

I'm stuck with SwiftUI and Metal, to the point of being about to give up.
I got this example from https://developer.apple.com/forums/thread/119112?answerId=654964022#654964022 :
import MetalKit

struct MetalView: NSViewRepresentable {
    func makeCoordinator() -> Coordinator {
        Coordinator(self)
    }

    func makeNSView(context: NSViewRepresentableContext<MetalView>) -> MTKView {
        let mtkView = MTKView()
        mtkView.delegate = context.coordinator
        mtkView.preferredFramesPerSecond = 60
        mtkView.enableSetNeedsDisplay = true
        if let metalDevice = MTLCreateSystemDefaultDevice() {
            mtkView.device = metalDevice
        }
        mtkView.framebufferOnly = false
        mtkView.clearColor = MTLClearColor(red: 0, green: 0, blue: 0, alpha: 0)
        mtkView.drawableSize = mtkView.frame.size
        mtkView.enableSetNeedsDisplay = true
        return mtkView
    }

    func updateNSView(_ nsView: MTKView, context: NSViewRepresentableContext<MetalView>) {
    }

    class Coordinator: NSObject, MTKViewDelegate {
        var parent: MetalView
        var metalDevice: MTLDevice!
        var metalCommandQueue: MTLCommandQueue!

        init(_ parent: MetalView) {
            self.parent = parent
            if let metalDevice = MTLCreateSystemDefaultDevice() {
                self.metalDevice = metalDevice
            }
            self.metalCommandQueue = metalDevice.makeCommandQueue()!
            super.init()
        }

        func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {
        }

        func draw(in view: MTKView) {
            guard let drawable = view.currentDrawable else {
                return
            }
            let commandBuffer = metalCommandQueue.makeCommandBuffer()
            let rpd = view.currentRenderPassDescriptor
            rpd?.colorAttachments[0].clearColor = MTLClearColorMake(0, 1, 0, 1)
            rpd?.colorAttachments[0].loadAction = .clear
            rpd?.colorAttachments[0].storeAction = .store
            let re = commandBuffer?.makeRenderCommandEncoder(descriptor: rpd!)
            re?.endEncoding()
            commandBuffer?.present(drawable)
            commandBuffer?.commit()
        }
    }
}
... but I can't get my head around how to use this MetalView(), which does seem to work when I call it from a SwiftUI view, to display data. I want to use it to display a CIImage which will be filtered and manipulated with CIFilters...
Can someone please point me in the right direction on how to tell this view how to display something? I think I need it to display the content of a texture but tried countless hours and ended up starting from scratch for more countless times...
This is how I run my image filters now, but it results in very slow sliders, which is why I decided to try learning about Metal... but it's been really time-consuming and frustrating due to the lack of documentation...
func ciExposure(inputImage: CIImage, inputEV: Double) -> CIImage {
    let filter = CIFilter(name: "CIExposureAdjust")!
    filter.setValue(inputImage, forKey: kCIInputImageKey)
    filter.setValue(inputEV, forKey: kCIInputEVKey)
    return filter.outputImage!
}
I think I need to take that filter.outputImage and pass it on to the MetalView somehow?
Any help is really, really appreciated...
Apple's WWDC 2022 contained a tutorial/video entitled "Display EDR Content with Core Image, Metal, and SwiftUI" which describes how to blend Core Image with Metal and SwiftUI. It points to some new sample code entitled "Generating an Animation with a Core Image Render Destination" (here).
This sample project is very CoreImage-centric (which should suit your purposes nicely), but I wish Apple would post more sample-code examples showing Metal integrated with SwiftUI.
I have a small Core Image + SwiftUI sample project on Github that might be a good starting point for you. It doesn't cover a lot yet, but it demonstrates how to display filtered camera frames already.
Especially check out the draw function of the view. It's used to render a CIImage into the MTKView (you can do the same in your delegate's draw function).
Ok so this does the trick for me:
func draw(in view: MTKView) {
    guard let drawable = view.currentDrawable else {
        return
    }
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let commandBuffer = metalCommandQueue.makeCommandBuffer()
    let rpd = view.currentRenderPassDescriptor
    rpd?.colorAttachments[0].clearColor = MTLClearColorMake(0, 1, 0, 1)
    rpd?.colorAttachments[0].loadAction = .clear
    rpd?.colorAttachments[0].storeAction = .store
    let re = commandBuffer?.makeRenderCommandEncoder(descriptor: rpd!)
    re?.endEncoding()
    context.render((AppState.shared.rawImage ?? AppState.shared.rawImageOriginal)!,
                   to: drawable.texture,
                   commandBuffer: commandBuffer,
                   bounds: AppState.shared.rawImageOriginal!.extent,
                   colorSpace: colorSpace)
    commandBuffer?.present(drawable)
    commandBuffer?.commit()
}
AppState.shared.rawImage is the CIImage I get from my filtering function.
The context is made somewhere else but should be:
context = CIContext(mtlDevice: metalDevice)
Next up is adding the centering part of the code provided by Frank Schlegel.
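In the meantime, here is a rough sketch of what that centering could look like (my own guess, not Frank Schlegel's code): scale the CIImage to fit the drawable, then translate it into the middle before rendering.
let image = (AppState.shared.rawImage ?? AppState.shared.rawImageOriginal)!
let drawableSize = view.drawableSize
// scale to fit the drawable while preserving the aspect ratio
let scale = min(drawableSize.width / image.extent.width,
                drawableSize.height / image.extent.height)
let scaled = image.transformed(by: CGAffineTransform(scaleX: scale, y: scale))
// translate so the scaled image sits in the middle of the drawable
let dx = (drawableSize.width - scaled.extent.width) / 2 - scaled.extent.origin.x
let dy = (drawableSize.height - scaled.extent.height) / 2 - scaled.extent.origin.y
let centered = scaled.transformed(by: CGAffineTransform(translationX: dx, y: dy))
context.render(centered,
               to: drawable.texture,
               commandBuffer: commandBuffer,
               bounds: CGRect(origin: .zero, size: drawableSize),
               colorSpace: colorSpace)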

How do I update a CALayer with a CVPixelBuffer/IOSurface?

I have an IOSurface-backed CVPixelBuffer that is getting updated from an outside source at 30fps. I want to render a preview of the image data in an NSView -- what's the best way for me to do that?
I can directly set the .contents of a CALayer on the view, but that only updates the first time my view updates (or if, say, I resize the view). I've been poring over the docs but I can't figure out the correct invocation of needsDisplay on the layer or view to let the view infrastructure know to refresh itself, especially when updates are coming from outside the view.
Ideally I'd just bind the IOSurface to my layer and any changes I make to it would be propagated, but I'm not sure if that's possible.
class VideoPreviewController: NSViewController, VideoFeedConsumer {
    let customLayer: CALayer = CALayer()

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do view setup here.
        print("Loaded our video preview")
        view.layer?.addSublayer(customLayer)
        customLayer.frame = view.frame
        // register our view with the browser service
        VideoFeedBrowser.instance.registerConsumer(self)
    }

    override func viewWillDisappear() {
        // deregister our view from the video feed
        VideoFeedBrowser.instance.deregisterConsumer(self)
        super.viewWillDisappear()
    }

    // This callback gets called at 30fps whenever the pixelbuffer is updated
    @objc func updateFrame(pixelBuffer: CVPixelBuffer) {
        guard let surface = CVPixelBufferGetIOSurface(pixelBuffer)?.takeUnretainedValue() else {
            print("pixelbuffer isn't IOSurface backed! noooooo!")
            return
        }
        // Try and tell the view to redraw itself with new contents?
        // These methods don't work
        //self.view.setNeedsDisplay(self.view.visibleRect)
        //self.customLayer.setNeedsDisplay()
        self.customLayer.contents = surface
    }
}
Here's my attempt at a scaling version that's NSView-based rather than NSViewController-based, which also doesn't update correctly (or scale correctly, for that matter):
class VideoPreviewThumbnail: NSView, VideoFeedConsumer {
    required init?(coder decoder: NSCoder) {
        super.init(coder: decoder)
        self.wantsLayer = true
        // register our view with the browser service
        VideoFeedBrowser.instance.registerConsumer(self)
    }

    override init(frame frameRect: NSRect) {
        super.init(frame: frameRect)
        self.wantsLayer = true
        // register our view with the browser service
        VideoFeedBrowser.instance.registerConsumer(self)
    }

    deinit {
        VideoFeedBrowser.instance.deregisterConsumer(self)
    }

    override func updateLayer() {
        // Do I need to put something here?
        print("update layer")
    }

    @objc
    func updateFrame(pixelBuffer: CVPixelBuffer) {
        guard let surface = CVPixelBufferGetIOSurface(pixelBuffer)?.takeUnretainedValue() else {
            print("pixelbuffer isn't IOSurface backed! noooooo!")
            return
        }
        self.layer?.contents = surface
        self.layer?.transform = CATransform3DMakeScale(
            self.frame.width / CGFloat(CVPixelBufferGetWidth(pixelBuffer)),
            self.frame.height / CGFloat(CVPixelBufferGetHeight(pixelBuffer)),
            CGFloat(1))
    }
}
What am I missing?
Maybe I'm wrong, but I think you are updating your NSView on a background thread. (I suppose the callback to updateFrame happens on a background thread.)
If I'm right, when you want to update the NSView, convert your pixelBuffer to whatever you want (an NSImage?) and then dispatch it on the main thread.
Pseudocode (I don't work often with CVPixelBuffer, so I'm not sure this is the right way to convert to an NSImage):
let ciImage = CIImage(cvImageBuffer: pixelBuffer)
let context = CIContext(options: nil)
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let cgImage = context.createCGImage(ciImage, from: CGRect(x: 0, y: 0, width: width, height: height))!
let nsImage = NSImage(cgImage: cgImage, size: CGSize(width: width, height: height))
DispatchQueue.main.async {
    // assign the NSImage to your NSView here
}
Another catch: I did some tests, and it seems that you cannot assign an IOSurface directly to the contents of a CALayer.
I tried with this:
let textureImageWidth = 1024
let textureImageHeight = 1024

let macPixelFormatString = "ARGB"
var macPixelFormat: UInt32 = 0
for c in macPixelFormatString.utf8.reversed() {
    macPixelFormat *= 256
    macPixelFormat += UInt32(c)
}

let ioSurface = IOSurfaceCreate([kIOSurfaceWidth: textureImageWidth,
                                 kIOSurfaceHeight: textureImageHeight,
                                 kIOSurfaceBytesPerElement: 4,
                                 kIOSurfaceBytesPerRow: textureImageWidth * 4,
                                 kIOSurfaceAllocSize: textureImageWidth * textureImageHeight * 4,
                                 kIOSurfacePixelFormat: macPixelFormat] as CFDictionary)!

IOSurfaceLock(ioSurface, IOSurfaceLockOptions.readOnly, nil)
let test = CIImage(ioSurface: ioSurface)
IOSurfaceUnlock(ioSurface, IOSurfaceLockOptions.readOnly, nil)

v1?.layer?.contents = ioSurface
where v1 is my view. No effect.
Even with a CIImage there is no effect (only the last few lines change):
IOSurfaceLock(ioSurface, IOSurfaceLockOptions.readOnly, nil)
let test = CIImage(ioSurface: ioSurface)
IOSurfaceUnlock(ioSurface, IOSurfaceLockOptions.readOnly, nil)
v1?.layer?.contents = test
If I create a CGImage, it works:
IOSurfaceLock(ioSurface, IOSurfaceLockOptions.readOnly, nil)
let test = CIImage(ioSurface: ioSurface)
IOSurfaceUnlock(ioSurface, IOSurfaceLockOptions.readOnly, nil)
let context = CIContext.init()
let img = context.createCGImage(test, from: test.extent)
v1?.layer?.contents = img
I encountered this problem myself, and the solution is to double-buffer the IOSurface source: use two IOSurface objects instead of one, render to the current surface, set that surface as the layer contents, and then on the next rendering pass use the alternate (back/front) surface, then swap.
It would appear that setting CALayer.contents twice to the same CVPixelBufferRef has no effect. However, if you alternate between two IOSurfaceRefs it works wonderfully.
It may also be possible to invalidate the layer contents by setting it to nil and then resetting it. I did not try that case, but I am using the double-buffer technique.
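A minimal sketch of that double-buffering idea (the type and names here are my own, not from the answer):
import IOSurface
import QuartzCore

final class DoubleBufferedSurfacePresenter {
    private let surfaces: [IOSurfaceRef]   // exactly two pre-created surfaces
    private var frontIndex = 0

    init(front: IOSurfaceRef, back: IOSurfaceRef) {
        self.surfaces = [front, back]
    }

    // The surface to render the next frame into (the one not currently displayed).
    var backSurface: IOSurfaceRef { surfaces[1 - frontIndex] }

    // After rendering into backSurface, publish it and swap roles.
    func present(on layer: CALayer) {
        frontIndex = 1 - frontIndex
        // Because the contents object alternates every frame, Core Animation
        // sees a change and actually refreshes the layer.
        layer.contents = surfaces[frontIndex]
    }
}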
If you have some IBActions that update it, create an observed variable with a didSet block and change its value whenever an IBAction is triggered. Also remember to put the code you want to run on each update inside that block.
I'd suggest making the variable an Int, setting its default value to 0, and adding 1 to it every time it updates.
And for the part where you ask about showing the image data in an NSView, you can use an NSImageView (a subclass of NSView), so that does the job.
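A tiny sketch of that pattern (all names here are hypothetical, just to illustrate the didSet idea):
import Cocoa
import IOSurface

class PreviewController: NSViewController {
    let customLayer = CALayer()
    var latestSurface: IOSurface?   // hypothetical: whatever currently holds the frame

    // Observed counter: bump it from any IBAction and didSet re-runs the update code.
    var refreshTick: Int = 0 {
        didSet {
            customLayer.contents = latestSurface
        }
    }

    @IBAction func filterChanged(_ sender: Any) {
        refreshTick += 1
    }
}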
You need to convert the pixel buffer to a CGImage and set it as the layer contents so that the main view's layer is updated.
Please try this code:
@objc
func updateFrame(pixelBuffer: CVPixelBuffer) {
    CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
    defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly) }

    guard let baseAddr = CVPixelBufferGetBaseAddress(pixelBuffer) else { return }
    let width = CVPixelBufferGetWidth(pixelBuffer)
    let height = CVPixelBufferGetHeight(pixelBuffer)
    let colorSpace = CGColorSpaceCreateDeviceRGB()

    // wrap the pixel buffer's memory in a bitmap context and snapshot it as a CGImage
    guard let cgContext = CGContext(data: baseAddr,
                                    width: width,
                                    height: height,
                                    bitsPerComponent: 8,
                                    bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer),
                                    space: colorSpace,
                                    bitmapInfo: CGImageAlphaInfo.noneSkipLast.rawValue),
          let cgImage = cgContext.makeImage() else {
        return
    }

    self.layer?.contents = cgImage
}

Why is an iPhone XS getting worse CPU performance when using the camera live than an iPhone 6S Plus?

I'm using live camera output to update a CIImage on a MTKView. My main issue is a large, negative performance difference where an older iPhone gets better CPU performance than a newer one, despite every setting I've come across being the same.
This is a lengthy post, but I decided to include these details since they could be important to the cause of this problem. Please let me know what else I can include.
Below, I have my captureOutput function with two debug bools that I can turn on and off while running. I used this to try to determine the cause of my issue.
applyLiveFilter - bool whether or not to manipulate the CIImage with a CIFilter.
updateMetalView - bool whether or not to update the MTKView's CIImage.
// live output from camera
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    /*
     Create CIImage from camera.
     Here I save a few percent of CPU by using a function
     to convert a sampleBuffer to a Metal texture, but
     whether I use this or the commented out code
     (without captureOutputMTLOptions) does not have
     significant impact.
    */
    guard let texture: MTLTexture = convertToMTLTexture(sampleBuffer: sampleBuffer) else {
        return
    }
    var cameraImage: CIImage = CIImage(mtlTexture: texture, options: captureOutputMTLOptions)!

    var transform: CGAffineTransform = .identity
    transform = transform.scaledBy(x: 1, y: -1)
    transform = transform.translatedBy(x: 0, y: -cameraImage.extent.height)
    cameraImage = cameraImage.transformed(by: transform)

    /*
    // old non-Metal way of getting the ciimage from the cvPixelBuffer
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        return
    }
    var cameraImage: CIImage = CIImage(cvPixelBuffer: pixelBuffer)
    */

    var orientation = UIImage.Orientation.right
    if isFrontCamera {
        orientation = UIImage.Orientation.leftMirrored
    }

    // apply filter to camera image
    if debug_applyLiveFilter {
        cameraImage = self.applyFilterAndReturnImage(ciImage: cameraImage, orientation: orientation, currentCameraRes: currentCameraRes!)
    }

    DispatchQueue.main.async() {
        if debug_updateMetalView {
            self.MTLCaptureView!.image = cameraImage
        }
    }
}
Below is a chart of results between both phones toggling the different combinations of bools discussed above:
Even without the Metal view's CIImage updating and with no filters being applied, the iPhone XS's CPU usage is 2% higher than the iPhone 6S Plus's, which isn't a significant overhead, but it makes me suspect that the camera capture is somehow configured differently between the devices.
My AVCaptureSession's preset is set identically between both phones (AVCaptureSession.Preset.hd1280x720).
The CIImage created from captureOutput is the same size (extent) between both phones.
Are there any settings I need to set manually on these two phones' AVCaptureDevice configurations, including activeFormat properties, to make them the same between devices?
The settings I have now are:
if let captureDevice = AVCaptureDevice.default(for: AVMediaType.video) {
    do {
        try captureDevice.lockForConfiguration()
        captureDevice.isSubjectAreaChangeMonitoringEnabled = true
        captureDevice.focusMode = AVCaptureDevice.FocusMode.continuousAutoFocus
        captureDevice.exposureMode = AVCaptureDevice.ExposureMode.continuousAutoExposure
        captureDevice.unlockForConfiguration()
    } catch {
        // Handle errors here
        print("There was an error focusing the device's camera")
    }
}
My MTKView is based on code written by Simon Gladman, with some edits for performance and to scale the render before it is scaled up to the width of the screen using Core Animation, as suggested by Apple.
class MetalImageView: MTKView {
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    var textureCache: CVMetalTextureCache?
    var sourceTexture: MTLTexture!

    lazy var commandQueue: MTLCommandQueue = { [unowned self] in
        return self.device!.makeCommandQueue()
    }()!

    lazy var ciContext: CIContext = { [unowned self] in
        return CIContext(mtlDevice: self.device!)
    }()

    override init(frame frameRect: CGRect, device: MTLDevice?) {
        super.init(frame: frameRect,
                   device: device ?? MTLCreateSystemDefaultDevice())
        if super.device == nil {
            fatalError("Device doesn't support Metal")
        }
        CVMetalTextureCacheCreate(kCFAllocatorDefault, nil, self.device!, nil, &textureCache)
        framebufferOnly = false
        enableSetNeedsDisplay = true
        isPaused = true
        preferredFramesPerSecond = 30
    }

    required init(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    // The image to display
    var image: CIImage? {
        didSet {
            setNeedsDisplay()
        }
    }

    override func draw(_ rect: CGRect) {
        guard var image = image,
              let targetTexture: MTLTexture = currentDrawable?.texture else {
            return
        }
        let commandBuffer = commandQueue.makeCommandBuffer()
        let customDrawableSize: CGSize = drawableSize
        let bounds = CGRect(origin: CGPoint.zero, size: customDrawableSize)

        let originX = image.extent.origin.x
        let originY = image.extent.origin.y
        let scaleX = customDrawableSize.width / image.extent.width
        let scaleY = customDrawableSize.height / image.extent.height
        let scale = min(scaleX * IVScaleFactor, scaleY * IVScaleFactor)
        image = image
            .transformed(by: CGAffineTransform(translationX: -originX, y: -originY))
            .transformed(by: CGAffineTransform(scaleX: scale, y: scale))

        ciContext.render(image,
                         to: targetTexture,
                         commandBuffer: commandBuffer,
                         bounds: bounds,
                         colorSpace: colorSpace)
        commandBuffer?.present(currentDrawable!)
        commandBuffer?.commit()
    }
}
My AVCaptureSession (captureSession) and AVCaptureVideoDataOutput (videoOutput) are setup below:
func setupCameraAndMic() {
    let backCamera = AVCaptureDevice.default(for: AVMediaType.video)

    var error: NSError?
    var videoInput: AVCaptureDeviceInput!
    do {
        videoInput = try AVCaptureDeviceInput(device: backCamera!)
    } catch let error1 as NSError {
        error = error1
        videoInput = nil
        print(error!.localizedDescription)
    }

    if error == nil &&
        captureSession!.canAddInput(videoInput) {

        guard CVMetalTextureCacheCreate(kCFAllocatorDefault, nil, MetalDevice, nil, &textureCache) == kCVReturnSuccess else {
            print("Error: could not create a texture cache")
            return
        }

        captureSession!.addInput(videoInput)
        setDeviceFrameRateForCurrentFilter(device: backCamera)

        stillImageOutput = AVCapturePhotoOutput()

        if captureSession!.canAddOutput(stillImageOutput!) {
            captureSession!.addOutput(stillImageOutput!)

            let q = DispatchQueue(label: "sample buffer delegate", qos: .default)
            videoOutput.setSampleBufferDelegate(self, queue: q)
            videoOutput.videoSettings = [
                kCVPixelBufferPixelFormatTypeKey as String: NSNumber(value: kCVPixelFormatType_32BGRA),
                kCVPixelBufferMetalCompatibilityKey as String: true
            ]
            videoOutput.alwaysDiscardsLateVideoFrames = true

            if captureSession!.canAddOutput(videoOutput) {
                captureSession!.addOutput(videoOutput)
            }

            captureSession!.startRunning()
        }
    }

    setDefaultFocusAndExposure()
}
The video and mic are recorded on two separate streams. Details on the microphone and recording video have been left out since my focus is performance of live camera output.
UPDATE - I have a simplified test project on GitHub that makes it a lot easier to test the problem I'm having: https://github.com/PunchyBass/Live-Filter-test-project
Off the top of my head, you are not comparing pears with pears. Even though you are running the 2.49 GHz A12 against the 1.85 GHz A9, the differences between the cameras are also huge: even if you use them with the same parameters, several features of the XS's camera require more CPU resources (dual camera, stabilization, Smart HDR, etc.).
Sorry about the lack of sources; I tried to find metrics on the CPU cost of those features but couldn't. Unfortunately for your needs, that information isn't relevant for marketing when they are selling it as the best camera ever in a smartphone.
They are selling it as the best processor as well. We don't know what would happen using the XS camera with an A9 processor; it would probably crash, and we will never know...
PS: Are your metrics for the whole processor or for the core in use? For the whole processor, you also need to consider other tasks the devices may be executing; for a single core, it's 21% of 200% against 39% of 600%.
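One concrete setting worth equalizing (my suggestion, not something verified above) is video stabilization on the video output connection, which newer devices can apply and which costs CPU:
if let connection = videoOutput.connection(with: .video),
   connection.isVideoStabilizationSupported {
    // pin both devices to the same (disabled) stabilization mode
    connection.preferredVideoStabilizationMode = .off
}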

3D Object is not visible in real view in Swift

I'm trying to display a 3D drone object through my camera. I have created an ARSCNView, configured it properly, and created a scene. I have also passed the object to that scene view, but when I run my app the object doesn't show up anywhere, even though I have set its position. How can I see my object? My code to set up the scene and configuration is this:
func setupScene() {
    let scene = SCNScene()
    arView.scene = scene
}

func setupConfiguration() {
    let configure = ARWorldTrackingConfiguration()
    arView.session.run(configure)
}

func addDrone() {
    drone.loadModel()
    arView.scene.rootNode.addChildNode(drone)
}
This is my Drone class, which adds the object as a child node:
class Drone: SCNNode {
    func loadModel() {
        guard let virtualObjectScene = SCNScene(named: "Drone.scn") else { return }
        let wrapperNode = SCNNode()
        for child in virtualObjectScene.rootNode.childNodes {
            wrapperNode.addChildNode(child)
        }
        addChildNode(wrapperNode)
    }
}
How can I get it to show up?
Looking at your code, it looks like your Drone class never gets initialised.
If your SCNScene is in an .scnassets folder, you also need to include that in your path.
Let's say, therefore, that you have a folder named ARAssets.scnassets.
Your Drone class should look like this:
class Drone: SCNNode {
    override init() {
        super.init()
        guard let virtualObjectScene = SCNScene(named: "ARAssets.scnassets/Drone.scn") else { return }
        let wrapperNode = SCNNode()
        for child in virtualObjectScene.rootNode.childNodes {
            wrapperNode.addChildNode(child)
        }
        self.addChildNode(wrapperNode)
    }

    required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }
}
And to actually load it you should do something like this in your viewController:
func addDrone() {
    let droneNode = Drone()
    droneNode.position = SCNVector3(0, 0, -1.5)
    self.augmentedRealityView.scene.rootNode.addChildNode(droneNode)
}
You may also need to adjust the scale property of your drone node as well, e.g.:
droneNode.scale = SCNVector3(0.01, 0.01, 0.01)

Converting UIKit to SceneKit

I'm having trouble converting a program from UIKit to SceneKit. The biggest difficulty for me is understanding how the delegate file, Tile, synched with the array, Board, is set up with SceneKit. It is a simple project. A screenshot: http://imgur.com/9hsv7X5. It displays a 3 x 5 array. The user taps an item and it becomes highlighted. Then they tap another item, it becomes highlighted, and the previous item is unhighlighted.
Here is the UIKit project composed of 3 files:
VIEWCONTROLLER
import UIKit

struct BoardLoc {
    var x: Int
    var y: Int
}

class ViewController: UIViewController, TileDelegate {
    var tile: Tile!

    override func viewDidLoad() {
        super.viewDidLoad()
        let scene = Board()
        tile.tileDelegate = self
        tile.board = scene
    }

    func getTileAtLoc(tile: Tile, _ boardLoc: BoardLoc) {
        tile.boardLoc = boardLoc
    }
}
BOARD
import Foundation

class Board {
    var board: Array<Array<String>> = Array(count: 3, repeatedValue: Array(count: 5, repeatedValue: "foo"))

    func putTileAt(boardLoc: BoardLoc) -> String {
        return board[boardLoc.x][boardLoc.y]
    }
}
TILE
import UIKit

protocol TileDelegate {
    func getTileAtLoc(tile: Tile, _ boardLoc: BoardLoc)
}

class Tile: UIView {
    var boardLoc: BoardLoc?
    var board: Board?
    var tileDelegate: TileDelegate?

    required init(coder aDecoder: NSCoder) {
        super.init(coder: aDecoder)
        addGestureRecognizer(UITapGestureRecognizer(target: self, action: "handleTap:"))
    }

    override func drawRect(rect: CGRect) {
        for x in 0...2 {
            for y in 0...4 {
                let context = UIGraphicsGetCurrentContext()
                let red = UIColor.redColor().CGColor
                let orange = UIColor.orangeColor().CGColor
                let bigCircle = CGRectMake(CGFloat(106 * x), CGFloat(106 * y), 106, 106)
                let smallCircle = CGRectMake(CGFloat(106 * x) + 3, CGFloat(106 * y) + 3, 100, 100)
                if (boardLoc != nil && boardLoc!.x == x && boardLoc!.y == y) {
                    CGContextSetFillColorWithColor(context, red)
                    CGContextFillEllipseInRect(context, bigCircle)
                }
                if board!.putTileAt(BoardLoc(x: x, y: y)) == "foo" {
                    CGContextSetFillColorWithColor(context, orange)
                    CGContextFillEllipseInRect(context, smallCircle)
                }
            }
        }
    }

    func handleTap(gestureRecognizer: UIGestureRecognizer) {
        let point = gestureRecognizer.locationInView(self)
        let boardLoc = BoardLoc(x: Int(point.x) / 106, y: Int(point.y) / 106)
        tileDelegate!.getTileAtLoc(self, boardLoc)
        setNeedsDisplay()
    }
}
First of all, I recommend you read the Apple SceneKit documentation and some tutorials.
Scene Kit is a 3D-rendering Objective-C framework that combines a high-performance rendering engine with a high-level, descriptive API. Scene Kit supports the import, manipulation, and rendering of 3D assets without requiring the exact steps to render a scene the way OpenGL does.
http://www.objc.io/issue-18/scenekit.html
https://www.weheartswift.com/introduction-scenekit-part-1/
http://www.raywenderlich.com/83748/beginning-scene-kit-tutorial
http://tacow.org/assets/attachments/SceneKit.pdf
Scene Kit allows you to render a 3D scene easily, without the OpenGL ES APIs. However, you should understand how Scene Kit works.
Basically, Scene Kit provides a view controller that maintains an animation loop. This loop follows a design pattern common in games and simulations, with two phases: update and render. In the actual implementation Scene Kit has more phases, as in the figure from http://www.objc.io/issue-18/scenekit.html, but basically there are two phases, update and render.
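To make the update phase concrete, here is a small sketch (written in current Swift, unlike the older syntax in the code further below) of hooking into it through SCNSceneRendererDelegate; wiring the delegate this way is my own addition:
import UIKit
import SceneKit

class GameViewController: UIViewController, SCNSceneRendererDelegate {
    override func viewDidLoad() {
        super.viewDidLoad()
        let scnView = self.view as! SCNView
        scnView.delegate = self   // receive the per-frame callbacks
    }

    // "Update" phase: called once per frame before Scene Kit renders.
    func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
        // update the game board, node positions, animations, etc. here;
        // the "render" phase then draws the node tree automatically.
    }
}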
So, to create a Scene Kit project, the basics are:
Prepare an SCNView
Initialize the 3D scene
Create a touch event handler
Implement the update phase: update the game board using the touched object or the touched position, update the animation of the objects, and so on.
Implement the render phase: basically, Scene Kit automatically renders the registered 3D objects and models.
Thus, you should implement it as follows:
Use an SCNView instead of a plain ViewController
Create a scene
Place the Board and Tiles as Scene Kit 3D objects
Use hitTest for touching a Tile and update the Tiles in the update phase
import SceneKit

class GameViewController: UIViewController {
    struct BoardLoc {
        var x: Int
        var y: Int
    }

    enum Type {
        case Yellow
        case Orange
    }

    var boardArray: Array<Array<Type>> = []

    override func viewDidLoad() {
        super.viewDidLoad()

        for x in 0...2 {
            boardArray.append(Array(count: 5, repeatedValue: Type.Orange))
            for y in 0...4 {
                boardArray[x][y] = Type.Orange
            }
        }

        let scene = SCNScene(named: "art.scnassets/balls8.dae")
        let scnView = self.view as SCNView
        scnView.scene = scene
        scnView.autoenablesDefaultLighting = true

        let taps = NSMutableArray()
        let tap = UITapGestureRecognizer(target: self, action: "handleTap:")
        taps.addObject(tap)
        scnView.gestureRecognizers = taps
    }

    func handleTap(gestureRecognizer: UIGestureRecognizer) {
        let scnView = view as SCNView
        let point = gestureRecognizer.locationInView(scnView)
        if let hitResults = scnView.hitTest(point, options: nil) {
            if hitResults.count > 0 {
                let result: AnyObject! = hitResults[0]
                if !result.node!.name!.hasPrefix("Orange") {
                    return
                }
                let tapLoc = BoardLoc(x: Int(point.x) / 106, y: Int(point.y) / 106)
                boardArray[tapLoc.x][tapLoc.y] = Type.Yellow

                for col in 0...2 {
                    for row in 0...4 {
                        var yellowBall = scnView.scene!.rootNode.childNodeWithName("Yellow", recursively: true)
                        var secxtorX = Float(col) * 16.5 - 16
                        var sectorY = 34 - (Float(row) * 16.5)
                        if boardArray[col][row] == Type.Yellow {
                            yellowBall!.runAction(SCNAction.moveTo(SCNVector3(x: secxtorX, y: sectorY, z: 25), duration: 0.01))
                            boardArray[tapLoc.x][tapLoc.y] = Type.Orange
                        }
                    }
                }
            }
        }
    }
}