import PlaygroundSupport
import MetalKit
guard let device = MTLCreateSystemDefaultDevice() else {
fatalError("GPU is not supported")
}
let frame = CGRect(x: 0, y: 0, width: 600, height: 600)
let view = MTKView(frame: frame, device: device)
view.clearColor = MTLClearColor(red: 1, green: 1, blue: 0.8, alpha: 1)
//1
let allocator = MTKMeshBufferAllocator(device: device)
//2
let mdlMesh = MDLMesh(sphereWithExtent: [0.75, 0.75, 0.75], segments: [100, 100], inwardNormals: false, geometryType: .triangles, allocator: allocator)
//3
let mesh = try MTKMesh(mesh: mdlMesh, device: device)
guard let commandQueue = device.makeCommandQueue() else {
fatalError("Could not create a command queue")
}
//shader
let shader = """
#include <metal_stdlib>
using namespace metal;
struct VertexIn {
float4 position [[attribute(0)]];
};
vertex float4 vertex_main(const VertexIn vertex_in [[stage_in]])
{
return vertex_in.position;
}
fragment float4 fragment_main() {
return float4(1, 0, 0, 1);
}
"""
let library = try device.makeLibrary(source: shader, options: nil)
let vertexFunction = library.makeFunction(name: "vertex_main")
let fragmentFunction = library.makeFunction(name: "fragment_main")
//Pipeline
let pipelineDescriptor = MTLRenderPipelineDescriptor()
pipelineDescriptor.colorAttachments[0].pixelFormat = .bgra8Unorm
pipelineDescriptor.vertexFunction = vertexFunction
pipelineDescriptor.fragmentFunction = fragmentFunction
pipelineDescriptor.vertexDescriptor = MTKMetalVertexDescriptorFromModelIO(mesh.vertexDescriptor)
let pipelineState = try device.makeRenderPipelineState(descriptor: pipelineDescriptor)
// 1
guard let commandBuffer = commandQueue.makeCommandBuffer(),
//2
let renderPassDescriptor = view.currentRenderPassDescriptor,
//3
let renderEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: renderPassDescriptor)
else { fatalError() }
guard let submesh = mesh.submeshes.first else {
fatalError()
}
// This is where it crashes.
renderEncoder.drawIndexedPrimitives(type: .triangle,
indexCount: submesh.indexCount,
indexType: submesh.indexType,
indexBuffer: submesh.indexBuffer.buffer,
indexBufferOffset: 0)
// 1. You tell the render encoder that there are no more draw calls and end the render pass.
renderEncoder.endEncoding()
// 2. You get the drawable from the MTKView. The MTKView is backed by a Core Animation CAMetalLayer, and the layer owns a drawable texture which Metal can read and write to.
guard let drawable = view.currentDrawable else {
fatalError()
}
// 3. Ask the command buffer to present the MTKView's drawable and commit to the GPU.
commandBuffer.present(drawable)
commandBuffer.commit()
PlaygroundPage.current.liveView = view
error: Execution was interrupted, reason: EXC_BAD_ACCESS (code=1, address=0x220). The process has been left at the point where it was interrupted, use "thread return -x" to return to the state before expression evaluation.
I rewrote it again in Xcode but still cannot find where the problem is.
First, there seems to be an Xcode bug that withholds useful error information once your code crashes more than once; you just have to quit and restart Xcode.
Now, coming to the main issues, you missed a couple of things:
Setting the render pipeline state on the command encoder, so that your shaders actually get run by the GPU.
Setting the vertex buffer on the command encoder, so that the GPU knows which points of the mesh need to be rendered.
I have updated your code below. Also, you don't have to create most of those objects (pipeline, library, command queue) every time; I'm assuming you do so only because it's a playground. I tried to add some basic structure to it in my sample below; you can build on it, for example by using MTKView delegates, as sketched after the code.
import UIKit
import PlaygroundSupport
import MetalKit
class BasicRenderer {
var mesh: MTKMesh
var pipelineState: MTLRenderPipelineState
var device: MTLDevice
var commandQueue: MTLCommandQueue
var metalView: MTKView
init() {
guard let device = MTLCreateSystemDefaultDevice() else {
fatalError("Can't create device")
}
self.device = device
guard let commandQueue = device.makeCommandQueue() else {
fatalError("Can't create commandQueue")
}
self.commandQueue = commandQueue
metalView = MTKView(frame: .zero, device: device)
metalView.clearColor = MTLClearColor(red: 1, green: 1, blue: 0.8, alpha: 1)
let meshAllocator = MTKMeshBufferAllocator(device: device)
let mdlMesh = MDLMesh(sphereWithExtent: [0.75, 0.75, 0.75],
segments: [100, 100],
inwardNormals: false,
geometryType: .triangles,
allocator: meshAllocator)
guard let mesh = try? MTKMesh(mesh: mdlMesh, device: device) else {
fatalError("Can't create mesh")
}
self.mesh = mesh
let shader = """
#include <metal_stdlib>
using namespace metal;
struct VertexIn {
float4 position [[attribute(0)]];
};
vertex float4 vertex_main(const VertexIn vertex_in [[stage_in]])
{
return vertex_in.position;
}
fragment float4 fragment_main() {
return float4(1, 0, 0, 1);
}
"""
guard let library = try? device.makeLibrary(source: shader, options: nil) else {
fatalError("Can't create library")
}
let vertexFunction = library.makeFunction(name: "vertex_main")
let fragmentFunction = library.makeFunction(name: "fragment_main")
let pipelineDescriptor = MTLRenderPipelineDescriptor()
pipelineDescriptor.colorAttachments[0].pixelFormat = .bgra8Unorm
pipelineDescriptor.vertexFunction = vertexFunction
pipelineDescriptor.fragmentFunction = fragmentFunction
pipelineDescriptor.vertexDescriptor = MTKMetalVertexDescriptorFromModelIO(mesh.vertexDescriptor)
guard let pipelineState = try? device.makeRenderPipelineState(descriptor: pipelineDescriptor) else {
fatalError("Can't create pipelineState")
}
self.pipelineState = pipelineState
}
func render(in size: CGSize) {
metalView.frame = CGRect(origin: .zero, size: size)
guard let commandBuffer = commandQueue.makeCommandBuffer(),
let renderPassDescriptor = metalView.currentRenderPassDescriptor,
let renderEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: renderPassDescriptor)
else {
fatalError("Can't create commandBuffer/renderPassDescriptor/renderEncoder")
}
guard let subMesh = mesh.submeshes.first else {
fatalError("Can't get submesh")
}
guard let vertexBuffer = mesh.vertexBuffers.first?.buffer else {
fatalError("Can't get vertexBuffer")
}
// MARK: You missed setting the pipeline state and the vertex buffer on the command encoder
renderEncoder.setRenderPipelineState(pipelineState)
renderEncoder.setVertexBuffer(vertexBuffer, offset: 0, index: 0)
renderEncoder.drawIndexedPrimitives(type: .triangle,
indexCount: subMesh.indexCount,
indexType: subMesh.indexType,
indexBuffer: subMesh.indexBuffer.buffer,
indexBufferOffset: 0)
renderEncoder.endEncoding()
guard let drawable = metalView.currentDrawable else {
fatalError("Can't get currentDrawable")
}
commandBuffer.present(drawable)
commandBuffer.commit()
}
}
class MyViewController : UIViewController {
var basicRenderer = BasicRenderer()
override func loadView() {
self.view = basicRenderer.metalView
basicRenderer.render(in: CGSize(width: 600, height: 600))
}
}
PlaygroundPage.current.liveView = MyViewController()
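As a sketch of that MTKView delegate suggestion (this wiring is mine, not part of the original answer; BasicRenderer would also need to inherit from NSObject, because MTKViewDelegate refines NSObjectProtocol):
extension BasicRenderer: MTKViewDelegate {
    func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {
        // React to resizes here, e.g. rebuild size-dependent resources.
    }
    func draw(in view: MTKView) {
        // MetalKit calls this once per frame; encode and present a frame.
        render(in: view.bounds.size)
    }
}
You would then set metalView.delegate = self at the end of init() and drop the one-shot render(in:) call from loadView().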
Related
I'm following a tutorial - https://www.raywenderlich.com/7475-metal-tutorial-getting-started - to learn how to use Metal. I've done exactly what the tutorial said to do, and no errors pop up until I try to build; then it says the build failed, along with the error: cannot load module 'metal' as 'Metal'.
I can't find an answer anywhere else, so can someone help me fix this? I'm new to coding and I'm expecting this to have a straightforward solution.
Edit: So I just discovered that there was indeed a very simple solution: I hadn't downloaded the materials for the tutorial. But now that I have, another error has shown up, saying Use of unresolved identifier 'vertexBuffer'.
Here's my entire code, just to resolve any confusion -
ViewController.swift:
import Metal
var device: MTLDevice!
var metalLayer: CAMetalLayer!
var pipelineState: MTLRenderPipelineState!
var commandQueue: MTLCommandQueue!
var timer: CADisplayLink!
class ViewController: UIViewController {
override func viewDidLoad() {
super.viewDidLoad()
device = MTLCreateSystemDefaultDevice() // create the device first; everything below uses it
metalLayer = CAMetalLayer() // 1
metalLayer.device = device // 2
metalLayer.pixelFormat = .bgra8Unorm // 3
metalLayer.framebufferOnly = true // 4
metalLayer.frame = view.layer.frame // 5
view.layer.addSublayer(metalLayer) // 6
let dataSize = vertexData.count * MemoryLayout.size(ofValue: vertexData[0]) // 1
vertexBuffer = device.makeBuffer(bytes: vertexData, length: dataSize, options: []) // 2
// 1
let defaultLibrary = device.makeDefaultLibrary()!
let fragmentProgram = defaultLibrary.makeFunction(name: "basic_fragment")
let vertexProgram = defaultLibrary.makeFunction(name: "basic_vertex")
// 2
let pipelineStateDescriptor = MTLRenderPipelineDescriptor()
pipelineStateDescriptor.vertexFunction = vertexProgram
pipelineStateDescriptor.fragmentFunction = fragmentProgram
pipelineStateDescriptor.colorAttachments[0].pixelFormat = .bgra8Unorm
// 3
pipelineState = try! device.makeRenderPipelineState(descriptor: pipelineStateDescriptor)
commandQueue = device.makeCommandQueue()
timer = CADisplayLink(target: self, selector: #selector(gameloop))
timer.add(to: RunLoop.main, forMode: .default)
}
let vertexData: [Float] = [
0.0, 1.0, 0.0,
-1.0, -1.0, 0.0,
1.0, -1.0, 0.0
]
func render() {
guard let drawable = metalLayer?.nextDrawable() else { return }
let renderPassDescriptor = MTLRenderPassDescriptor()
renderPassDescriptor.colorAttachments[0].texture = drawable.texture
renderPassDescriptor.colorAttachments[0].loadAction = .clear
renderPassDescriptor.colorAttachments[0].clearColor = MTLClearColor(
red: 0.0,
green: 104.0/255.0,
blue: 55.0/255.0,
alpha: 1.0)
let commandBuffer = commandQueue.makeCommandBuffer()!
let renderEncoder = commandBuffer
.makeRenderCommandEncoder(descriptor: renderPassDescriptor)!
renderEncoder.setRenderPipelineState(pipelineState)
renderEncoder.setVertexBuffer(vertexBuffer, offset: 0, index: 0)
renderEncoder
.drawPrimitives(type: .triangle, vertexStart: 0, vertexCount: 3, instanceCount: 1)
renderEncoder.endEncoding()
commandBuffer.present(drawable)
commandBuffer.commit()
}
@objc func gameloop() {
autoreleasepool {
self.render()
}
}
}
Shaders.metal
#include <metal_stdlib>
using namespace metal;
vertex float4 basic_vertex( // 1
const device packed_float3* vertex_array [[ buffer(0) ]], // 2
unsigned int vid [[ vertex_id ]]) { // 3
return float4(vertex_array[vid], 1.0); // 4
}
fragment half4 basic_fragment() { // 1
return half4(1.0); // 2
}
Note - Shaders.metal is a file that the tutorial says to create
The compiler detected a name similar to Metal, namely metal. Sometimes libraries change names between versions, which is why yours can differ from the tutorial you're following.
Try doing as the error suggests and replace the import with: import metal
You need to import the Metal framework as below (mentioned under Creating an MTLDevice):
import Metal
Currently you are doing the following, which is wrong:
import metal
You also need a var vertexBuffer: MTLBuffer variable, since both viewDidLoad and render() reference vertexBuffer.
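For example, a minimal sketch (mirroring the tutorial's other properties; making it implicitly unwrapped is my assumption):
var vertexBuffer: MTLBuffer! // filled in viewDidLoad, read in render()
Declared at the same level as device and metalLayer, this lets the vertexBuffer = device.makeBuffer(bytes:length:options:) line in viewDidLoad compile.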
I am using this code to render the "Hello Triangle" triangle. On my iPhone, though, the triangle has very rough, jagged edges, not smooth edges like in the example.
import UIKit
import Metal
import MetalKit
import simd
class MBEMetalView: UIView {
// // // // // MAIN // // // // //
var metalDevice: MTLDevice! = nil
var metalLayer: CAMetalLayer! = nil
var commandQueue: MTLCommandQueue! = nil
var vertexBuffer: MTLBuffer! = nil
var pipelineState: MTLRenderPipelineState! = nil
var displayLink: CADisplayLink! = nil
override class var layerClass : AnyClass {
return CAMetalLayer.self
}
// override func didMoveToWindow() {
// self.redraw()
// }
override func didMoveToSuperview() {
super.didMoveToSuperview()
if self.superview != nil {
self.displayLink = CADisplayLink(target: self, selector: #selector(displayLinkFired))
self.displayLink.add(to: RunLoop.main, forMode: .common)
} else {
self.displayLink.invalidate()
}
}
@objc func displayLinkFired() {
self.redraw()
}
// // // // // INIT // // // // //
required init?(coder aDecoder: NSCoder) {
super.init(coder: aDecoder)
self.prepareDeviceLayerAndQueue()
self.makeBuffers()
self.makePipeline()
}
func prepareDeviceLayerAndQueue() {
metalLayer = (self.layer as! CAMetalLayer)
metalDevice = MTLCreateSystemDefaultDevice()
metalLayer.device = metalDevice
metalLayer.pixelFormat = .bgra8Unorm
commandQueue = metalDevice.makeCommandQueue()
}
func makeBuffers() {
var vertices: [MBEVertex] = [
MBEVertex(position: vector_float4(0, 0.5, 0, 1) , color: vector_float4(1, 0, 0, 1)),
MBEVertex(position: vector_float4(-0.5, -0.5, 0, 1) , color: vector_float4(0, 1, 0, 1)),
MBEVertex(position: vector_float4(0.5, -0.5, 0, 1) , color: vector_float4(0, 0, 1, 1))
]
self.vertexBuffer = metalDevice.makeBuffer(bytes: &vertices, length: MemoryLayout<MBEVertex>.stride * vertices.count, options: .storageModeShared) // 3 vertices x 32-byte stride = 96 bytes
}
func makePipeline() {
guard let library = metalDevice.makeDefaultLibrary() else { print("COULD NOT CREATE LIBRARY") ; return }
guard let vertexFunction = library.makeFunction(name: "vertex_main") else { print("COULD NOT CREATE A VERTEX FUNCTION") ; return }
guard let fragmentFunction = library.makeFunction(name: "fragment_main") else { print("COULD NOT CREATE LIBRARY") ; return }
let pipelineDescriptor = MTLRenderPipelineDescriptor()
pipelineDescriptor.vertexFunction = vertexFunction
pipelineDescriptor.fragmentFunction = fragmentFunction
pipelineDescriptor.colorAttachments[0].pixelFormat = metalLayer.pixelFormat
pipelineState = try? metalDevice.makeRenderPipelineState(descriptor: pipelineDescriptor)
if pipelineState == nil { print("COULD NOT CREATE PIPELINE STATE") ; return }
}
// // // // // FUNCTIONS // // // // //
func redraw() {
guard let drawable = metalLayer.nextDrawable() else { print("COULD NOT CREATE A DRAWABLE") ; return }
let texture = drawable.texture
let renderPassDescriptor = MTLRenderPassDescriptor()
renderPassDescriptor.colorAttachments[0].texture = texture
renderPassDescriptor.colorAttachments[0].loadAction = .clear
renderPassDescriptor.colorAttachments[0].storeAction = .store
renderPassDescriptor.colorAttachments[0].clearColor = MTLClearColor(red: 0.1, green: 0.1, blue: 0.1, alpha: 1)
guard let commandBuffer = commandQueue.makeCommandBuffer() else { print("COULD NOT CREATE A COMMAND BUFFER") ; return }
guard let commandEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: renderPassDescriptor) else { print("COULD NOT CREATE AN ENCODER") ; return }
commandEncoder.setRenderPipelineState(pipelineState)
commandEncoder.setVertexBuffer(vertexBuffer, offset: 0, index: 0)
commandEncoder.drawPrimitives(type: .triangle, vertexStart: 0, vertexCount: 3)
commandEncoder.endEncoding()
commandBuffer.present(drawable)
commandBuffer.commit()
}
// // // // // TYPES // // // // //
struct MBEVertex {
var position: vector_float4
var color: vector_float4
}
}
I have tried rendering the triangle a few different times with different methods (sometimes using a MetalKit view from Interface Builder, sometimes creating the view manually)... each time, though, the triangle comes out with rough edges.
The main issue here is that the drawable size of your layer is much smaller than the resolution of your screen. You can get them to match by taking the following steps:
When your Metal view moves to a new superview, update its contentsScale property to match that of the hosting display:
layer.contentsScale = self.window?.screen.scale ?? 1.0
Add a property to your view subclass that computes the ideal drawable size based on the bounds of the view and its scale:
var preferredDrawableSize: CGSize {
return CGSize(width: bounds.size.width * layer.contentsScale,
height: bounds.size.height * layer.contentsScale)
}
Update the drawableSize of your layer when you detect that it doesn't match the computed preferred size:
func redraw() {
if metalLayer.drawableSize != preferredDrawableSize {
metalLayer.drawableSize = preferredDrawableSize
}
...
}
By the way, these days there's really no good reason not to use MTKView for this purpose. It abstracts all of these details for you and is much nicer to work with.
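For comparison, a minimal sketch of the MTKView route (the view controller wiring is my assumption, not part of the answer above):
import UIKit
import MetalKit
class TriangleViewController: UIViewController {
    override func viewDidLoad() {
        super.viewDidLoad()
        guard let device = MTLCreateSystemDefaultDevice() else { return }
        let metalView = MTKView(frame: view.bounds, device: device)
        // MTKView keeps its drawableSize in sync with its bounds and the
        // display's scale factor, so no contentsScale bookkeeping is needed.
        metalView.clearColor = MTLClearColor(red: 0.1, green: 0.1, blue: 0.1, alpha: 1)
        view.addSubview(metalView)
    }
}
Per-frame drawing would then go in an MTKViewDelegate's draw(in:) or an MTKView subclass's draw(_:), encoding the same commands as redraw() above.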
So in a main.swift file in your project, you can create a window (and go from there) like this:
let nsapp = NSApplication.shared
let window = NSWindow(
contentRect: NSMakeRect(0, 0, 200, 200),
styleMask: .fullSizeContentView,
backing: NSWindow.BackingStoreType.buffered,
defer: false
)
window.cascadeTopLeft(from:NSMakePoint(20,20))
nsapp.run()
I'm wondering how to do the same thing but with a Metal triangle. I've been looking through github.com/topics/metalkit but the closest thing I've found so far wasn't there but in a gist.
import Cocoa
import MetalKit
@NSApplicationMain
class AppDelegate: NSObject, NSApplicationDelegate, MTKViewDelegate {
weak var window: NSWindow!
weak var metalView: MTKView!
let device = MTLCreateSystemDefaultDevice()!
var commandQueue: MTLCommandQueue!
var pipelineState: MTLRenderPipelineState!
func applicationDidFinishLaunching(_ aNotification: Notification) {
metalView = MTKView(frame: NSRect(origin: CGPoint.zero, size: window.frame.size), device: device)
metalView.delegate = self
window.contentView = metalView
commandQueue = device.makeCommandQueue()
let shaders = """
#include <metal_stdlib>
using namespace metal;
struct VertexIn {
packed_float3 position;
packed_float3 color;
};
struct VertexOut {
float4 position [[position]];
float4 color;
};
vertex VertexOut vertex_main(device const VertexIn *vertices [[buffer(0)]],
uint vertexId [[vertex_id]]) {
VertexOut out;
out.position = float4(vertices[vertexId].position, 1);
out.color = float4(vertices[vertexId].color, 1);
return out;
}
fragment float4 fragment_main(VertexOut in [[stage_in]]) {
return in.color;
}
"""
do {
let library = try device.makeLibrary(source: shaders, options: nil)
let pipelineDescriptor = MTLRenderPipelineDescriptor()
pipelineDescriptor.colorAttachments[0].pixelFormat = metalView.colorPixelFormat
pipelineDescriptor.vertexFunction = library.makeFunction(name: "vertex_main")
pipelineDescriptor.fragmentFunction = library.makeFunction(name: "fragment_main")
pipelineState = try device.makeRenderPipelineState(descriptor: pipelineDescriptor)
} catch {}
}
func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {
}
func draw(in view: MTKView) {
guard let commandBuffer = commandQueue.makeCommandBuffer() else { return }
guard let passDescriptor = view.currentRenderPassDescriptor else { return }
guard let encoder = commandBuffer.makeRenderCommandEncoder(descriptor: passDescriptor) else { return }
let vertexData: [Float] = [ -0.5, -0.5, 0, 1, 0, 0,
0.5, -0.5, 0, 0, 1, 0,
0, 0.5, 0, 0, 0, 1 ]
encoder.setVertexBytes(vertexData, length: vertexData.count * MemoryLayout<Float>.stride, index: 0)
encoder.setRenderPipelineState(pipelineState)
encoder.drawPrimitives(type: .triangle, vertexStart: 0, vertexCount: 3)
encoder.endEncoding()
commandBuffer.present(view.currentDrawable!)
commandBuffer.commit()
}
}
It at least builds an MTKView from scratch, but I'm not sure yet what the minimum viable product is for getting a Metal thing working without any controllers, delegates, or applications. I'm going to start doing trial and error to get it working, but it will probably take a few days, and I thought it might be helpful for others if someone has already figured this out.
I have combined the two but it isn't rendering anything from what I can tell.
import AVFoundation
import AudioToolbox
import Foundation
import QuartzCore
import Security
import WebKit
import Cocoa
import Metal
import MetalKit
import Swift
let device = MTLCreateSystemDefaultDevice()!
// Our clear color, can be set to any color
let clearColor = MTLClearColor(red: 0.1, green: 0.57, blue: 0.25, alpha: 1)
let nsapp = NSApplication.shared
let appName = ProcessInfo.processInfo.processName
let window = NSWindow(
contentRect: NSMakeRect(0, 0, 1000, 1000),
styleMask: .fullSizeContentView,
backing: NSWindow.BackingStoreType.buffered,
defer: false
)
window.cascadeTopLeft(from:NSMakePoint(20,20))
window.title = appName;
window.makeKeyAndOrderFront(nil)
struct Vertex {
var position: float3
var color: float4
}
let view = MTKView(frame: NSRect(origin: CGPoint.zero, size: window.frame.size), device: device)
window.contentView = view
view.device = device
view.colorPixelFormat = .bgra8Unorm
view.clearColor = clearColor
let queue = device.makeCommandQueue()!
var vertexBuffer: MTLBuffer!
var vertices: [Vertex] = [
Vertex(position: float3(0,1,0), color: float4(1,0,0,1)),
Vertex(position: float3(-1,-1,0), color: float4(0,1,0,1)),
Vertex(position: float3(1,-1,0), color: float4(0,0,1,1))
]
let shaders = """
#include <metal_stdlib>
using namespace metal;
// Basic Struct to match our Swift type
// This is what is passed into the Vertex Shader
struct VertexIn {
float3 position;
float4 color;
};
// What is returned by the Vertex Shader
// This is what is passed into the Fragment Shader
struct VertexOut {
float4 position [[ position ]];
float4 color;
};
vertex VertexOut basic_vertex_function(const device VertexIn *vertices [[ buffer(0) ]],
uint vertexID [[ vertex_id ]]) {
VertexOut vOut;
vOut.position = float4(vertices[vertexID].position,1);
vOut.color = vertices[vertexID].color;
return vOut;
}
fragment float4 basic_fragment_function(VertexOut vIn [[ stage_in ]]) {
return vIn.color;
}
"""
let library = try device.makeLibrary(source: shaders, options: nil)
let pipelineDescriptor = MTLRenderPipelineDescriptor()
pipelineDescriptor.colorAttachments[0].pixelFormat = .bgra8Unorm
pipelineDescriptor.vertexFunction = library.makeFunction(name: "basic_vertex_function")
pipelineDescriptor.fragmentFunction = library.makeFunction(name: "basic_fragment_function")
let pipelineState = try device.makeRenderPipelineState(descriptor: pipelineDescriptor)
vertexBuffer = device.makeBuffer(
bytes: vertices,
length: MemoryLayout<Vertex>.stride * vertices.count,
options: []
)
enum MetalErrors: Error {
case commandBuffer
case passDescriptor
case encoder
}
guard let drawable = view.currentDrawable else { throw MetalErrors.commandBuffer }
guard let commandBuffer = queue.makeCommandBuffer() else { throw MetalErrors.commandBuffer }
guard let passDescriptor = view.currentRenderPassDescriptor else { throw MetalErrors.passDescriptor }
guard let encoder = commandBuffer.makeRenderCommandEncoder(descriptor: passDescriptor) else { throw MetalErrors.encoder }
nsapp.run()
// let vertexData: [Float] = [ -0.5, -0.5, 0, 1, 0, 0,
// 0.5, -0.5, 0, 0, 1, 0,
// 0, 0.5, 0, 0, 0, 1 ]
encoder.setRenderPipelineState(pipelineState)
// encoder.setVertexBytes(vertexData, length: vertexData.count * MemoryLayout<Float>.stride, index: 0)
encoder.drawPrimitives(type: .triangle, vertexStart: 0, vertexCount: vertices.count)
encoder.endEncoding()
commandBuffer.present(drawable)
commandBuffer.commit()
It's blank for me. I tried following this as well.
This is getting closer.
The main problem here is that NSApplication's run method doesn't return until the app terminates, so your render command encoding never happens. You can subclass MTKView and override its draw method to do your drawing instead:
import Cocoa
import MetalKit
let device = MTLCreateSystemDefaultDevice()!
// Our clear color, can be set to any color
let clearColor = MTLClearColor(red: 0.1, green: 0.57, blue: 0.25, alpha: 1)
let shaders = """
#include <metal_stdlib>
using namespace metal;
// Basic Struct to match our Swift type
// This is what is passed into the Vertex Shader
struct VertexIn {
float3 position;
float4 color;
};
// What is returned by the Vertex Shader
// This is what is passed into the Fragment Shader
struct VertexOut {
float4 position [[ position ]];
float4 color;
};
vertex VertexOut basic_vertex_function(const device VertexIn *vertices [[ buffer(0) ]],
uint vertexID [[ vertex_id ]]) {
VertexOut vOut;
vOut.position = float4(vertices[vertexID].position,1);
vOut.color = vertices[vertexID].color;
return vOut;
}
fragment float4 basic_fragment_function(VertexOut vIn [[ stage_in ]]) {
return vIn.color;
}
"""
let library = try device.makeLibrary(source: shaders, options: nil)
let pipelineDescriptor = MTLRenderPipelineDescriptor()
pipelineDescriptor.colorAttachments[0].pixelFormat = .bgra8Unorm
pipelineDescriptor.vertexFunction = library.makeFunction(name: "basic_vertex_function")
pipelineDescriptor.fragmentFunction = library.makeFunction(name: "basic_fragment_function")
let pipelineState = try device.makeRenderPipelineState(descriptor: pipelineDescriptor)
struct Vertex {
var position: float3
var color: float4
}
let queue = device.makeCommandQueue()!
var vertexBuffer: MTLBuffer!
var vertices: [Vertex] = [
Vertex(position: float3(0,1,0), color: float4(1,0,0,1)),
Vertex(position: float3(-1,-1,0), color: float4(0,1,0,1)),
Vertex(position: float3(1,-1,0), color: float4(0,0,1,1))
]
vertexBuffer = device.makeBuffer(
bytes: vertices,
length: MemoryLayout<Vertex>.stride * vertices.count,
options: []
)
enum MetalErrors: Error {
case commandBuffer
case passDescriptor
case encoder
}
class MyMTKView : MTKView {
override func draw() {
guard let drawable = currentDrawable else { return }
guard let passDescriptor = currentRenderPassDescriptor else { return }
guard let commandBuffer = queue.makeCommandBuffer() else { return }
guard let encoder = commandBuffer.makeRenderCommandEncoder(descriptor: passDescriptor) else { return }
encoder.setRenderPipelineState(pipelineState)
encoder.setVertexBuffer(vertexBuffer, offset: 0, index: 0 )
encoder.drawPrimitives(type: .triangle, vertexStart: 0, vertexCount: vertices.count)
encoder.endEncoding()
commandBuffer.present(drawable)
commandBuffer.commit()
}
}
let nsapp = NSApplication.shared
let appName = ProcessInfo.processInfo.processName
let window = NSWindow(
contentRect: NSMakeRect(0, 0, 1000, 1000),
styleMask: [.titled, .closable, .resizable],
backing: NSWindow.BackingStoreType.buffered,
defer: false
)
window.cascadeTopLeft(from:NSMakePoint(20,20))
window.title = appName;
let view = MyMTKView(frame: NSRect(origin: CGPoint.zero, size: window.frame.size), device: device)
window.contentView = view
view.device = device
view.colorPixelFormat = .bgra8Unorm
view.clearColor = clearColor
window.makeKeyAndOrderFront(nil)
nsapp.run()
UPDATE 6:
I've managed to fix my issue completely, but I would still like a better explanation of why it didn't work, in case my guess below is incorrect.
I've been trying to animate a sprite sheet over a video, but every time I export the video, the end result is the sample video I started with.
Here's my code:
First up, my custom CALayer to handle my own sprite sheets:
class SpriteLayer: CALayer {
var frameIndex: Int
override init() {
// Using 0 as a default state
self.frameIndex = 0
super.init()
}
required init?(coder aDecoder: NSCoder) {
self.frameIndex = 0
super.init(coder: aDecoder)
}
override func display() {
let currentFrameIndex = self.frameIndex
if currentFrameIndex == 0 {
return
}
let frameSize = self.contentsRect.size
self.contentsRect = CGRect(x: 0, y: CGFloat(currentFrameIndex - 1) * frameSize.height, width: frameSize.width, height: frameSize.height)
}
override func action(forKey event: String) -> CAAction? {
if event == "contentsRect" {
return nil
}
return super.action(forKey: event)
}
override class func needsDisplay(forKey key: String) -> Bool {
return key == "frameIndex"
}
}
Gif is a basic class with nothing fancy that works just fine; gif.strip is a UIImage of a vertical sprite sheet representing the gif.
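For context, a hedged sketch of what such a Gif class might hold (entirely my assumption; the question only states that it is basic and that strip holds the sheet):
class Gif {
    // Vertical sprite sheet, one frame stacked per row.
    var strip: UIImage?
}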
Now comes the method that should export a new video (it is part of a larger class used for exporting).
func convertAndExport(to url: URL, completion: @escaping () -> Void) {
// Get Initial info and make sure our destination is available
self.outputURL = url
let stripCgImage = self.gif.strip!.cgImage!
// This is used to time how long the export took
let start = DispatchTime.now()
do {
try FileManager.default.removeItem(at: outputURL)
} catch {
print("Remove Error: \(error.localizedDescription)")
print(error)
}
// Find and load "sample.mp4" as a AVAsset
let videoPath = Bundle.main.path(forResource: "sample", ofType: "mp4")!
let videoUrl = URL(fileURLWithPath: videoPath)
let videoAsset = AVAsset(url: videoUrl)
// Start a new mutable Composition with the same base video track
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)!
let clipVideoTrack = videoAsset.tracks(withMediaType: .video).first!
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
} catch {
print("Insert Error: \(error.localizedDescription)")
print(error)
return
}
compositionVideoTrack.preferredTransform = clipVideoTrack.preferredTransform
// Quick access to the video size
let videoSize = clipVideoTrack.naturalSize
// Setup CALayer and it's animation
let aLayer = SpriteLayer()
aLayer.contents = stripCgImage
aLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
aLayer.opacity = 1.0
aLayer.masksToBounds = true
aLayer.bounds = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
aLayer.contentsRect = CGRect(x: 0, y: 0, width: 1, height: 1.0 / 3.0)
let spriteAnimation = CABasicAnimation(keyPath: "frameIndex")
spriteAnimation.fromValue = 1
spriteAnimation.toValue = 4
spriteAnimation.duration = 2.25
spriteAnimation.repeatCount = .infinity
spriteAnimation.autoreverses = false
spriteAnimation.beginTime = AVCoreAnimationBeginTimeAtZero
aLayer.add(spriteAnimation, forKey: nil)
// Setup Layers for AVVideoCompositionCoreAnimationTool
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
videoLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
parentLayer.addSublayer(videoLayer)
parentLayer.addSublayer(aLayer)
// Create the mutable video composition
let videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(1, 30)
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
// Set the video composition to apply to the composition's video track
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
let videoTrack = mixComposition.tracks(withMediaType: .video).first!
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
instruction.layerInstructions = [layerInstruction]
videoComp.instructions = [instruction]
// Initialize export session
let assetExport = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetPassthrough)!
assetExport.videoComposition = videoComp
assetExport.outputFileType = AVFileType.mp4
assetExport.outputURL = self.outputURL
assetExport.shouldOptimizeForNetworkUse = true
// Export
assetExport.exportAsynchronously {
let status = assetExport.status
switch status {
case .failed:
print("Export Failed")
print("Export Error: \(assetExport.error!.localizedDescription)")
print(assetExport.error!)
case .unknown:
print("Export Unknown")
case .exporting:
print("Export Exporting")
case .waiting:
print("Export Waiting")
case .cancelled:
print("Export Cancelled")
case .completed:
let end = DispatchTime.now()
let nanoTime = end.uptimeNanoseconds - start.uptimeNanoseconds
let timeInterval = Double(nanoTime) / 1_000_000_000
// Function is now over, we can print how long it took
print("Time to generate video: \(timeInterval) seconds")
completion()
}
}
}
EDIT:
I based my code on the following links
SpriteLayer and how to use it
CABasicAnimation on a video
Using AVVideoCompositionCoreAnimationTool and AVAssetExportSession to save the new video
UPDATE 1:
I've tried removing the CABasicAnimation part of my code and played around with my CALayer but to no avail. I can't even get the image to show up.
To test things out, I tried animating this sprite sheet using a CAKeyframeAnimation on contentsRect in an Xcode playground, and it worked fine, so I don't think the issue is with the CABasicAnimation, and maybe not even with the CALayer itself. I could really use some help on this, because I don't understand why I can't even get an image to show over my sample video on export.
UPDATE 2:
In response to Matt's comment, I tried forgetting about the sprite sheet for a bit and changed it into a CATextLayer, but I'm still not seeing anything on my video (it has dark images, so white text should be perfectly visible):
let aLayer = CATextLayer()
aLayer.string = "This is a test"
aLayer.fontSize = videoSize.height / 6
aLayer.alignmentMode = kCAAlignmentCenter
aLayer.foregroundColor = UIColor.white.cgColor
aLayer.bounds = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height / 6)
UPDATE 3:
As per Matt's request, I tried changing parentLayer.addSublayer(aLayer) to videoLayer.addSublayer(aLayer), but still nothing changed. I thought as much, though, because the documentation for AVVideoCompositionCoreAnimationTool is as follows:
convenience init(postProcessingAsVideoLayer videoLayer: CALayer,
in animationLayer: CALayer)
meaning my parentLayer is its animationLayer, which probably means any animations should be done in this layer.
UPDATE 4:
I'm starting to go crazy over here. For now I've given up on the idea of showing text or an animated image; I just want to affect my video in any way possible, so I changed aLayer to this:
let aLayer = CALayer()
aLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
aLayer.backgroundColor = UIColor.white.cgColor
Well, this does absolutely nothing; I still get my sample video at my outputUrl. (I started testing this in a playground with the following code, if you want to "play" along.)
import PlaygroundSupport
import UIKit
import Foundation
import AVFoundation
func convertAndExport(to url: URL, completion: @escaping () -> Void) {
let start = DispatchTime.now()
do {
try FileManager.default.removeItem(at: url)
} catch {
print("Remove Error: \(error.localizedDescription)")
print(error)
}
let videoPath = Bundle.main.path(forResource: "sample", ofType: "mp4")!
let videoUrl = URL(fileURLWithPath: videoPath)
let videoAsset = AVURLAsset(url: videoUrl)
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)!
let clipVideoTrack = videoAsset.tracks(withMediaType: .video).first!
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
} catch {
print("Insert Error: \(error.localizedDescription)")
print(error)
return
}
compositionVideoTrack.preferredTransform = clipVideoTrack.preferredTransform
let videoSize = clipVideoTrack.naturalSize
print("Video Size Detected: \(videoSize.width) x \(videoSize.height)")
let aLayer = CALayer()
aLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
aLayer.backgroundColor = UIColor.white.cgColor
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
videoLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
parentLayer.addSublayer(videoLayer)
parentLayer.addSublayer(aLayer)
aLayer.setNeedsDisplay()
let videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(1, 30)
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
let videoTrack = mixComposition.tracks(withMediaType: .video).first!
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
instruction.layerInstructions = [layerInstruction]
videoComp.instructions = [instruction]
let assetExport = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetPassthrough)!
assetExport.videoComposition = videoComp
assetExport.outputFileType = AVFileType.mp4
assetExport.outputURL = url
assetExport.shouldOptimizeForNetworkUse = true
assetExport.exportAsynchronously {
let status = assetExport.status
switch status {
case .failed:
print("Export Failed")
print("Export Error: \(assetExport.error!.localizedDescription)")
print(assetExport.error!)
case .unknown:
print("Export Unknown")
case .exporting:
print("Export Exporting")
case .waiting:
print("Export Waiting")
case .cancelled:
print("Export Cancelled")
case .completed:
let end = DispatchTime.now()
let nanoTime = end.uptimeNanoseconds - start.uptimeNanoseconds
let timeInterval = Double(nanoTime) / 1_000_000_000
print("Time to generate video: \(timeInterval) seconds")
completion()
}
}
}
let outputUrl = FileManager.default.temporaryDirectory.appendingPathComponent("test.mp4")
convertAndExport(to: outputUrl) {
print(outputUrl)
}
Please someone help me understand what I'm doing wrong...
UPDATE 5:
I am running everything except the playground tests on an iPad Air 2 (so no simulator), because I use the camera to take pictures and then stitch them into a sprite sheet that I planned on animating over a video I would send by email. I started doing playground testing because every test from the iPad required me to go through the whole app cycle (countdown, photos, form, email sending/receiving).
OK, I finally got it to work as I always wanted it to.
First off, even though he deleted his comments, thanks to Matt for the link to a working example that helped me piece together what was wrong with my code.
First off:
let assetExport = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetPassthrough)!
I needed to use AVAssetExportPresetHighestQuality instead of AVAssetExportPresetPassthrough. My guess is that the passthrough preset means no re-encoding is done, so setting it to highest (not medium, because my exported video is larger than 400x400) meant the video could actually be re-encoded. I'm guessing this is what was stopping the exported video from containing any of the CALayer content I was trying out (even covering the video in white).
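For reference, the corrected line (identical to the original except for the preset):
let assetExport = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)!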
Secondly (not sure if this really has an effect, but I'll test later):
parentLayer.addSublayer(aLayer)
I replaced this with:
videoLayer.addSublayer(aLayer)
Not sure if this really mattered, but my understanding was that videoLayer is actually the animation layer for AVVideoCompositionCoreAnimationTool and parentLayer is just a container not meant to hold more than that, though I'm likely wrong.
The third change I made:
let spriteAnimation = CABasicAnimation(keyPath: "frameIndex")
spriteAnimation.fromValue = 1
spriteAnimation.toValue = 4
spriteAnimation.duration = 2.25
spriteAnimation.repeatCount = .infinity
spriteAnimation.autoreverses = false
spriteAnimation.beginTime = AVCoreAnimationBeginTimeAtZero
aLayer.add(spriteAnimation, forKey: nil)
I changed it to this:
let animation = CAKeyframeAnimation(keyPath: #keyPath(CALayer.contentsRect))
animation.duration = 2.25
animation.calculationMode = kCAAnimationDiscrete
animation.repeatCount = .infinity
animation.values = [
CGRect(x: 0, y: 0, width: 1, height: 1/3.0),
CGRect(x: 0, y: 1/3.0, width: 1, height: 1/3.0),
CGRect(x: 0, y: 2/3.0, width: 1, height: 1/3.0)
] as [CGRect]
animation.beginTime = AVCoreAnimationBeginTimeAtZero
animation.fillMode = kCAFillModeBackwards
animation.isRemovedOnCompletion = false
aLayer.add(animation, forKey: nil)
This change mainly removed my custom animation for the sprite sheet (since it will always be the same, I first wanted a working example; I'll generalise it later and probably add it to my private UI pod). But most importantly: animation.isRemovedOnCompletion = false. I noticed that removing this line makes the animation simply not play on the exported video. So for anyone whose CABasicAnimation is not animating on the video after an export, check whether isRemovedOnCompletion is set correctly on your animation.
I think that's pretty much all the changes I made.
Although I technically answered my own question, my bounty remains open for an explanation of how AVVideoCompositionCoreAnimationTool and AVAssetExportSession work, and why I had to make the changes I did to finally get this working, if anyone is interested in explaining.
Thanks again to Matt, you helped me out by showing me how you did it.
I followed this tutorial, extrapolating from iOS to OS X. Everything compiles just fine, but I don't get anything rendered (not even the clear color), and there are no errors. Could anyone please take a look and tell me what I am doing wrong here? I couldn't test this on iOS like in the tutorial because the iOS Simulator doesn't support Metal yet.
I have a custom view for Metal rendering.
I'm not adding a sublayer (like in the tutorial) because the layer is nil. I suppose I need to activate it somehow, but I don't know how.
import Cocoa
import Metal
import QuartzCore
class MetalView: NSView {
var device: MTLDevice!
var pipelineState: MTLRenderPipelineState!
var commandQueue: MTLCommandQueue!
var renderPassDescriptor: MTLRenderPassDescriptor!
var vertexBuffer: MTLBuffer!
var drawable: CAMetalDrawable {
return (layer as! CAMetalLayer).nextDrawable()!
}
override func awakeFromNib() {
// Device
device = MTLCreateSystemDefaultDevice()
// Layer
let metalLayer = CAMetalLayer()
metalLayer.device = device
metalLayer.pixelFormat = .BGRA8Unorm
metalLayer.framebufferOnly = true
metalLayer.frame = frame
layer = metalLayer
// Pipeline State
let defaultLibrary = device.newDefaultLibrary()
let fragmentProgram = defaultLibrary!.newFunctionWithName("basic_fragment")
let vertexProgram = defaultLibrary!.newFunctionWithName("basic_vertex")
let pipelineStateDescriptor = MTLRenderPipelineDescriptor()
pipelineStateDescriptor.vertexFunction = vertexProgram
pipelineStateDescriptor.fragmentFunction = fragmentProgram
pipelineStateDescriptor.colorAttachments[0].pixelFormat = .BGRA8Unorm
do
{
try pipelineState = device.newRenderPipelineStateWithDescriptor(pipelineStateDescriptor)
}
catch let error as NSError {
NSLog("Failed to create pipeline state, error \(error)")
}
// Command Queue
commandQueue = device.newCommandQueue()
// Render Pass Descriptor
renderPassDescriptor = MTLRenderPassDescriptor()
renderPassDescriptor.colorAttachments[0].texture = drawable.texture
renderPassDescriptor.colorAttachments[0].loadAction = .Clear
renderPassDescriptor.colorAttachments[0].clearColor = MTLClearColor(red: 0.75, green: 0.5, blue: 0.0, alpha: 1.0)
// Vertex Buffer
let vertexData:[Float] = [
0.0, 1.0, 0.0,
-1.0, -1.0, 0.0,
1.0, -1.0, 0.0
]
vertexBuffer = device.newBufferWithBytes(vertexData, length: vertexData.count * sizeofValue(vertexData[0]), options: MTLResourceOptions())
}
override func drawRect(dirtyRect: NSRect) {
let commandBuffer = commandQueue.commandBuffer()
let renderEncoder = commandBuffer.renderCommandEncoderWithDescriptor(renderPassDescriptor)
renderEncoder.setRenderPipelineState(pipelineState)
renderEncoder.setVertexBuffer(vertexBuffer, offset: 0, atIndex: 0)
renderEncoder.drawPrimitives(.Triangle, vertexStart: 0, vertexCount: 3, instanceCount: 1)
renderEncoder.endEncoding()
commandBuffer.presentDrawable(drawable)
commandBuffer.commit()
}
}
And I have the following shader code:
#include <metal_stdlib>
using namespace metal;
struct VertexIn
{
packed_float3 position;
};
vertex float4 basic_vertex(
const device VertexIn* vertex_array [[ buffer(0) ]],
unsigned int vertex_id [[ vertex_id ]])
{
return float4(vertex_array[vertex_id].position, 1.0);
}
fragment half4 basic_fragment()
{
return half4(1.0);
}
Directly setting the view's layer property is necessary but not sufficient for creating a so-called layer-hosting view. You can read about the distinction between layer hosting and layer backing here.
You almost certainly want a layer-backed view. To do this, override the makeBackingLayer method, in which you will create and configure your Metal layer and then return it. Then, early in your view's lifecycle (ideally in its initializers), set the wantsLayer property to true. This should be sufficient for getting your layer on the screen.
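A minimal sketch of that setup (modern Swift spelling; the class name is mine, and the layer configuration mirrors the question's awakeFromNib):
import Cocoa
import Metal
import QuartzCore
class MetalHostView: NSView {
    required init?(coder: NSCoder) {
        super.init(coder: coder)
        wantsLayer = true // layer-backed: AppKit will now call makeBackingLayer()
    }
    override func makeBackingLayer() -> CALayer {
        let metalLayer = CAMetalLayer()
        metalLayer.device = MTLCreateSystemDefaultDevice()
        metalLayer.pixelFormat = .bgra8Unorm
        metalLayer.framebufferOnly = true
        return metalLayer
    }
}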