Save ARFaceGeometry to OBJ file - swift

In an iOS ARKit app, I've been trying to save the ARFaceGeometry data to an OBJ file. I followed the explanation here: How to make a 3D model from AVDepthData?. However, the OBJ isn't created correctly. Here's what I have:
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
    guard let faceAnchor = anchor as? ARFaceAnchor else { return }
    currentFaceAnchor = faceAnchor

    // If this is the first time with this anchor, get the controller to create content.
    // Otherwise (switching content), will change content when setting `selectedVirtualContent`.
    if node.childNodes.isEmpty, let contentNode = selectedContentController.renderer(renderer, nodeFor: faceAnchor) {
        node.addChildNode(contentNode)
    }

    // https://stackoverflow.com/questions/52953590/how-to-make-a-3d-model-from-avdepthdata
    let geometry = faceAnchor.geometry
    let allocator = MDLMeshBufferDataAllocator()

    let vertices = allocator.newBuffer(with: Data(fromArray: geometry.vertices), type: .vertex)
    let textureCoordinates = allocator.newBuffer(with: Data(fromArray: geometry.textureCoordinates), type: .vertex)
    let triangleIndices = allocator.newBuffer(with: Data(fromArray: geometry.triangleIndices), type: .index)

    let submesh = MDLSubmesh(indexBuffer: triangleIndices,
                             indexCount: geometry.triangleIndices.count,
                             indexType: .uInt16,
                             geometryType: .triangles,
                             material: MDLMaterial(name: "mat1", scatteringFunction: MDLPhysicallyPlausibleScatteringFunction()))

    let vertexDescriptor = MDLVertexDescriptor()
    // Attributes
    vertexDescriptor.addOrReplaceAttribute(MDLVertexAttribute(name: MDLVertexAttributePosition, format: .float3, offset: 0, bufferIndex: 0))
    vertexDescriptor.addOrReplaceAttribute(MDLVertexAttribute(name: MDLVertexAttributeNormal, format: .float3, offset: MemoryLayout<float3>.stride, bufferIndex: 0))
    vertexDescriptor.addOrReplaceAttribute(MDLVertexAttribute(name: MDLVertexAttributeTextureCoordinate, format: .float2, offset: MemoryLayout<float3>.stride + MemoryLayout<float3>.stride, bufferIndex: 0))
    // Layouts
    vertexDescriptor.layouts.add(MDLVertexBufferLayout(stride: MemoryLayout<float3>.stride + MemoryLayout<float3>.stride + MemoryLayout<float2>.stride))

    let mdlMesh = MDLMesh(vertexBuffers: [vertices, textureCoordinates], vertexCount: geometry.vertices.count, descriptor: vertexDescriptor, submeshes: [submesh])
    mdlMesh.addNormals(withAttributeNamed: MDLVertexAttributeNormal, creaseThreshold: 0.5)

    let asset = MDLAsset(bufferAllocator: allocator)
    asset.add(mdlMesh)

    let documentsPath = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!
    let exportUrl = documentsPath.appendingPathComponent("face.obj")
    try! asset.export(to: exportUrl)
}
The resulting OBJ file looks like this:
# Apple ModelIO OBJ File: face
mtllib face.mtl
g
v -0.000128156 -0.0277879 0.0575149
vn 0 0 0
vt -9.36008e-05 -0.0242016
usemtl material_1
f 1/1/1 1/1/1 1/1/1
f 1/1/1 1/1/1 1/1/1
f 1/1/1 1/1/1 1/1/1
... and many more lines
I would expect many more vertices, and the index values look wrong.

The core issue is that your vertex data isn't described correctly. When you provide a vertex descriptor to Model I/O while constructing a mesh, it represents the layout the data actually has, not your desired layout. You're supplying two vertex buffers, but your vertex descriptor describes an interleaved data layout with only one vertex buffer.
The easiest way to remedy this is to fix the vertex descriptor to reflect the data you're providing:
let vertexDescriptor = MDLVertexDescriptor()
// Attributes
vertexDescriptor.attributes[0] = MDLVertexAttribute(name: MDLVertexAttributePosition,
                                                    format: .float3,
                                                    offset: 0,
                                                    bufferIndex: 0)
vertexDescriptor.attributes[1] = MDLVertexAttribute(name: MDLVertexAttributeTextureCoordinate,
                                                    format: .float2,
                                                    offset: 0,
                                                    bufferIndex: 1)
// Layouts
vertexDescriptor.layouts[0] = MDLVertexBufferLayout(stride: MemoryLayout<float3>.stride)
vertexDescriptor.layouts[1] = MDLVertexBufferLayout(stride: MemoryLayout<float2>.stride)
When you later call addNormals(...), Model I/O will allocate the necessary space and update the vertex descriptor to reflect the new data. Since you're not rendering from the data and are instead immediately exporting it, the internal layout it chooses for the normals isn't important.
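For completeness, here is a minimal sketch of the corrected construction and export, assuming the Data(fromArray:) helper from the linked answer is available and that geometry is the ARFaceGeometry from the anchor:

// Minimal sketch: two separate (non-interleaved) vertex buffers, described by a matching descriptor.
let allocator = MDLMeshBufferDataAllocator()
let positions = allocator.newBuffer(with: Data(fromArray: geometry.vertices), type: .vertex)
let texcoords = allocator.newBuffer(with: Data(fromArray: geometry.textureCoordinates), type: .vertex)
let indices = allocator.newBuffer(with: Data(fromArray: geometry.triangleIndices), type: .index)

let submesh = MDLSubmesh(indexBuffer: indices,
                         indexCount: geometry.triangleIndices.count,
                         indexType: .uInt16,
                         geometryType: .triangles,
                         material: nil)

let vertexDescriptor = MDLVertexDescriptor()
vertexDescriptor.attributes[0] = MDLVertexAttribute(name: MDLVertexAttributePosition, format: .float3, offset: 0, bufferIndex: 0)
vertexDescriptor.attributes[1] = MDLVertexAttribute(name: MDLVertexAttributeTextureCoordinate, format: .float2, offset: 0, bufferIndex: 1)
vertexDescriptor.layouts[0] = MDLVertexBufferLayout(stride: MemoryLayout<float3>.stride)
vertexDescriptor.layouts[1] = MDLVertexBufferLayout(stride: MemoryLayout<float2>.stride)

let mdlMesh = MDLMesh(vertexBuffers: [positions, texcoords],
                      vertexCount: geometry.vertices.count,
                      descriptor: vertexDescriptor,
                      submeshes: [submesh])
// Model I/O allocates space for the normals and updates the descriptor itself.
mdlMesh.addNormals(withAttributeNamed: MDLVertexAttributeNormal, creaseThreshold: 0.5)

let asset = MDLAsset(bufferAllocator: allocator)
asset.add(mdlMesh)
let exportUrl = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
    .appendingPathComponent("face.obj")
try? asset.export(to: exportUrl)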

Related

Adding UV Map to Model IO MDLMesh

I'm trying to generate UV map for a mesh using Model/IO. The code runs on the simulator and generates a UV map for the input mesh but when I run it on a device it crashes on
mdlMesh.addUnwrappedTextureCoordinates(forAttributeNamed: MDLVertexAttributeTextureCoordinate)
with this error displayed multiple times on the console:
Can't choose for edge creation.
The fatal error that terminates the app is:
libc++abi: terminating with uncaught exception of type std::out_of_range: unordered_map::at: key not found
The code:
let asset = MDLAsset()
let allocator = MTKMeshBufferAllocator(device: device)

let zoneSize = MemoryLayout<Float>.size * 3 * mesh.vertices.count + MemoryLayout<UInt32>.size * indexCount
let zone = allocator.newZone(zoneSize)

let data = Data(bytes: vertexBuffer.contents(), count: MemoryLayout<Float>.size * 3 * mesh.vertices.count)
let vBuffer = allocator.newBuffer(from: zone, data: data, type: .vertex)!

let indexData = Data(bytes: indexBuffer.contents(), count: MemoryLayout<UInt32>.size * indexCount)
let iBuffer = allocator.newBuffer(from: zone, data: indexData, type: .index)!

let submesh = MDLSubmesh(indexBuffer: iBuffer,
                         indexCount: indexCount,
                         indexType: .uint32,
                         geometryType: .triangles,
                         material: nil)

let vDescriptor = MDLVertexDescriptor()
// Vertex positions
vDescriptor.attributes[0] = MDLVertexAttribute(name: MDLVertexAttributePosition,
                                               format: .float3,
                                               offset: 0,
                                               bufferIndex: 0)
vDescriptor.layouts[0] = MDLVertexBufferLayout(stride: MemoryLayout<Float>.size * 3)

let mdlMesh = MDLMesh(vertexBuffer: vBuffer,
                      vertexCount: mesh.vertices.count,
                      descriptor: vDescriptor,
                      submeshes: [submesh])

mdlMesh.addAttribute(withName: MDLVertexAttributeTextureCoordinate, format: .float2)
mdlMesh.addUnwrappedTextureCoordinates(forAttributeNamed: MDLVertexAttributeTextureCoordinate)

asset.add(mdlMesh)

Get SCNGeometry from Model I/O

I am trying to import a mesh into an SCNGeometry. I want to manipulate the vertices individually from the CPU, so I'm following the approach in this post: https://developer.apple.com/forums/thread/91618 .
So far I have imported it into the Model I/O framework and created an MTLBuffer.
let MDLPositionData = mesh?.vertexAttributeData(forAttributeNamed: "position", as: .float3)
let vertexBuffer1 = device.makeBuffer(bytes: MDLPositionData!.dataStart,
                                      length: MDLPositionData!.bufferSize,
                                      options: [.cpuCacheModeWriteCombined])

let vertexSource = SCNGeometrySource(
    buffer: vertexBuffer1!,
    vertexFormat: vertexFormat,
    semantic: SCNGeometrySource.Semantic.vertex,
    vertexCount: mesh!.vertexCount,
    dataOffset: 0,
    dataStride: MemoryLayout<vector_float3>.size)
The SCNGeometry needs index elements to properly show the mesh. Where do I get those?
I have tried to use the submeshes from Model I/O:
let submesh = mesh?.submeshes?[0]
let indexBuffer = (submesh as? MDLSubmesh)?.indexBuffer(asIndexType: .uInt32)
let indexBufferData = Data(bytes: indexBuffer!.map().bytes, count: indexBuffer!.length)

let indexElement = SCNGeometryElement(
    data: indexBufferData,
    primitiveType: SCNGeometryPrimitiveType.triangles,
    primitiveCount: indexBuffer!.length,
    bytesPerIndex: 32)

let geo = SCNGeometry(sources: [vertexSource, normalSource], elements: [indexElement])
But this throws the error
[SceneKit] Error: C3DMeshElementSetPrimitives invalid index buffer size
and the teapot geometry it shows looks wrong; it seems like the vertices aren't connected properly.
How do I get the correct Index data? Thank you!
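A hedged sketch of how the index element could be built from the Model I/O submesh, reusing mesh, vertexSource, and normalSource from the code above and assuming 32-bit triangle indices: bytesPerIndex is the size of one index in bytes (4 for UInt32, not 32), and primitiveCount is the number of triangles rather than the buffer length.

if let submesh = mesh?.submeshes?.firstObject as? MDLSubmesh {
    let indexBuffer = submesh.indexBuffer(asIndexType: .uInt32)
    let indexData = Data(bytes: indexBuffer.map().bytes, count: indexBuffer.length)
    let indexCount = indexBuffer.length / MemoryLayout<UInt32>.size

    let indexElement = SCNGeometryElement(data: indexData,
                                          primitiveType: .triangles,
                                          primitiveCount: indexCount / 3,           // triangles, not bytes
                                          bytesPerIndex: MemoryLayout<UInt32>.size) // 4 bytes per UInt32
    let geo = SCNGeometry(sources: [vertexSource, normalSource], elements: [indexElement])
}

Alternatively, SceneKit's Model I/O bridge can do the whole conversion for you via SCNGeometry(mdlMesh:), which builds the sources and elements from the MDLMesh directly.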

Getting RGBA values for all pixels of CGImage Swift

I am trying to create a real-time video processing app in which I need to get the RGBA values of all pixels for each frame, process them with an external library, and display the result. The way I'm currently getting the RGBA value of each pixel is too slow, and I'm wondering whether there is a faster way, perhaps using vImage. This is my current code; I get all the pixels from the current frame like this:
guard let cgImage = context.makeImage() else {
    return nil
}
guard let data = cgImage.dataProvider?.data,
      let bytes = CFDataGetBytePtr(data) else {
    fatalError("Couldn't access image data")
}
assert(cgImage.colorSpace?.model == .rgb)

let bytesPerPixel = cgImage.bitsPerPixel / cgImage.bitsPerComponent
gp.async {
    for y in 0 ..< cgImage.height {
        for x in 0 ..< cgImage.width {
            let offset = (y * cgImage.bytesPerRow) + (x * bytesPerPixel)
            let components = (r: bytes[offset], g: bytes[offset + 1], b: bytes[offset + 2])
            print("[x:\(x), y:\(y)] \(components)")
        }
        print("---")
    }
}
This is the version using vImage, but there is a memory leak somewhere and I cannot access the pixels:
guard
    let format = vImage_CGImageFormat(cgImage: cgImage),
    var buffer = try? vImage_Buffer(cgImage: cgImage,
                                    format: format) else {
    exit(-1)
}
let rowStride = buffer.rowBytes / MemoryLayout<Pixel_8>.stride / format.componentCount

do {
    let componentCount = format.componentCount
    var argbSourcePlanarBuffers: [vImage_Buffer] = (0 ..< componentCount).map { _ in
        guard let buffer1 = try? vImage_Buffer(width: Int(buffer.width),
                                               height: Int(buffer.height),
                                               bitsPerPixel: format.bitsPerComponent) else {
            fatalError("Error creating source buffers.")
        }
        return buffer1
    }

    vImageConvert_ARGB8888toPlanar8(&buffer,
                                    &argbSourcePlanarBuffers[0],
                                    &argbSourcePlanarBuffers[1],
                                    &argbSourcePlanarBuffers[2],
                                    &argbSourcePlanarBuffers[3],
                                    vImage_Flags(kvImageNoFlags))

    let n = rowStride * Int(argbSourcePlanarBuffers[1].height) * format.componentCount
    let start = buffer.data.assumingMemoryBound(to: Pixel_8.self)
    var ptr = UnsafeBufferPointer(start: start, count: n)
    print(Array(argbSourcePlanarBuffers)[1]) // prints the first 15 interleaved values
    buffer.free()
}
You can access the underlying pixels in a vImage buffer to do this.
For example, given an image named cgImage, use the following code to populate a vImage buffer:
guard
    let format = vImage_CGImageFormat(cgImage: cgImage),
    let buffer = try? vImage_Buffer(cgImage: cgImage,
                                    format: format) else {
    exit(-1)
}

let rowStride = buffer.rowBytes / MemoryLayout<Pixel_8>.stride / format.componentCount
Note that a vImage buffer's data may be wider than the image (see: https://developer.apple.com/documentation/accelerate/finding_the_sharpest_image_in_a_sequence_of_captured_images) which is why I've added rowStride.
To access the pixels as a single buffer of interleaved values, use:
do {
    let n = rowStride * Int(buffer.height) * format.componentCount
    let start = buffer.data.assumingMemoryBound(to: Pixel_8.self)
    let ptr = UnsafeBufferPointer(start: start, count: n)
    print(Array(ptr)[0 ... 15]) // prints the first 16 interleaved values
}
To access the pixels as a buffer of Pixel_8888 values, use the following (make sure that format.componentCount is 4):
do {
    let n = rowStride * Int(buffer.height)
    let start = buffer.data.assumingMemoryBound(to: Pixel_8888.self)
    let ptr = UnsafeBufferPointer(start: start, count: n)
    print(Array(ptr)[0 ... 3]) // prints the first 4 pixels
}
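If you need random access to an individual pixel, a small sketch on top of the buffer above (x and y are hypothetical coordinates; the channel order depends on your image's format, commonly ARGB or BGRA for 8-bit CGImages):

let x = 10, y = 20   // hypothetical pixel coordinates
let pixels = buffer.data.assumingMemoryBound(to: Pixel_8.self)
let pixelsPerRow = rowStride * format.componentCount   // buffer.rowBytes expressed in Pixel_8 units
let offset = y * pixelsPerRow + x * format.componentCount
let c0 = pixels[offset]
let c1 = pixels[offset + 1]
let c2 = pixels[offset + 2]
let c3 = pixels[offset + 3]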
This is the slowest way to do it. A faster way is with a custom Core Image filter.
Faster still is to write your own OpenGL shader (or rather, its equivalent in Metal for current devices).
I've written OpenGL shaders but haven't worked with Metal yet.
Both let you write graphics code that runs directly on the GPU.
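As a rough illustration of the Core Image route (not a drop-in replacement; the filter name here is just an example), per-frame processing can stay on the GPU and only read back when you actually need CPU access:

import CoreImage

let ciContext = CIContext()  // reuse a single context; creating one per frame is expensive

// Hypothetical per-frame processing: wrap the pixel buffer, apply a filter on
// the GPU, and create a CGImage only when CPU-side access is required.
func process(_ pixelBuffer: CVPixelBuffer) -> CGImage? {
    let input = CIImage(cvPixelBuffer: pixelBuffer)
    let filtered = input.applyingFilter("CIColorInvert")
    return ciContext.createCGImage(filtered, from: filtered.extent)
}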

How to Extract SceneKit Depth Buffer at runtime in AR scene?

How does one extract the SceneKit depth buffer? I'm making an AR-based app that runs with Metal, and I'm really struggling to find any info on how to extract a 2D depth buffer so I can render out fancy 3D photos of my scenes. Any help greatly appreciated.
Your question is unclear but I'll try to answer.
Depth pass from VR view
If you need to render a Depth pass from SceneKit's 3D environment, then you can use, for instance, an SCNGeometrySource.Semantic structure. There are vertex, normal, texcoord, color, and tangent type properties. Let's see what the vertex type property is:
static let vertex: SCNGeometrySource.Semantic
This semantic identifies data containing the positions of each vertex in the geometry. For a custom shader program, you use this semantic to bind SceneKit’s vertex position data to an input attribute of the shader. Vertex position data is typically an array of three- or four-component vectors.
Here's a code excerpt from the iOS Depth Sample project.
UPDATED: Using this code you can get a position for every point in the SCNScene and assign a color to those points (which is essentially what a zDepth channel is):
import SceneKit

struct PointCloudVertex {
    var x: Float, y: Float, z: Float
    var r: Float, g: Float, b: Float
}

@objc class PointCloud: NSObject {
    var pointCloud: [SCNVector3] = []
    var colors: [UInt8] = []

    public func pointCloudNode() -> SCNNode {
        let points = self.pointCloud
        var vertices = Array(repeating: PointCloudVertex(x: 0,
                                                         y: 0,
                                                         z: 0,
                                                         r: 0,
                                                         g: 0,
                                                         b: 0),
                             count: points.count)

        for i in 0...(points.count - 1) {
            let p = points[i]
            vertices[i].x = Float(p.x)
            vertices[i].y = Float(p.y)
            vertices[i].z = Float(p.z)
            vertices[i].r = Float(colors[i * 4]) / 255.0
            vertices[i].g = Float(colors[i * 4 + 1]) / 255.0
            vertices[i].b = Float(colors[i * 4 + 2]) / 255.0
        }

        let node = buildNode(points: vertices)
        return node
    }

    private func buildNode(points: [PointCloudVertex]) -> SCNNode {
        let vertexData = NSData(
            bytes: points,
            length: MemoryLayout<PointCloudVertex>.size * points.count
        )
        let positionSource = SCNGeometrySource(
            data: vertexData as Data,
            semantic: SCNGeometrySource.Semantic.vertex,
            vectorCount: points.count,
            usesFloatComponents: true,
            componentsPerVector: 3,
            bytesPerComponent: MemoryLayout<Float>.size,
            dataOffset: 0,
            dataStride: MemoryLayout<PointCloudVertex>.size
        )
        let colorSource = SCNGeometrySource(
            data: vertexData as Data,
            semantic: SCNGeometrySource.Semantic.color,
            vectorCount: points.count,
            usesFloatComponents: true,
            componentsPerVector: 3,
            bytesPerComponent: MemoryLayout<Float>.size,
            dataOffset: MemoryLayout<Float>.size * 3,
            dataStride: MemoryLayout<PointCloudVertex>.size
        )
        let element = SCNGeometryElement(
            data: nil,
            primitiveType: .point,
            primitiveCount: points.count,
            bytesPerIndex: MemoryLayout<Int>.size
        )

        element.pointSize = 1
        element.minimumPointScreenSpaceRadius = 1
        element.maximumPointScreenSpaceRadius = 5

        let pointsGeometry = SCNGeometry(sources: [positionSource, colorSource], elements: [element])

        return SCNNode(geometry: pointsGeometry)
    }
}
Depth pass from AR view
If you need to render a Depth pass from ARSCNView, it is possible only if you're using ARFaceTrackingConfiguration with the front-facing camera. If so, you can employ the capturedDepthData instance property, which gives you a depth map captured along with the video frame.
var capturedDepthData: AVDepthData? { get }
But this depth map is delivered at only 15 fps and at a lower resolution than the corresponding RGB image at 60 fps.
Face-based AR uses the front-facing, depth-sensing camera on compatible devices. When running such a configuration, frames vended by the session contain a depth map captured by the depth camera in addition to the color pixel buffer (see capturedImage) captured by the color camera. This property’s value is always nil when running other AR configurations.
And real code could look like this:
extension ViewController: ARSCNViewDelegate {
    func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
        DispatchQueue.global().async {
            guard let frame = self.sceneView.session.currentFrame else {
                return
            }
            if let depthData = frame.capturedDepthData {
                // capturedDepthData is an AVDepthData; its depthDataMap is the CVPixelBuffer.
                self.depthImage = depthData.depthDataMap
            }
        }
    }
}
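If you then want to display or export that depth map, one possible (hypothetical) helper, assuming depthImage ends up holding the CVPixelBuffer, is:

import CoreImage

// Sketch only: wrap the captured depth map in a CIImage for display or export.
// The buffer is typically DisparityFloat16; you may want to convert or normalize
// the values before showing them as a grayscale image.
func depthCIImage(from depthMap: CVPixelBuffer) -> CIImage {
    return CIImage(cvPixelBuffer: depthMap)
}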
Depth pass from Video view
Also, you can extract a true Depth pass using the two back-facing cameras and the AVFoundation framework.
Look at the Image Depth Map tutorial, where the concept of Disparity will be introduced to you.
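As a rough, hedged sketch of that route (device selection, preset, and error handling simplified), enabling depth delivery with the dual back camera looks roughly like this:

import AVFoundation

// Rough sketch, assuming a device with a dual back camera; error handling omitted.
let session = AVCaptureSession()
session.beginConfiguration()
session.sessionPreset = .photo

guard let camera = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .back),
      let input = try? AVCaptureDeviceInput(device: camera) else {
    fatalError("Dual camera not available")
}
session.addInput(input)

let photoOutput = AVCapturePhotoOutput()
session.addOutput(photoOutput)
// Depth delivery can only be enabled after the output has been added to the session.
photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
session.commitConfiguration()

// When capturing, request depth in the settings; the delivered AVCapturePhoto
// then exposes its depth map via photo.depthData (an AVDepthData).
let settings = AVCapturePhotoSettings()
settings.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported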

How to create a SceneKit SCNSkinner object in code?

I have a Swift app using SceneKit for iOS 8. I load a scene from a .dae file that contains a mesh controlled by a skeleton.
At runtime, I need to modify the texture coordinates. Using a transform is not an option -- I need to compute a different, completely new UV for each vertex in the mesh.
I know geometry is immutable in SceneKit, and I've read that the suggested approach is to make a copy manually. I'm trying to do that, but I always end up crashing when trying to re-create the SCNSkinner in code. The crash is an EXC_BAD_ACCESS inside C3DSourceAccessorGetMutableValuePtrAtIndex. Unfortunately, there is no source code for this, so I'm not sure why exactly it's crashing. I've narrowed it down to the SCNSkinner object attached to the mesh node. If I do not set that, I don't get a crash and things appear to be working.
EDIT: Here is a more complete call stack of the crash:
C3DSourceAccessorGetMutableValuePtrAtIndex
C3DSkinPrepareMeshForGPUIfNeeded
C3DSkinnerMakeCurrentMesh
C3DSkinnerUpdateCurrentMesh
__CFSetApplyFunction_block_invoke
CFBasicHashApply
CFSetApplyFunction
C3DAppleEngineRenderScene
...
I've not found any documentation or example code about how to create an SCNSkinner object manually. Since I'm just creating it based on a previously working mesh, it shouldn't be too difficult. I'm creating the SCNSkinner according to the Swift documentation, passing all of the correct things into the init. However, there is a skeleton property in the SCNSkinner that I'm not sure how to set. I set it to the skeleton that was on the original SCNSkinner of the mesh I'm copying, which I think
should work... but it doesn't. When setting the skeleton property, it does not appear to be assigning. Checking it immediately after the assignment shows that it is still nil. As a test, I tried to set the original mesh's skeleton property to something else, and after the assignment it was left untouched as well.
Can anyone shed any light on what is happening? Or how to correctly create and set up an SCNSkinner object manually?
Here is the code I'm using to manually clone a mesh and replace it with a new one (I have not modified any of the source data here -- I'm simply trying to make sure I can create a copy at this point):
// This is at the start of the app, just so you can see how the scene is set up.
// I add the .dae contents into its own node in the scene. This seems to be the
// standard way to put multiple .dae models into the same scene. This doesn't seem to
// have any impact on the problem I'm having -- I've tried without this indirection
// and the same problem exists.
let scene = SCNScene()
let modelNode = SCNNode()
modelNode.name = "ModelNode"
scene.rootNode.addChildNode(modelNode)

let modelScene = SCNScene(named: "model.dae")
if modelScene != nil {
    if let childNodes = modelScene?.rootNode.childNodes {
        for childNode in childNodes {
            modelNode.addChildNode(childNode as SCNNode)
        }
    }
}
// This happens later in the app after a tap from the user.
let modelNode = scnView.scene!.rootNode.childNodeWithName("ModelNode", recursively: true)
let modelMesh = modelNode?.childNodeWithName("MeshName", recursively: true)
let verts = modelMesh?.geometry!.geometrySourcesForSemantic(SCNGeometrySourceSemanticVertex)
let normals = modelMesh?.geometry!.geometrySourcesForSemantic(SCNGeometrySourceSemanticNormal)
let texcoords = modelMesh?.geometry!.geometrySourcesForSemantic(SCNGeometrySourceSemanticTexcoord)
let boneWeights = modelMesh?.geometry!.geometrySourcesForSemantic(SCNGeometrySourceSemanticBoneWeights)
let boneIndices = modelMesh?.geometry!.geometrySourcesForSemantic(SCNGeometrySourceSemanticBoneIndices)
let geometry = modelMesh?.geometry!.geometryElementAtIndex(0)
// Note: the vertex and normal data is shared.
let vertsData = NSData(data: verts![0].data)
let texcoordsData = NSData(data: texcoords![0].data)
let boneWeightsData = NSData(data: boneWeights![0].data)
let boneIndicesData = NSData(data: boneIndices![0].data)
let geometryData = NSData(data: geometry!.data!)
let newVerts = SCNGeometrySource(data: vertsData, semantic: SCNGeometrySourceSemanticVertex, vectorCount: verts![0].vectorCount, floatComponents: verts![0].floatComponents, componentsPerVector: verts![0].componentsPerVector, bytesPerComponent: verts![0].bytesPerComponent, dataOffset: verts![0].dataOffset, dataStride: verts![0].dataStride)
let newNormals = SCNGeometrySource(data: vertsData, semantic: SCNGeometrySourceSemanticNormal, vectorCount: normals![0].vectorCount, floatComponents: normals![0].floatComponents, componentsPerVector: normals![0].componentsPerVector, bytesPerComponent: normals![0].bytesPerComponent, dataOffset: normals![0].dataOffset, dataStride: normals![0].dataStride)
let newTexcoords = SCNGeometrySource(data: texcoordsData, semantic: SCNGeometrySourceSemanticTexcoord, vectorCount: texcoords![0].vectorCount, floatComponents: texcoords![0].floatComponents, componentsPerVector: texcoords![0].componentsPerVector, bytesPerComponent: texcoords![0].bytesPerComponent, dataOffset: texcoords![0].dataOffset, dataStride: texcoords![0].dataStride)
let newBoneWeights = SCNGeometrySource(data: boneWeightsData, semantic: SCNGeometrySourceSemanticBoneWeights, vectorCount: boneWeights![0].vectorCount, floatComponents: boneWeights![0].floatComponents, componentsPerVector: boneWeights![0].componentsPerVector, bytesPerComponent: boneWeights![0].bytesPerComponent, dataOffset: boneWeights![0].dataOffset, dataStride: boneWeights![0].dataStride)
let newBoneIndices = SCNGeometrySource(data: boneIndicesData, semantic: SCNGeometrySourceSemanticBoneIndices, vectorCount: boneIndices![0].vectorCount, floatComponents: boneIndices![0].floatComponents, componentsPerVector: boneIndices![0].componentsPerVector, bytesPerComponent: boneIndices![0].bytesPerComponent, dataOffset: boneIndices![0].dataOffset, dataStride: boneIndices![0].dataStride)
let newGeometry = SCNGeometryElement(data: geometryData, primitiveType: geometry!.primitiveType, primitiveCount: geometry!.primitiveCount, bytesPerIndex: geometry!.bytesPerIndex)
let newMeshGeometry = SCNGeometry(sources: [newVerts, newNormals, newTexcoords, newBoneWeights, newBoneIndices], elements: [newGeometry])
newMeshGeometry.firstMaterial = modelMesh?.geometry!.firstMaterial
let newModelMesh = SCNNode(geometry: newMeshGeometry)
let bones = modelMesh?.skinner?.bones
let boneInverseBindTransforms = modelMesh?.skinner?.boneInverseBindTransforms
let skeleton = modelMesh!.skinner!.skeleton!
let baseGeometryBindTransform = modelMesh!.skinner!.baseGeometryBindTransform
newModelMesh.skinner = SCNSkinner(baseGeometry: newMeshGeometry, bones: bones, boneInverseBindTransforms: boneInverseBindTransforms, boneWeights: newBoneWeights, boneIndices: newBoneIndices)
newModelMesh.skinner?.baseGeometryBindTransform = baseGeometryBindTransform
// Before this assignment, newModelMesh.skinner?.skeleton is nil.
newModelMesh.skinner?.skeleton = skeleton
// After, it is still nil... however, skeleton itself is completely valid.
modelMesh?.removeFromParentNode()
newModelMesh.name = "MeshName"
let meshParentNode = modelNode?.childNodeWithName("MeshParentNode", recursively: true)
meshParentNode?.addChildNode(newModelMesh)
These methods may help you find the solution:
SCNNode *hero = [SCNScene sceneNamed:@"Hero"].rootNode;
SCNNode *hat = [SCNScene sceneNamed:@"FancyFedora"].rootNode;
hat.skinner.skeleton = hero.skinner.skeleton;
[Export ("initWithFrame:")]
public UIView (System.Drawing.RectangleF frame) : base (NSObjectFlag.Empty)
{
    // Invoke the init method now.
    var initWithFrame = new Selector ("initWithFrame:").Handle;
    if (IsDirectBinding)
        Handle = ObjCRuntime.Messaging.IntPtr_objc_msgSend_RectangleF (this.Handle, initWithFrame, frame);
    else
        Handle = ObjCRuntime.Messaging.IntPtr_objc_msgSendSuper_RectangleF (this.SuperHandle, initWithFrame, frame);
}
See this link as well.
I don't specifically know what causes your code to crash, but here is a way of generating a mesh, its bones, and the skinning for that mesh, all from code. Swift 4 and iOS 12.
In the example, there is a mesh representing the concatenation of two cylinders, with one of the cylinders branching off at a 45 degree angle, like so:
\
|
The cylinders are just extruded triangles, i.e., radialSegmentCount = 3. (Note that there are 12 vertices, not 9, since the two cylinders aren't really conjoined.) The triangles are ordered like this:
v5
^
v3 /__|__\ v1
| | |
| v4 |
v2 |/___\| v0
There are 3 bones, corresponding to the heads and feet of the cylinders, where the middle bone corresponds to the head of the bottom cylinder and simultaneously the foot of the top cylinder. So for example, vertices v0, v2, and v4 correspond to bone0; v1, v3, v5 correspond to bone1, and so forth. That explains why boneIndices (see below) has the value that it does.
The resting positions of the bones corresponds to the resting positions of the cylinders in the geometry (bone2 sprouts off at a 45 degree angle from bone1, just like the cylinder geometry).
With that as context, the following code creates everything needed to skin the geometry:
let vertices = [float3(0.17841241, 0.0, 0.0), float3(0.17841241, 1.0, 0.0), float3(-0.089206174, 0.0, 0.1545097), float3(-0.089206174, 1.0, 0.1545097), float3(-0.089206256, 0.0, -0.15450965), float3(-0.089206256, 1.0, -0.15450965), float3(0.12615661, 1.1261566, 0.0), float3(-0.58094996, 1.8332633, 0.0), float3(-0.063078284, 0.9369217, 0.1545097), float3(-0.7701849, 1.6440284, 0.1545097), float3(-0.063078344, 0.93692166, -0.15450965), float3(-0.77018493, 1.6440284, -0.15450965)]
let indices: [UInt8] = [0, 1, 2, 3, 4, 5, 0, 1, 1, 6, 6, 7, 8, 9, 10, 11, 6, 7]
let geometrySource = SCNGeometrySource(vertices: vertices.map { SCNVector3($0) })
let geometryElement = SCNGeometryElement(indices: indices, primitiveType: .triangleStrip)
let geometry = SCNGeometry(sources: [geometrySource], elements: [geometryElement])
let bone0 = SCNNode()
bone0.simdPosition = float3(0,0,0)
let bone1 = SCNNode()
bone1.simdPosition = float3(0,1,0)
let bone2 = SCNNode()
bone2.simdPosition = float3(0,1,0) + normalize(float3(-1,1,0))
let bones = [bone0, bone1, bone2]
let boneInverseBindTransforms: [NSValue]? = bones.map { NSValue(scnMatrix4: SCNMatrix4Invert($0.transform)) }
var boneWeights: [Float] = vertices.map { _ in 1.0 }
var boneIndices: [UInt8] = [
    0, 1, 0, 1, 0, 1,
    1, 2, 1, 2, 1, 2,
]
let boneWeightsData = Data(bytesNoCopy: &boneWeights, count: boneWeights.count * MemoryLayout<Float>.size, deallocator: .none)
let boneIndicesData = Data(bytesNoCopy: &boneIndices, count: boneIndices.count * MemoryLayout<UInt8>.size, deallocator: .none)
let boneWeightsGeometrySource = SCNGeometrySource(data: boneWeightsData, semantic: .boneWeights, vectorCount: boneWeights.count, usesFloatComponents: true, componentsPerVector: 1, bytesPerComponent: MemoryLayout<Float>.size, dataOffset: 0, dataStride: MemoryLayout<Float>.size)
let boneIndicesGeometrySource = SCNGeometrySource(data: boneIndicesData, semantic: .boneIndices, vectorCount: boneIndices.count, usesFloatComponents: false, componentsPerVector: 1, bytesPerComponent: MemoryLayout<UInt8>.size, dataOffset: 0, dataStride: MemoryLayout<UInt8>.size)
let skinner = SCNSkinner(baseGeometry: geometry, bones: bones, boneInverseBindTransforms: boneInverseBindTransforms, boneWeights: boneWeightsGeometrySource, boneIndices: boneIndicesGeometrySource)
let node = SCNNode(geometry: geometry)
node.skinner = skinner
Note: In most cases, you should use UInt16 not UInt8.
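For example, a hedged variant of the bone-index source using UInt16 indices (assuming the same 12-vertex mesh as above) might look like this:

var boneIndices16: [UInt16] = [
    0, 1, 0, 1, 0, 1,
    1, 2, 1, 2, 1, 2,
]
let boneIndices16Data = Data(bytes: &boneIndices16,
                             count: boneIndices16.count * MemoryLayout<UInt16>.size)
let boneIndices16Source = SCNGeometrySource(data: boneIndices16Data,
                                            semantic: .boneIndices,
                                            vectorCount: boneIndices16.count,
                                            usesFloatComponents: false,
                                            componentsPerVector: 1,
                                            bytesPerComponent: MemoryLayout<UInt16>.size,
                                            dataOffset: 0,
                                            dataStride: MemoryLayout<UInt16>.size)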