How to record ARView Camera Position and Rotation over time and save it to a file - swift

I have been trying for over two days now to create an ARView that can record the position of the camera in space over time and then save it to a keyframe file. Basically, I want to create an app that lets you record virtual camera movements that can then be used in 3D applications like Autodesk Maya or Cinema 4D to drive a camera. Preferred file outputs would be anything that can hold a camera object and animate it over time (alternatively, an object that moves over time, which I can then parent my camera to).
Here is the code I have; sorry for it being a bit chaotic, I have tried a LOT of different things. Basically, I try to record the device position and rotation and then save them to an MDL object, but somehow it doesn't animate. I have also tried multiple different file types (some of those didn't support keyframe animation, so that didn't help, but from what I understand, Alembic does).
import SwiftUI
import ARKit
import RealityKit
import ModelIO

struct ARViewContainer: UIViewRepresentable {
    let session = ARSession()
    let delegate = MySessionDelegate()

    func makeUIView(context: Context) -> ARView {
        // Set up the ARView with session
        let arView = ARView(frame: .zero)
        let boxAnchor = try! Experience.loadBox()
        arView.scene.anchors.append(boxAnchor)
        arView.session.delegate = delegate // assign delegate to the session
        return arView
    }

    func updateUIView(_ uiView: ARView, context: Context) {
        // Update the ARView if needed
    }

    func startARSession() {
        // Start the ARSession
        let configuration = ARWorldTrackingConfiguration()
        configuration.planeDetection = [.horizontal, .vertical]
        session.run(configuration, options: [])
    }

    func stopARSession() {
        // Stop the ARSession
        session.pause()
    }
}

class MySessionDelegate: NSObject, ARSessionDelegate {
    var object: MDLMesh?
    let asset = MDLAsset()
    let cameraTransform = MDLTransform()
    var documentsDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!

    func session(_ session: ARSession, didUpdate frame: ARFrame) {
        // Get the camera position and orientation for the current frame
        let transform = frame.camera.transform
        let rotation = frame.camera.eulerAngles
        let position = transform.columns.3
        let elapsedTime = frame.timestamp
        cameraTransform.setTranslation(position[SIMD3(0,1,2)], forTime: elapsedTime)
        cameraTransform.setRotation(rotation, forTime: elapsedTime)
        print("Camera Transform: \(cameraTransform.matrix)")
    }
}
struct Camera: View {
    var body: some View {
        VStack {
            ARViewContainer()
                .onAppear(perform: ARViewContainer().startARSession)
                .onDisappear(perform: ARViewContainer().stopARSession)
            Button("Export Recording") {
                // Create an MDLAsset with a box representing the camera transform
                let object = MDLMesh(boxWithExtent: .init(0.1, 0.1, 0.1), segments: .init(10, 10, 10), inwardNormals: false, geometryType: .triangles, allocator: nil)
                object.name = "Camera Transform"
                object.transform = MySessionDelegate().cameraTransform
                let asset = MDLAsset()
                asset.add(object)
                // Export the MDLAsset to a file
                let documentsDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!
                let fileURL = documentsDirectory.appendingPathComponent("recording.abc")
                try! asset.export(to: fileURL)
            }
        }
    }
}
If there is a completely different way of doing it, please share that as well. I thank everybody in advance for any help!

Recording sixty 4x4 Transform Matrices per second
To write the data to a text file, I used a JSON encoding of the 4x4 transformation matrix (i.e. the full camera transform). After tapping the Record Transform Values button, the data immediately begins to be written to the toMaya.txt file. For convenience, I put all 16 values of the matrix on the screen (I tested this on an iPad, so use a device with a bigger screen).
The data from the nested lists in the toMaya.txt file can easily be read using regular Python or MEL scripting. Take a look at how the nesting works: each of the 16 matrix values is of type Float.
[x0,y0,z0,w0] is the first matrix column, [x1,y1,z1,w1] is the second matrix column, etc.
Here's the code:
import SwiftUI
import RealityKit
import Combine

struct ARViewContainer : UIViewRepresentable {
    @Binding var arView: ARView

    func makeUIView(context: Context) -> ARView { return arView }
    func updateUIView(_ view: ARView, context: Context) { }
}

struct ContentView : View {
    @State private var arView = ARView(frame: .zero)
    @State private var subs: [AnyCancellable] = []
    @State private var array: [[[Float]]] = [[ [1,0,0,0], [0,1,0,0],
                                               [0,0,1,0], [0,0,0,1] ]]
    let url = FileManager.default.urls(for: .documentDirectory,
                                        in: .userDomainMask)[0]
        .appendingPathComponent("toMaya.txt")

    var body: some View {
        ZStack {
            ARViewContainer(arView: $arView).ignoresSafeArea()
            HStack {
                VStack {
                    Text("\(array.last![0][0])").foregroundColor(.white)
                    Text("\(array.last![0][1])").foregroundColor(.white)
                    Text("\(array.last![0][2])").foregroundColor(.white)
                    Text("\(array.last![0][3])").foregroundColor(.white)
                }
                VStack {
                    Text("\(array.last![1][0])").foregroundColor(.white)
                    Text("\(array.last![1][1])").foregroundColor(.white)
                    Text("\(array.last![1][2])").foregroundColor(.white)
                    Text("\(array.last![1][3])").foregroundColor(.white)
                }
                VStack {
                    Text("\(array.last![2][0])").foregroundColor(.white)
                    Text("\(array.last![2][1])").foregroundColor(.white)
                    Text("\(array.last![2][2])").foregroundColor(.white)
                    Text("\(array.last![2][3])").foregroundColor(.white)
                }
                VStack {
                    Text("\(array.last![3][0])").foregroundColor(.white)
                    Text("\(array.last![3][1])").foregroundColor(.white)
                    Text("\(array.last![3][2])").foregroundColor(.white)
                    Text("\(array.last![3][3])").foregroundColor(.white)
                }
            }
            VStack {
                Button("Record Transform Values") {
                    DispatchQueue.main.async {
                        arView.scene.subscribe(to: SceneEvents.Update.self) { _ in
                            let col = arView.cameraTransform.matrix.columns
                            let mtx: [[Float]] = [
                                [col.0.x, col.0.y, col.0.z, col.0.w],
                                [col.1.x, col.1.y, col.1.z, col.1.w],
                                [col.2.x, col.2.y, col.2.z, col.2.w],
                                [col.3.x, col.3.y, col.3.z, col.3.w]
                            ]
                            array.append(mtx)
                            if let data = try? JSONEncoder().encode(self.array) {
                                guard let str = String(data: data, encoding: .ascii)
                                else { return }
                                do {
                                    try str.write(to: url, atomically: true, encoding: .ascii)
                                    print(url)
                                } catch {
                                    print(error.localizedDescription)
                                }
                            }
                        }.store(in: &subs)
                    }
                }
                Spacer()
            }
            VStack {
                Spacer()
                Text("\(array.count)").foregroundColor(.white)
            }
        }
    }
}
My toMaya.txt file is waiting for me in the following debugging directory:
file:///var/mobile/Containers/Data/Application/7C675F52-C78B-4252-98B5-3EBD37A3F832/Documents/toMaya.txt
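To sanity-check a recording, the nested lists can be decoded back with the same types they were written with. Here is a minimal Swift sketch (assuming the same file name and the column layout described above):
import Foundation
import simd

func loadRecording() throws -> [simd_float4x4] {
    let url = FileManager.default.urls(for: .documentDirectory,
                                        in: .userDomainMask)[0]
        .appendingPathComponent("toMaya.txt")
    let data = try Data(contentsOf: url)
    // Same nesting the recorder wrote: frames -> 4 columns -> 4 floats.
    let frames = try JSONDecoder().decode([[[Float]]].self, from: data)
    func column(_ a: [Float]) -> SIMD4<Float> {
        SIMD4<Float>(a[0], a[1], a[2], a[3])
    }
    return frames.map {
        simd_float4x4(columns: (column($0[0]), column($0[1]),
                                column($0[2]), column($0[3])))
    }
}
The translation of each recorded frame is the matrix's fourth column, so, for example, try loadRecording().map { $0.columns.3 } gives you the camera path.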

Related

Can I use .playAudio() to resume playback after stopping?

Following on from If I assign a sound in Reality Composer, can I stop it programmatically in RealityKit?, I would like to use a method to resume playback after stopping.
Can I do that?
For now, I use this command in my stopAudio function to stop the music.
func stopAudio() {
    if arView.scene.anchors.count > 0 {
        if arView.scene.anchors[0].isAnchored {
            arView.scene.anchors[0].children[0].stopAllAudio()
        }
    }
}
If I want arView.scene.anchors[0] to replay the music again, which command should I use?
Audio Playback Controller
Since RealityKit 2.0 isn't able to control the parameters of Reality Composer's behaviors, the best strategy for controlling audio is to create a programmatic AudioPlaybackController. To feed your audio file to the controller, export the .rcproject scene to .usdz format and use the unzipping trick to extract the .aiff, .caf or .mp3 sound file. When loading audio for playback, you can choose between a spatial and a non-spatial audio experience.
UIKit version
import UIKit
import RealityKit

extension ViewController {
    private func loadAudio() {
        do {
            let resource = try AudioFileResource.load(
                named: "planetarium07.caf",
                in: nil,
                inputMode: .spatial,
                loadingStrategy: .preload,
                shouldLoop: true)

            self.controller = entity.prepareAudio(resource)
            self.controller?.speed = 0.9
            self.controller?.fade(to: .infinity, duration: 2)
        } catch {
            print(error.localizedDescription)
        }
    }
}
ViewController:
class ViewController : UIViewController {
    @IBOutlet var uiView: UIView! // when using @IBAction buttons
    @IBOutlet var arView: ARView!
    private var entity = Entity()
    private var controller: AudioPlaybackController? = nil

    override func viewDidLoad() {
        super.viewDidLoad()
        uiView.backgroundColor = .systemCyan
        let boxScene = try! Experience.loadBox()
        arView.scene.anchors.append(boxScene)
        let anchor = boxScene.anchor
        anchor?.addChild(entity)
        self.loadAudio()
    }

    @IBAction func playMusic(_ sender: UIButton) {
        self.controller?.play()
    }

    @IBAction func stopMusic(_ sender: UIButton) {
        self.controller?.pause()
        // self.controller?.stop()
    }
}
SwiftUI version
import SwiftUI
import RealityKit

struct ContentView : View {
    @State var arView = ARView(frame: .zero)
    @State var controller: AudioPlaybackController? = nil
    @State var entity = Entity()

    var body: some View {
        ZStack {
            ARViewContainer(arView: $arView,
                            entity: $entity).ignoresSafeArea()
            VStack {
                Spacer()
                Button("Play") { loadSound(); controller?.play() }
                Button("Stop") { controller?.stop() }
            }
        }
    }

    func loadSound() {
        do {
            let resource = try AudioFileResource.load(
                named: "planetarium07.caf",
                in: nil,
                inputMode: .spatial,
                loadingStrategy: .preload,
                shouldLoop: true)

            self.controller = entity.prepareAudio(resource)
        } catch {
            print(error.localizedDescription)
        }
    }
}
ARViewContainer:
struct ARViewContainer: UIViewRepresentable {
    @Binding var arView: ARView
    @Binding var entity: Entity

    func makeUIView(context: Context) -> ARView {
        let boxScene = try! Experience.loadBox()
        arView.scene.anchors.append(boxScene)
        let anchor = boxScene.anchor
        anchor?.addChild(entity)
        return arView
    }

    func updateUIView(_ view: ARView, context: Context) { }
}

How to add a new scene to an existing anchor and remove the previous scene?

I have an rcproject file with about 12 scenes (500 MB or so). In order to lessen the load on iOS devices, I tried breaking it apart into separate rcproject files and changing the scene using notification triggers. However, when doing this and adding the new scene as a child to the main anchor, the new scene renders in a new spot, breaking the AR experience. There must be a way to add the new scenes at the exact same anchor/position. Alternatively, is there a better way than separating the rcproject to lessen the load on RAM etc.?
Here is my ARView
struct ARViewContainer: UIViewRepresentable {
    func makeUIView(context: Context) -> ARView {
        let arView = ARView(frame: .zero)
        // The experience consists of a "Base" (it acts as a permanent platform for all the scenes to be rendered on)
        let baseAnchor = try! Base.loadIntro()
        let introAnchor = try! IntroSceneOM.loadIntro()
        introAnchor.actions.changeStoriesWithTrigger.onAction = loadStories
        arView.scene.anchors.append(baseAnchor)
        arView.scene.anchors.append(introAnchor)

        func loadStories(_ entity: Entity?) -> Void {
            arView.scene.anchors.remove(introAnchor)
            let storiesAnchor = try! StoriesSceneOM.loadStoriesScene()
            baseAnchor.addChild(storiesAnchor)
        }
        return arView
    }

    func updateUIView(_ uiView: ARView, context: Context) {}
}
EDIT:
Recreated the project using an implementation of Andy Jazz's code.
func makeUIView(context: Context) -> ARView {
    let arView = ARView(frame: .zero)
    var anchor = AnchorEntity()
    let scene01 = try! Experience.loadBoxScene()
    let scene02 = try! Experience.loadBallScene()

    // Base Scene
    let scene03 = try! Experience.loadFloppyScene()

    anchor = AnchorEntity(.plane(.horizontal, classification: .any,
                                 minimumBounds: [0.1, 0.1]))
    scene01.actions.boxTapped.onAction = loadScene02
    scene02.actions.ballTapped.onAction = loadScene01
    anchor.addChild(scene01)
    anchor.addChild(scene03)
    arView.scene.anchors.append(anchor)

    func loadScene02(_ entity: Entity?) -> Void {
        scene01.removeFromParent()
        anchor.addChild(scene02)
    }

    func loadScene01(_ entity: Entity?) -> Void {
        scene02.removeFromParent()
        anchor.addChild(scene01)
    }
    return arView
}
However, I still get the same issue where the anchor moves each time a new scene is added.
The code is quite simple, but the issue of loading scenes with a large number of polygons remains unresolved. At most, the current scene should contain no more than 100K polygons, but ideally they should stay within 50...70K. Texture resolution should not exceed 2K.
import UIKit
import RealityKit

class ViewController: UIViewController {
    @IBOutlet var arView: ARView!
    @IBOutlet var label: UILabel!
    var anchor = AnchorEntity()
    let scene01 = try! Experience.loadBox()
    let scene02 = try! Experience.loadBall()
    var cube = ModelEntity()
    var sphere = ModelEntity()

    override func viewDidLoad() {
        super.viewDidLoad()
        self.cube = scene01.steelBox?.children[0] as! ModelEntity
        self.sphere = scene02.ball?.children[0] as! ModelEntity
        self.anchor = AnchorEntity(.plane(.horizontal, classification: .any,
                                          minimumBounds: [0.1, 0.1]))
        self.anchor.addChild(cube)
        arView.scene.anchors.append(anchor)

        DispatchQueue.main.asyncAfter(deadline: .now() + 5.0) {
            self.label.text = String(describing: self.anchor.id)
        }
    }

    @IBAction func pressed(_ sender: UIButton) {
        self.cube.removeFromParent()
        self.anchor.addChild(sphere)
        self.label.text = String(describing: self.anchor.id)
    }
}

Create pdf with WKWebView.pdf(configuration:)

I want to create a PDF on macOS with the new WKWebView.pdf(configuration:), which was introduced in macOS 12/iOS 15. It tries to make use of the new async/await functionality (which I most likely have not grasped entirely, I am afraid...).
Right now, I am getting an error that I have no idea how to handle:
Error Domain=WKErrorDomain Code=1 "An unknown error occurred"
UserInfo={NSLocalizedDescription=An unknown error occurred}
I try to load an HTML string into a web view, from which I then want to generate the PDF. The function I use to generate my PDFDocument looks like this:
func generatePdf() async {
    let webView = WKWebView()
    await webView.loadHTMLString(html, baseURL: nil)
    let config = WKPDFConfiguration()
    config.rect = .init(origin: .zero, size: .init(width: 595.28, height: 841.89))
    do {
        // this is where the error is happening
        let pdfData = try await webView.pdf(configuration: config)
        self.pdf = PDFDocument(data: pdfData)
    } catch {
        print(error) // this error gets printed
    }
}
My best guess as it currently stands is that WKWebView's loadHTMLString has not finished loading the HTML. I did allow outgoing connections in the app sandbox, so that's not it...
For the sake of completeness, here's the entire code:
import SwiftUI
import PDFKit
import WebKit

@main
struct AFPdfApp: App {
    var body: some Scene {
        WindowGroup {
            ContentView()
        }
    }
}

struct ContentView: View {
    @StateObject private var vm = ViewModel()

    var body: some View {
        VStack {
            TextEditor(text: $vm.html)
                .frame(width: 300.0, height: 200.0)
                .border(Color.accentColor, width: 1.0)
                .padding()
            PdfViewWrapper(pdfDocument: $vm.pdf)
        }
        .toolbar {
            Button("Create PDF") {
                Task {
                    await vm.generatePdf()
                }
            }
        }
    }

    static let initHtml = """
    <h1>Some fancy html</h1>
    <h2>…and now how do I create a pdf from this?</h2>
    """
}

struct ContentView_Previews: PreviewProvider {
    static var previews: some View {
        ContentView()
    }
}

class ViewModel: ObservableObject {
    @Published var html = """
    <h1>Some fancy html</h1>
    <h2>…and now let's create some pdf…</h2>
    """
    @Published var pdf: PDFDocument? = nil

    func generatePdf() async {
        let webView = WKWebView()
        await webView.loadHTMLString(html, baseURL: nil)
        let config = WKPDFConfiguration()
        config.rect = .init(origin: .zero, size: .init(width: 595.28, height: 841.89))
        do {
            let pdfData = try await webView.pdf(configuration: config)
            self.pdf = PDFDocument(data: pdfData)
        } catch {
            print(error)
        }
    }
}

struct PdfViewWrapper: NSViewRepresentable {
    @Binding var pdfDocument: PDFDocument?

    func makeNSView(context: Context) -> PDFView {
        return PDFView()
    }

    func updateNSView(_ nsView: PDFView, context: Context) {
        nsView.document = pdfDocument
    }
}
After Chris made me take another look at it (thanks for that :-) ), I am now a step closer to a working solution.
It really seems as though I have to wait for the webView to load the HTML prior to creating the PDF. While I was not able to make it work with WKWebView.pdf(configuration:), I now have a (kind of…) working solution using WKWebView.createPDF(configuration:completionHandler:):
func generatePdf() {
    webView.loadHTMLString(htmlString, baseURL: nil)
    DispatchQueue.main.asyncAfter(deadline: .now() + 0.5) {
        let config = WKPDFConfiguration()
        config.rect = .init(origin: .zero, size: .init(width: 595.28, height: 841.89))
        self.webView.createPDF(configuration: config) { result in
            switch result {
            case .success(let data):
                self.pdf = PDFDocument(data: data)
            case .failure(let error):
                print(error)
            }
        }
    }
}
I said "kind of works" above, because the resulting pdf seems to introduce a new line after each word, which is weird–but I will scope that issue to another research/question on SO.
Again, for the sake of completeness, here's the whole "app":
import SwiftUI
import PDFKit
import WebKit

@main
struct AFPdfApp: App {
    var body: some Scene {
        WindowGroup {
            ContentView()
        }
    }
}

// MARK: View
struct ContentView: View {
    @StateObject private var vm = ViewModel()

    var body: some View {
        VStack {
            TextEditor(text: $vm.htmlString)
                .frame(width: 300.0, height: 200.0)
                .border(Color.accentColor, width: 1.0)
                .padding()
            WebViewWrapper(htmlString: $vm.htmlString)
            PdfViewRepresentable(pdfDocument: $vm.pdf)
        }
        .toolbar {
            Button("Create PDF") {
                Task {
                    vm.generatePdf()
                }
            }
        }
    }
}

// MARK: ViewModel
class ViewModel: ObservableObject {
    @Published var htmlString = """
    <h1>Some fancy html</h1>
    <h2>…and now let's create some pdf…</h2>
    """
    @Published var webView = WKWebView()
    @Published var pdf: PDFDocument? = nil

    func generatePdf() {
        webView.loadHTMLString(htmlString, baseURL: nil)
        DispatchQueue.main.asyncAfter(deadline: .now() + 0.5) {
            let config = WKPDFConfiguration()
            config.rect = .init(origin: .zero, size: .init(width: 595.28, height: 841.89))
            self.webView.createPDF(configuration: config) { result in
                switch result {
                case .success(let data):
                    self.pdf = PDFDocument(data: data)
                case .failure(let error):
                    print(error)
                }
            }
        }
    }
}

// MARK: ViewRepresentables
struct WebViewWrapper: NSViewRepresentable {
    @Binding var htmlString: String

    public func makeNSView(context: Context) -> WKWebView {
        return WKWebView()
    }

    public func updateNSView(_ nsView: WKWebView, context: Context) {
        nsView.loadHTMLString(htmlString, baseURL: nil)
    }
}

struct PdfViewRepresentable: NSViewRepresentable {
    @Binding var pdfDocument: PDFDocument?

    func makeNSView(context: Context) -> PDFView {
        return PDFView()
    }

    func updateNSView(_ nsView: PDFView, context: Context) {
        nsView.document = pdfDocument
    }
}
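A possible alternative to the fixed 0.5 s delay would be to bridge WKNavigationDelegate's didFinish callback into async/await with a continuation and only call pdf(configuration:) once the page has actually loaded. A minimal sketch (untested beyond the happy path; PdfRenderer is a hypothetical helper name):
import WebKit
import PDFKit

final class PdfRenderer: NSObject, WKNavigationDelegate {
    private let webView = WKWebView()
    private var continuation: CheckedContinuation<Void, Never>?

    func renderPdf(from html: String) async throws -> PDFDocument? {
        webView.navigationDelegate = self
        webView.loadHTMLString(html, baseURL: nil)
        // Suspend until webView(_:didFinish:) fires instead of guessing with a timer.
        await withCheckedContinuation { continuation = $0 }
        let config = WKPDFConfiguration()
        config.rect = .init(origin: .zero, size: .init(width: 595.28, height: 841.89))
        let data = try await webView.pdf(configuration: config)
        return PDFDocument(data: data)
    }

    func webView(_ webView: WKWebView, didFinish navigation: WKNavigation!) {
        continuation?.resume()
        continuation = nil
    }
}
Note that didFinish only means the main navigation finished; images and other subresources may still be loading, which is what the JavaScript readyState check in the next answer addresses.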
Here is a way that I got this working. It uses a completely hidden WKWebView to render the content, using a delegate callback and javascript execution to determine when the page has fully loaded.
When ready, the new iOS 15 WKWebView method .pdf() is called, which generates the PDF data.
Bindings are used to update the parent view, which generates a PDFDocument from the data and navigates to it.
Caveat: The native WKWebView PDF methods do not produce paginated PDF files, so it will be one long page!
In the main view it starts with a button tap. This view has a pdfData variable that is of type Data and will be used to make the PDF document.
Button("Generate PDF") {
// Prevent triggering again while already processing
guard !isWorking else { return }
// Reset flags
isReadyToRenderPDF = false
isWorking = true
// Initialise webView with frame equal to A4 paper size in points
// (I also tried UIScreen.main.bounds)
self.wkWebView = WKWebView(frame: CGRect(origin: .zero, size: CGSize(width: 595, height: 842))) // A4 size
// Set web view navigation delegate.
// You must keep a strong reference to this, so I made it an #State property on this view
// This is a class that takes bindings so it can update the data and signal completion
self.navigationDelegate = WKWebViewDelegate(wkWebView!, pdfData: $data, isReadyToRenderPDF: $isReadyToRenderPDF)
wkWebView!.navigationDelegate = self.navigationDelegate
// Generate HTML. You could also use a simple HTML string.
// This is just my implementation.
let htmlContent = htmlComposer.makeHTMLString()
// Load HTML into web view
wkWebView!.loadHTMLString(htmlContent, baseURL: nil)
// Now the navigation delegate responds when data is updated.
}
The WKWebView delegate class has this callback method.
func webView(_ webView: WKWebView, didFinish navigation: WKNavigation!) {
    // This ensures the following code only runs once, as the delegate method is called multiple times.
    guard !readyOnce else { return }

    // Use JavaScript to check the document ready state (all images and resources loaded)
    webView.evaluateJavaScript("document.readyState == \"complete\"") { result, error in
        if (error != nil) {
            print(error!.localizedDescription)
            return
        } else if result as? Int == 1 {
            self.readyOnce = true
            // The completion handler is synchronous, so hop into a Task to await the async pdf() call
            Task {
                do {
                    // Create PDF using the WKWebView method and assign it to the binding (updates the data variable in the main view)
                    self.pdfData = try await webView.pdf()
                    // Signal to the parent view via the binding
                    self.isReadyToRenderPDF = true
                } catch {
                    print(error.localizedDescription)
                }
            }
        } else {
            return
        }
    }
}
Back in the parent view, respond to the change of the Boolean value.
.onChange(of: isReadyToRenderPDF) { _ in
    isWorking = false
    DispatchQueue.main.async {
        // This toggles a navigation link or sheet displaying the PDF view using the pdfData variable
        isShowingPDFView = true
    }
}
Finally, here is a PDFKit PDFView wrapped in UIViewRepresentable.
import SwiftUI
import PDFKit

struct PDFKitRepresentedView: UIViewRepresentable {
    let data: Data

    init(_ data: Data) {
        self.data = data
    }

    func makeUIView(context: UIViewRepresentableContext<PDFKitRepresentedView>) -> PDFView {
        // Create PDFKit view and document
        let pdfView = PDFView()
        pdfView.document = PDFDocument(data: data)
        pdfView.autoScales = true
        pdfView.displaysPageBreaks = true
        pdfView.usePageViewController(true, withViewOptions: nil)
        pdfView.displayDirection = .horizontal
        pdfView.displayMode = .singlePage
        return pdfView
    }

    func updateUIView(_ uiView: PDFView, context: UIViewRepresentableContext<PDFKitRepresentedView>) {
        // Not implemented
    }
}

swiftui dealloc and realloc AVplayer with many videos

I have many simultaneous videos. Through an Int var (var test1 and var test2) I would like to be able to add only a certain video and remove all the others, so as not to have memory problems.
As soon as the view is loaded, the value nil is assigned to each player, and when test1 == test2 it should load the video in that player and nil in the others.
The problem is that despite the testing variable being a binding (struct VideoPlayer, @Binding var testing), it does not update the state of the player, which always remains nil.
Below is the best solution I have obtained so far.
Any ideas? Thank you all.
struct CustomPlayer: View {
    @Binding var test1: Int
    @Binding var test2: Int
    @State var path: String
    @State var testing: AVPlayer? = nil

    var body: some View {
        if (test1 == test2) {
            self.testing? = AVPlayer(url: URL(fileURLWithPath: Bundle.main.path(forResource: "\(path)", ofType: "mp4")!))
            self.testing?.play()
        } else {
            self.testing?.replaceCurrentItem(with: nil)
        }
        return ZStack {
            VideoPlayer(testing: self.$testing)
        }
    }
}

struct VideoPlayer : UIViewControllerRepresentable {
    @Binding var testing: AVPlayer?

    func makeUIViewController(context: UIViewControllerRepresentableContext<VideoPlayer>) -> AVPlayerViewController {
        let controller = AVPlayerViewController()
        controller.player = testing
        controller.showsPlaybackControls = false
        controller.view.backgroundColor = UIColor.white
        return controller
    }

    func updateUIViewController(_ uiViewController: AVPlayerViewController, context: UIViewControllerRepresentableContext<VideoPlayer>) {
    }
}
Here is an example of how to load multiple videos simultaneously (I added an autoplay feature) and remove all of them except the selected one.
To remove the other videos, tap on the video you want to keep.
I'm not sure this solves your initial problem, but it may give you a clue where to look next.
The code can be copy/pasted as is; I declared everything in one file for convenient Stack Overflow use.
import SwiftUI
import AVKit

struct VideoModel: Identifiable {
    let id: Int
    let name: String
    let type: String = ".mp4"
}

final class VideoData: ObservableObject {
    @Published var videos = [VideoModel(id: 100, name: "video"),
                             VideoModel(id: 101, name: "wow"),
                             VideoModel(id: 102, name: "okay")]
}

// Multiple item player
struct MultipleVideoPlayer: View {
    @EnvironmentObject var userData: VideoData

    var body: some View {
        VStack(alignment: .center, spacing: 8) {
            ForEach(userData.videos) { video in
                VideoPlayer(video: .constant(video))
                    .frame(minWidth: 0, maxWidth: .infinity, minHeight: 250, maxHeight: 250, alignment: .center)
            }
        }
    }
}

// Single item player
struct VideoPlayer : UIViewControllerRepresentable {
    @EnvironmentObject var userData: VideoData
    @Binding var video: VideoModel

    func makeUIViewController(context: Context) -> AVPlayerViewController {
        guard let path = Bundle.main.path(forResource: video.name, ofType: video.type) else {
            fatalError("\(video.name)\(video.type) not found")
        }
        let url = URL(fileURLWithPath: path)
        let playerItem = AVPlayerItem(url: url)

        context.coordinator.player = AVPlayer(playerItem: playerItem)
        context.coordinator.player?.isMuted = true
        context.coordinator.player?.actionAtItemEnd = .none
        NotificationCenter.default.addObserver(context.coordinator,
                                               selector: #selector(Coordinator.playerItemDidReachEnd(notification:)),
                                               name: NSNotification.Name.AVPlayerItemDidPlayToEndTime,
                                               object: context.coordinator.player?.currentItem)

        let controller = AVPlayerViewController()
        controller.player = context.coordinator.player
        controller.showsPlaybackControls = false
        controller.view.backgroundColor = UIColor.white
        controller.delegate = context.coordinator

        let tapRecognizer = UITapGestureRecognizer(target: context.coordinator, action: #selector(Coordinator.playerDidTap))
        controller.view.addGestureRecognizer(tapRecognizer)
        return controller
    }

    func updateUIViewController(_ uiViewController: AVPlayerViewController, context: Context) {
        uiViewController.player?.play()
    }

    func makeCoordinator() -> Coordinator {
        let coord = Coordinator(self)
        return coord
    }

    class Coordinator: NSObject, AVPlayerViewControllerDelegate {
        var parent: VideoPlayer
        var player: AVPlayer?

        init(_ playerViewController: VideoPlayer) {
            self.parent = playerViewController
        }

        @objc func playerItemDidReachEnd(notification: NSNotification) {
            if let playerItem: AVPlayerItem = notification.object as? AVPlayerItem {
                playerItem.seek(to: CMTime.zero, completionHandler: nil)
            }
        }

        @objc func playerDidTap() {
            parent.userData.videos = parent.userData.videos.filter { videoItem in
                return videoItem.id == parent.video.id
            }
        }
    }
}

// Preview
struct AnotherEntry_Previews: PreviewProvider {
    static var previews: some View {
        MultipleVideoPlayer()
    }
}
And in SceneDelegate.swift, replace the app entry point with:
window.rootViewController = UIHostingController(rootView: MultipleVideoPlayer().environmentObject(VideoData()))
The key thing here is to share the VideoData resource; you can achieve that with an EnvironmentObject or some other shared-data approach.
The videos below were added to the project and included in its Target Membership:
@Published var videos = [VideoModel(id: 100, name: "video"),
                         VideoModel(id: 101, name: "wow"),
                         VideoModel(id: 102, name: "okay")]
Your code seems to be OK. Are you sure the AVPlayer(...) path is correct and testing is not nil? Put
.....
self.testing?.play()
print("-----> testing: \(testing.debugDescription)")
Is testing nil at that point?
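One more thing worth checking in the original CustomPlayer: self.testing? = AVPlayer(...) uses optional-chaining assignment, which silently does nothing as long as testing is still nil, and mutating @State from inside body is unreliable anyway. A minimal sketch of the same idea (same property names as the question, the question's VideoPlayer representable) that assigns the player outside of body:
struct CustomPlayer: View {
    @Binding var test1: Int
    @Binding var test2: Int
    @State var path: String
    @State var testing: AVPlayer? = nil

    var body: some View {
        VideoPlayer(testing: $testing)
            .onAppear(perform: syncPlayer)
            .onChange(of: test1) { _ in syncPlayer() }
            .onChange(of: test2) { _ in syncPlayer() }
    }

    private func syncPlayer() {
        if test1 == test2,
           let filePath = Bundle.main.path(forResource: path, ofType: "mp4") {
            // Plain assignment: `testing? = ...` would be skipped while testing is nil.
            testing = AVPlayer(url: URL(fileURLWithPath: filePath))
            testing?.play()
        } else {
            testing?.replaceCurrentItem(with: nil)
        }
    }
}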

Pass value from SwiftUI to ARKit dynamically

I'm currently working on a personal project using ARKit with Swift.
In this app, a skeleton appears on a detected body and tracks the body's motion.
What I want to know is how to change the position of the skeleton dynamically by touching a slider.
The position of the skeleton is defined in the ARKit class ARDelegateHandler, such as self.characterOffset = [1, 0, 0], which means 1 m to the right of the detected body.
I want to change characterOffset's x-axis value with a slider in SwiftUI (something like self.characterOffset = [x, 0, 0]).
Here is my code.
ARViewContainer.swift
import SwiftUI
import RealityKit
import ARKit
import Combine

struct ARViewContainer: UIViewRepresentable {
    let characterAnchor = AnchorEntity()
    @Binding var offsetValue: Float
    @ObservedObject var offsetInstance = Offset()

    func makeUIView(context: Context) -> ARView {
        let arView = ARView(frame: .zero)
        arView.session.delegate = context.coordinator
        return arView
    }

    func updateUIView(_ uiView: ARView, context: Context) {
        let configuration = ARBodyTrackingConfiguration()
        uiView.session.run(configuration)
        uiView.scene.addAnchor(characterAnchor)
        print(offsetValue)
    }

    func makeCoordinator() -> ARDelegateHandler {
        ARDelegateHandler(self, anchor: characterAnchor)
    }

    class ARDelegateHandler: NSObject, ARSessionDelegate {
        var arVC: ARViewContainer
        let characterAnchor: AnchorEntity
        var character: BodyTrackedEntity?
        var characterOffset: SIMD3<Float>

        init(_ control: ARViewContainer, anchor: AnchorEntity) {
            self.arVC = control
            self.characterAnchor = anchor
            self.characterOffset = [1, 0, 0]
            super.init()
            setSkeleton()
        }

        func setSkeleton() {
            var cancellable: AnyCancellable? = nil
            cancellable = Entity.loadBodyTrackedAsync(named: "character/robot").sink(
                receiveCompletion: { completion in
                    if case let .failure(error) = completion {
                        print("Error: Unable to load model: \(error.localizedDescription)")
                    }
                    cancellable?.cancel()
                }, receiveValue: { (character: Entity) in
                    if let character = character as? BodyTrackedEntity {
                        character.scale = [1, 1, 1]
                        self.character = character
                        cancellable?.cancel()
                    } else {
                        print("Error: Unable to load model as BodyTrackedEntity")
                    }
                })
        }

        func session(_ session: ARSession, didUpdate anchors: [ARAnchor]) {
            for anchor in anchors {
                guard let bodyAnchor = anchor as? ARBodyAnchor else { continue }
                let bodyPosition = simd_make_float3(bodyAnchor.transform.columns.3)
                characterAnchor.position = bodyPosition + characterOffset
                characterAnchor.orientation = Transform(matrix: bodyAnchor.transform).rotation
                if let character = character, character.parent == nil {
                    characterAnchor.addChild(character)
                }
            }
        }
    }
}
ContentView.swift (the user is expected to touch the slider, which changes offsetValue; SlideView has already been implemented):
import SwiftUI
import RealityKit
import ARKit
import Combine

struct ContentView : View {
    @State var offsetValue: Float = 0.0
    @ObservedObject var offsetInstance = Offset()

    var body: some View {
        VStack {
            Button(action: { self.offsetInstance.setOffset(offset: 1.0) },
                   label: { Text("check") })
            ZStack(alignment: .bottom) {
                ARViewContainer(offsetValue: $offsetValue)
                SlideView(offSetValue: $offsetValue)
            }
        }
    }
}
Thank you very much for your help!
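For what it's worth, one minimal way to close the gap (a sketch, not the only option): the slider value never actually reaches ARDelegateHandler, but updateUIView already runs on every SwiftUI update, so the binding can be forwarded to the coordinator there and picked up by the next session(_:didUpdate:) call:
func updateUIView(_ uiView: ARView, context: Context) {
    // Forward the latest slider value to the session delegate;
    // session(_:didUpdate:) applies it on the next body anchor update.
    context.coordinator.characterOffset = [offsetValue, 0, 0]
}
Running the ARBodyTrackingConfiguration and adding characterAnchor are probably better moved into makeUIView, so the session isn't restarted on every SwiftUI update.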