Create sphere when screen tapped with adjustable radius in RealityKit [duplicate] - swift

I'm creating an app in RealityKit that generates a shape based on user input. For example, if the user enters a radius of 0.1 meters, the shape (sphere in my case) will have a radius of 0.1 meters, same logic for 0.2, 0.3, etc. The code all works, but I want to make it so the sphere appears when the user taps the screen.
Here is my code for the page that takes in user input:
/// Observable model holding the user-selected sphere radius.
class UserInput: ObservableObject {
    // Fixed scrape corruption: the attribute is `@Published`, not `#Published`.
    /// Sphere radius in meters; updated by the increment/decrement buttons.
    @Published var score: Float = 0.0
}
/// Input screen: lets the user pick a radius, then navigate to the AR view.
struct PreviewView: View {
    // Fixed scrape corruption: `@ObservedObject`, not `#ObservedObject`.
    // NOTE(review): since this view creates the object, `@StateObject` would be
    // safer on iOS 14+ — confirm deployment target before changing.
    @ObservedObject var input = UserInput()

    var body: some View {
        NavigationView {
            ZStack {
                Color.black
                VStack {
                    Text("Change the radius of your AR sphere")
                        .foregroundColor(.white)
                    Text("\(String(format: "%.1f", self.input.score)) meters")
                        .foregroundColor(.white)
                        .bold()
                        .font(.title)
                        .padding(10)
                    Button(action: { self.input.score += 0.1 }) {
                        Text("Increment by 0.1 meters")
                    }
                    .padding(10)
                    // Clamp at zero so the radius passed to generateSphere
                    // can never go negative.
                    Button(action: { self.input.score = max(0, self.input.score - 0.1) }) {
                        Text("Decrease by 0.1 meters")
                    }
                    .padding(10)
                    NavigationLink(destination: Reality(input: self.input)) {
                        Text("View in AR")
                            .bold()
                            .padding(.top, 30)
                    }
                }
            }
            .ignoresSafeArea()
        }
    }
} // The struct's closing brace was missing in the original snippet.
Here is the code for the Reality ARView:
/// Renders an ARView containing a sphere whose radius comes from `UserInput`.
struct Reality: UIViewRepresentable {
    // Fixed scrape corruption: `@ObservedObject`, not `#ObservedObject`.
    @ObservedObject var input: UserInput

    func makeUIView(context: Context) -> ARView {
        let arView = ARView(frame: .zero)
        // The radius is read once here; later changes to `input.score` do not
        // regenerate the mesh because updateUIView is empty.
        let model = ModelEntity(mesh: .generateSphere(radius: input.score))
        let anchor = AnchorEntity(plane: .horizontal)
        anchor.addChild(model)
        arView.scene.anchors.append(anchor)
        return arView
    }

    func updateUIView(_ uiView: ARView, context: Context) {}
}
There are tons of examples of generating shapes when the user touches the screen, that's not my issue. The fact that I'm taking in user input is what makes this difficult.
Here is some code that does what I want, but without user input. It has some physics built in that I plan on implementing once I get the user input to work.
/// Hosts an ARView with a kinematic ground plane and a tap recognizer that
/// forwards touches to the Coordinator.
struct ARViewContainer: UIViewRepresentable {
    func makeUIView(context: Context) -> ARView {
        let view = ARView(frame: .zero)

        // Kinematic white plane that dynamic bodies can land on.
        let groundAnchor = AnchorEntity(plane: .horizontal)
        let ground = ModelEntity(
            mesh: MeshResource.generatePlane(width: 1, depth: 1),
            materials: [SimpleMaterial(color: .white, isMetallic: true)]
        )
        ground.physicsBody = PhysicsBodyComponent(
            massProperties: .init(mass: 1),
            material: .generate(friction: 1, restitution: 1),
            mode: .kinematic
        )
        ground.generateCollisionShapes(recursive: true)
        groundAnchor.addChild(ground)
        view.scene.anchors.append(groundAnchor)

        // Built-in pinch/rotate gestures on the plane, plus tap handling
        // routed through the coordinator.
        view.installGestures([.scale, .rotation], for: ground)
        let tap = UITapGestureRecognizer(target: context.coordinator,
                                         action: #selector(Coordinator.handleTap))
        view.addGestureRecognizer(tap)
        context.coordinator.view = view
        return view
    }

    func makeCoordinator() -> Coordinator {
        Coordinator()
    }

    func updateUIView(_ uiView: ARView, context: Context) {}
}
Here is the Coordinator class that generates the box that I want to be adjustable in size:
/// Handles taps: raycasts to a horizontal plane and drops a dynamic box there.
class Coordinator {
    weak var view: ARView?

    // Fixed scrape corruption: `@objc`, not `#objc` — required for #selector dispatch.
    @objc func handleTap(_ recognizer: UITapGestureRecognizer) {
        guard let view = view else { return }
        let location = recognizer.location(in: view)
        let results = view.raycast(from: location, allowing: .estimatedPlane, alignment: .horizontal)
        // Early exit keeps the happy path un-nested.
        guard let result = results.first else { return }

        let anchorEntity = AnchorEntity(raycastResult: result)
        let box = ModelEntity(
            mesh: MeshResource.generateBox(size: 0.3),
            materials: [SimpleMaterial(color: .black, isMetallic: true)]
        )
        // Physics modes: .static = immovable, .dynamic = simulated by the engine,
        // .kinematic = moved by the user.
        box.physicsBody = PhysicsBodyComponent(massProperties: .init(mass: 0.5),
                                               material: .generate(),
                                               mode: .dynamic)
        box.generateCollisionShapes(recursive: true)
        // Spawn 0.7 m above the hit point so the box falls onto the plane.
        box.position = simd_make_float3(0, 0.7, 0)
        anchorEntity.addChild(box)
        view.scene.anchors.append(anchorEntity)
    }
}
I tried fusing these two projects into what I want, but I get all sorts of errors I have no idea how to fix, and when I try something new, a bunch of other errors appear. I think it boils down to the @ObservedObject and the fact that I have multiple classes/structs compared to my project with user input. The user input will go into the coordinator class, but ultimately it is the ARViewContainer that actually renders the view.
If anyone can help me out, I would be incredibly grateful.

To increase / decrease sphere's radius and then position sphere by tap, use the following code.
Now, with working button and coordinator, it will be much easier to implement a raycasting.
import SwiftUI
import RealityKit
// App root: shows the radius-picker screen, extended under the safe area.
struct ContentView : View {
var body: some View {
PrevView().ignoresSafeArea()
}
}
Reality view.
/// AR host view: places a single sphere (radius from the binding) when tapped.
struct Reality: UIViewRepresentable {
    // Fixed scrape corruption: `@Binding`, not `#Binding`.
    @Binding var input: Float
    let arView = ARView(frame: .zero)

    /// Owns the tap gesture; places at most one sphere per scene.
    class ARCoordinator: NSObject {
        var manager: Reality

        init(_ manager: Reality) {
            self.manager = manager
            super.init()
            let recognizer = UITapGestureRecognizer(target: self,
                                                    action: #selector(tapped))
            manager.arView.addGestureRecognizer(recognizer)
        }

        // Fixed scrape corruption: `@objc`, not `#objc`.
        @objc func tapped(_ recognizer: UITapGestureRecognizer) {
            // Only place the sphere once (scene starts empty).
            if manager.arView.scene.anchors.isEmpty {
                let model = ModelEntity(mesh: .generateSphere(radius: manager.input))
                let anchor = AnchorEntity(world: [0, 0, -2])
                // Later: use AnchorEntity(world: result.worldTransform) from a raycast.
                anchor.addChild(model)
                manager.arView.scene.anchors.append(anchor)
            }
        }
    }

    func makeCoordinator() -> ARCoordinator { ARCoordinator(self) }
    func makeUIView(context: Context) -> ARView { return arView }
    func updateUIView(_ uiView: ARView, context: Context) { }
}
PrevView view.
/// Radius-picker screen with Increase/Decrease buttons and a link to the AR view.
struct PrevView: View {
    // Fixed scrape corruption: `@State`, not `#State`. Radius in meters.
    @State private var input: Float = 0.0

    var body: some View {
        NavigationView {
            ZStack {
                Color.black
                VStack {
                    Text("\(String(format: "%.1f", input)) meters")
                        .foregroundColor(.yellow)
                    HStack {
                        Spacer()
                        // Mutate the state directly — `$input.wrappedValue` is
                        // an unnecessary indirection inside the owning view.
                        Button(action: { input -= 0.1 }) {
                            Text("Decrease").foregroundColor(.red)
                        }
                        Spacer()
                        Button(action: { input += 0.1 }) {
                            Text("Increase").foregroundColor(.red)
                        }
                        Spacer()
                    }
                    NavigationLink(destination: Reality(input: $input)
                        .ignoresSafeArea()) {
                        Text("View in AR")
                    }
                }
            }.ignoresSafeArea()
        }
    }
}

Related

Recreate Safari's tab animation when opening / closing a WKWebView

So I am trying to recreate the animation in Safari where you have a window open and then click the button to "view all tabs". Then, when you tap on an individual window, it animates open.
Here is a video of an example: https://imgur.com/QNI26YK
I'm using SwiftUI currently but think I might need to do it in UIKit? Anyways, i'll show you what I have so far.
The issue I have is the scaling of the window. In safari it scales so seamlessly, where when I do it you can see it readjust. It's like they just minimize the window in some way.
Anyways, here is what I have. Any ideas would be great! Thanks
/// Toggles a web view between a small card and fullscreen with a spring animation.
struct WebViewAnimationTest: View {
    // Fixed scrape corruption: `@State` / `@Namespace`, not `#State` / `#Namespace`.
    @State var show = false
    @Namespace var namespace

    var body: some View {
        ZStack {
            SimpleWebView(url: "https://google.com", fullscreen: show)
                // NOTE(review): `.infinity` is not a valid fixed frame dimension;
                // consider `.frame(maxWidth: .infinity, maxHeight: .infinity)`
                // for the expanded state — confirm intended layout.
                .frame(width: show ? .infinity : 200, height: show ? .infinity : 300)
                // Nearly-transparent overlay so taps land on SwiftUI, not WKWebView.
                .overlay(Color.primary.opacity(0.01))
                .ignoresSafeArea()
                .onTapGesture {
                    withAnimation(.spring()) {
                        show.toggle()
                    }
                }
                .matchedGeometryEffect(id: "id", in: namespace)
        }
    }
}
WEBVIEW:
import Combine
import SwiftUI
import WebKit
/// Minimal WKWebView wrapper that scales itself down when not fullscreen.
struct SimpleWebView: UIViewRepresentable {
    typealias UIViewType = WKWebView
    var url: String
    var fullscreen: Bool = false

    func makeUIView(context: Context) -> WKWebView {
        let webView = WKWebView()
        // Avoid force-unwrapping: a malformed URL string no longer crashes,
        // it simply loads nothing.
        if let requestURL = URL(string: url) {
            webView.load(URLRequest(url: requestURL))
        }
        return webView
    }

    func updateUIView(_ uiView: UIViewType, context: Context) {
        if fullscreen {
            uiView.transform = CGAffineTransform(scaleX: 1, y: 1)
        } else {
            // NOTE(review): getRect() is a project-local helper (presumably the
            // screen bounds) — confirm its definition.
            let actualWidth = (getRect().width - 60)
            let cardWidth = actualWidth / 2
            let scale = cardWidth / actualWidth
            uiView.transform = CGAffineTransform(scaleX: scale, y: scale)
        }
    }
}
Here is what I have so far. You might want to switch to LazyVGrid with ScrollView. The idea is to animate changes in scale effect.
import SwiftUI
// App root: shows the tab-animation demo.
struct ContentView: View {
var body: some View {
WebViewAnimationTest()
}
}
/// Answer version: animates a web view's scale between card (0.5) and
/// fullscreen (1.0) inside a scroll view.
struct WebViewAnimationTest: View {
    // Fixed scrape corruption: `@State` / `@Namespace`, not `#State` / `#Namespace`.
    @State var show = false
    @Namespace var namespace
    // Single web view reused across renders, identified by its UUID for ForEach.
    let data = SimpleWebView(url: "https://google.com")

    var body: some View {
        ScrollView {
            ZStack {
                HStack {
                    ForEach([data], id: \.id) { view in
                        view
                            .frame(width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height)
                            .ignoresSafeArea()
                            // Animate between card and fullscreen scale.
                            .scaleEffect(show ? 1.0 : 0.5)
                            .onTapGesture {
                                withAnimation {
                                    show.toggle()
                                }
                            }
                    }
                }
            }
        }
    }
}
import WebKit
/// Minimal WKWebView wrapper with a stable identity for use in ForEach.
struct SimpleWebView: UIViewRepresentable {
    typealias UIViewType = WKWebView
    var url: String
    var fullscreen: Bool = false
    // Stable identity so ForEach(..., id: \.id) works.
    let id = UUID()

    func makeUIView(context: Context) -> WKWebView {
        let webView = WKWebView()
        // Avoid force-unwrapping the URL; skip loading if the string is invalid.
        if let requestURL = URL(string: url) {
            webView.load(URLRequest(url: requestURL))
        }
        return webView
    }

    func updateUIView(_ uiView: UIViewType, context: Context) {
    }
}
// Xcode canvas preview.
struct ContentView_Previews: PreviewProvider {
static var previews: some View {
ContentView()
}
}
You can use 'UIView.transition' like below
// Fragment (UIKit context assumed — `self.view` is a view controller's view).
let webView = WKWebView()
self.view.addSubview(webView)
// Flip-from-right transition while revealing the web view.
UIView.transition(with: self.view, duration: 0.5, options: .transitionFlipFromRight, animations: {
webView.isHidden = false
}, completion: nil)
// Cross-dissolve while hiding it again.
UIView.transition(with: self.view, duration: 0.5, options: .transitionCrossDissolve, animations: {
webView.isHidden = true
}, completion: nil)

How to make a text appear on the screen after pressing the button?

I want to make text display on the screen according to the different scenes after pressing the button. For example, if model A is displayed, the text "A" will appear on the screen. Similarly, if model B is displayed, the text "B" will appear. I am currently creating an Augmented Reality app using the SwiftUI interface and RealityKit but I am not sure what to do in the next step.
Here is my code:
import SwiftUI
import RealityKit
/// AR screen with buttons to inspect the scene and remove all anchors.
struct ContentView : View {
    // Fixed scrape corruption: `@State`, not `#State`.
    @State var arView = ARView(frame: .zero)

    var body: some View {
        ZStack {
            ARViewContainer(arView: $arView)
            HStack {
                Spacer()
                Button("information") {
                    print(self.arView.scene.name)
                    print(arView.scene.anchors.startIndex)
                    print(arView.scene.anchors.endIndex)
                }
                Spacer()
                Button("remove") {
                    stop()
                }
                Spacer()
            }
        } .edgesIgnoringSafeArea(.all)
    }

    /// Removes every anchor (and the models attached to them) from the scene.
    func stop() {
        arView.scene.anchors.removeAll()
    }
}
/// Wraps the shared ARView and loads two Reality Composer scenes into it.
struct ARViewContainer: UIViewRepresentable {
    // Fixed scrape corruption: `@Binding`, not `#Binding`.
    @Binding var arView: ARView

    func makeUIView(context: Context) -> ARView {
        // Scenes generated from the Reality Composer file "Experience1".
        // `try!` is acceptable here only because a missing bundled resource
        // is a programmer error.
        let boxAnchor = try! Experience1.loadBox()
        let crownAnchor = try! Experience1.loadCrown()
        arView.scene.anchors.append(boxAnchor)
        arView.scene.anchors.append(crownAnchor)
        return arView
    }

    func updateUIView(_ uiView: ARView, context: Context) { }
}
From the code above, if boxAnchor and crownAnchor are displayed, the text "Box" and "Crown" will appear on the screen respectively. If anyone knows how to do that, please guide me or suggest a tutorial that I can use to study.
Sorry if I use the wrong technical terms. Thank you
Use Combine's reactive subscriber and MVVM's bindings to update string values for Text views.
import SwiftUI
import RealityKit
import Combine
/// Overlays two Text views on the AR view; their strings are updated by
/// ARViewContainer via bindings when entities become anchored.
struct ContentView : View {
    // Fixed scrape corruption: `@State`, not `#State`.
    @State private var arView = ARView(frame: .zero)
    @State private var str01: String = "...some text..."
    @State private var str02: String = "...some text..."

    var body: some View {
        ZStack {
            ARViewContainer(arView: $arView, str01: $str01, str02: $str02)
                .ignoresSafeArea()
            VStack {
                Spacer()
                Text(str01)
                    .foregroundColor(.white)
                    .font(.largeTitle)
                Divider()
                Text(str02)
                    .foregroundColor(.white)
                    .font(.largeTitle)
                Spacer()
            }
        }
    }
}
The miracle happens in the escaping closure of subscribe(to:) instance method. What will be the conditions in the if-statements is up to you.
/// Loads Reality Composer scenes and uses a Combine scene-event subscription
/// to push label strings back into the SwiftUI bindings.
struct ARViewContainer: UIViewRepresentable {
    // Fixed scrape corruption: `@Binding` / `@State`, not `#Binding` / `#State`.
    @Binding var arView: ARView
    @Binding var str01: String
    @Binding var str02: String
    // Keeps the Combine subscription alive for the life of the view.
    @State var subs: [AnyCancellable] = []

    func makeUIView(context: Context) -> ARView {
        let boxAnchor = try! Experience.loadBox()
        let crownAnchor = try! Experience.loadCrown()
        print(arView.scene.anchors.count)
        // Anchors are appended after a delay so the DidAddEntity subscription
        // (set up in updateUIView) exists before the events fire.
        DispatchQueue.main.asyncAfter(deadline: .now() + 2) {
            arView.scene.anchors.append(boxAnchor)
            arView.scene.anchors.append(crownAnchor)
            print(arView.scene.anchors.count)
        }
        return arView
    }

    func updateUIView(_ view: ARView, context: Context) {
        // NOTE(review): this subscribes on every SwiftUI update, accumulating
        // duplicate subscriptions in `subs`; consider moving the subscription
        // into makeUIView or a Coordinator.
        DispatchQueue.main.async {
            _ = view.scene.subscribe(to: SceneEvents.DidAddEntity.self) { _ in
                if view.scene.anchors.count > 0 {
                    if view.scene.anchors[0].isAnchored {
                        str01 = "Crown"
                        str02 = "Cube"
                    }
                }
            }.store(in: &subs)
        }
    }
}

User input to change size of shape that is generated by tap

I'm creating an app in RealityKit that generates a shape based on user input. For example, if the user enters a radius of 0.1 meters, the shape (sphere in my case) will have a radius of 0.1 meters, same logic for 0.2, 0.3, etc. The code all works, but I want to make it so the sphere appears when the user taps the screen.
Here is my code for the page that takes in user input:
/// Observable model holding the user-selected sphere radius.
class UserInput: ObservableObject {
    // Fixed scrape corruption: the attribute is `@Published`, not `#Published`.
    /// Sphere radius in meters; updated by the increment/decrement buttons.
    @Published var score: Float = 0.0
}
/// Input screen: lets the user pick a radius, then navigate to the AR view.
struct PreviewView: View {
    // Fixed scrape corruption: `@ObservedObject`, not `#ObservedObject`.
    // NOTE(review): since this view creates the object, `@StateObject` would be
    // safer on iOS 14+ — confirm deployment target before changing.
    @ObservedObject var input = UserInput()

    var body: some View {
        NavigationView {
            ZStack {
                Color.black
                VStack {
                    Text("Change the radius of your AR sphere")
                        .foregroundColor(.white)
                    Text("\(String(format: "%.1f", self.input.score)) meters")
                        .foregroundColor(.white)
                        .bold()
                        .font(.title)
                        .padding(10)
                    Button(action: { self.input.score += 0.1 }) {
                        Text("Increment by 0.1 meters")
                    }
                    .padding(10)
                    // Clamp at zero so the radius passed to generateSphere
                    // can never go negative.
                    Button(action: { self.input.score = max(0, self.input.score - 0.1) }) {
                        Text("Decrease by 0.1 meters")
                    }
                    .padding(10)
                    NavigationLink(destination: Reality(input: self.input)) {
                        Text("View in AR")
                            .bold()
                            .padding(.top, 30)
                    }
                }
            }
            .ignoresSafeArea()
        }
    }
} // The struct's closing brace was missing in the original snippet.
Here is the code for the Reality ARView:
/// Renders an ARView containing a sphere whose radius comes from `UserInput`.
struct Reality: UIViewRepresentable {
    // Fixed scrape corruption: `@ObservedObject`, not `#ObservedObject`.
    @ObservedObject var input: UserInput

    func makeUIView(context: Context) -> ARView {
        let arView = ARView(frame: .zero)
        // The radius is read once here; later changes to `input.score` do not
        // regenerate the mesh because updateUIView is empty.
        let model = ModelEntity(mesh: .generateSphere(radius: input.score))
        let anchor = AnchorEntity(plane: .horizontal)
        anchor.addChild(model)
        arView.scene.anchors.append(anchor)
        return arView
    }

    func updateUIView(_ uiView: ARView, context: Context) {}
}
There are tons of examples of generating shapes when the user touches the screen, that's not my issue. The fact that I'm taking in user input is what makes this difficult.
Here is some code that does what I want, but without user input. It has some physics built in that I plan on implementing once I get the user input to work.
/// Hosts an ARView with a kinematic ground plane and a tap recognizer that
/// forwards touches to the Coordinator.
struct ARViewContainer: UIViewRepresentable {
    func makeUIView(context: Context) -> ARView {
        let view = ARView(frame: .zero)

        // Kinematic white plane that dynamic bodies can land on.
        let groundAnchor = AnchorEntity(plane: .horizontal)
        let ground = ModelEntity(
            mesh: MeshResource.generatePlane(width: 1, depth: 1),
            materials: [SimpleMaterial(color: .white, isMetallic: true)]
        )
        ground.physicsBody = PhysicsBodyComponent(
            massProperties: .init(mass: 1),
            material: .generate(friction: 1, restitution: 1),
            mode: .kinematic
        )
        ground.generateCollisionShapes(recursive: true)
        groundAnchor.addChild(ground)
        view.scene.anchors.append(groundAnchor)

        // Built-in pinch/rotate gestures on the plane, plus tap handling
        // routed through the coordinator.
        view.installGestures([.scale, .rotation], for: ground)
        let tap = UITapGestureRecognizer(target: context.coordinator,
                                         action: #selector(Coordinator.handleTap))
        view.addGestureRecognizer(tap)
        context.coordinator.view = view
        return view
    }

    func makeCoordinator() -> Coordinator {
        Coordinator()
    }

    func updateUIView(_ uiView: ARView, context: Context) {}
}
Here is the Coordinator class that generates the box that I want to be adjustable in size:
/// Handles taps: raycasts to a horizontal plane and drops a dynamic box there.
class Coordinator {
    weak var view: ARView?

    // Fixed scrape corruption: `@objc`, not `#objc` — required for #selector dispatch.
    @objc func handleTap(_ recognizer: UITapGestureRecognizer) {
        guard let view = view else { return }
        let location = recognizer.location(in: view)
        let results = view.raycast(from: location, allowing: .estimatedPlane, alignment: .horizontal)
        // Early exit keeps the happy path un-nested.
        guard let result = results.first else { return }

        let anchorEntity = AnchorEntity(raycastResult: result)
        let box = ModelEntity(
            mesh: MeshResource.generateBox(size: 0.3),
            materials: [SimpleMaterial(color: .black, isMetallic: true)]
        )
        // Physics modes: .static = immovable, .dynamic = simulated by the engine,
        // .kinematic = moved by the user.
        box.physicsBody = PhysicsBodyComponent(massProperties: .init(mass: 0.5),
                                               material: .generate(),
                                               mode: .dynamic)
        box.generateCollisionShapes(recursive: true)
        // Spawn 0.7 m above the hit point so the box falls onto the plane.
        box.position = simd_make_float3(0, 0.7, 0)
        anchorEntity.addChild(box)
        view.scene.anchors.append(anchorEntity)
    }
}
I tried fusing these two projects into what I want, but I get all sorts of errors I have no idea how to fix, and when I try something new, a bunch of other errors appear. I think it boils down to the @ObservedObject and the fact that I have multiple classes/structs compared to my project with user input. The user input will go into the coordinator class, but ultimately it is the ARViewContainer that actually renders the view.
If anyone can help me out, I would be incredibly grateful.
To increase / decrease sphere's radius and then position sphere by tap, use the following code.
Now, with working button and coordinator, it will be much easier to implement a raycasting.
import SwiftUI
import RealityKit
// App root: shows the radius-picker screen, extended under the safe area.
struct ContentView : View {
var body: some View {
PrevView().ignoresSafeArea()
}
}
Reality view.
/// AR host view: places a single sphere (radius from the binding) when tapped.
struct Reality: UIViewRepresentable {
    // Fixed scrape corruption: `@Binding`, not `#Binding`.
    @Binding var input: Float
    let arView = ARView(frame: .zero)

    /// Owns the tap gesture; places at most one sphere per scene.
    class ARCoordinator: NSObject {
        var manager: Reality

        init(_ manager: Reality) {
            self.manager = manager
            super.init()
            let recognizer = UITapGestureRecognizer(target: self,
                                                    action: #selector(tapped))
            manager.arView.addGestureRecognizer(recognizer)
        }

        // Fixed scrape corruption: `@objc`, not `#objc`.
        @objc func tapped(_ recognizer: UITapGestureRecognizer) {
            // Only place the sphere once (scene starts empty).
            if manager.arView.scene.anchors.isEmpty {
                let model = ModelEntity(mesh: .generateSphere(radius: manager.input))
                let anchor = AnchorEntity(world: [0, 0, -2])
                // Later: use AnchorEntity(world: result.worldTransform) from a raycast.
                anchor.addChild(model)
                manager.arView.scene.anchors.append(anchor)
            }
        }
    }

    func makeCoordinator() -> ARCoordinator { ARCoordinator(self) }
    func makeUIView(context: Context) -> ARView { return arView }
    func updateUIView(_ uiView: ARView, context: Context) { }
}
PrevView view.
/// Radius-picker screen with Increase/Decrease buttons and a link to the AR view.
struct PrevView: View {
    // Fixed scrape corruption: `@State`, not `#State`. Radius in meters.
    @State private var input: Float = 0.0

    var body: some View {
        NavigationView {
            ZStack {
                Color.black
                VStack {
                    Text("\(String(format: "%.1f", input)) meters")
                        .foregroundColor(.yellow)
                    HStack {
                        Spacer()
                        // Mutate the state directly — `$input.wrappedValue` is
                        // an unnecessary indirection inside the owning view.
                        Button(action: { input -= 0.1 }) {
                            Text("Decrease").foregroundColor(.red)
                        }
                        Spacer()
                        Button(action: { input += 0.1 }) {
                            Text("Increase").foregroundColor(.red)
                        }
                        Spacer()
                    }
                    NavigationLink(destination: Reality(input: $input)
                        .ignoresSafeArea()) {
                        Text("View in AR")
                    }
                }
            }.ignoresSafeArea()
        }
    }
}

SwiftUI why a variable is passed on other views automatically?

How does my MapView "annotations" variable get updated every time I change my "locations" variable in my ContentView? I googled that a Swift array is a value type, and I don't even need to use a binding on "annotations" (binding it to "locations") for "annotations" to know when "locations" changes — why is that?
import SwiftUI
import MapKit
/// Map screen: a crosshair marks the center; the + button drops an annotation there.
struct ContentView: View {
    // Fixed scrape corruption: `@State`, not `#State`.
    @State var centerCoordinate = CLLocationCoordinate2D()
    @State var locations = [MKPointAnnotation]()

    var body: some View {
        ZStack {
            MapView(centerCoordinate: $centerCoordinate, annotations: locations)
            // Crosshair showing where a new annotation will be placed.
            Circle()
                .fill(Color.blue)
                .opacity(0.5)
                .frame(width: 32, height: 32, alignment: .center)
            VStack {
                Spacer()
                HStack {
                    Spacer()
                    Button(action: {
                        let newLocation = MKPointAnnotation()
                        newLocation.coordinate = centerCoordinate
                        locations.append(newLocation)
                    }) {
                        Image(systemName: "plus")
                    }
                    .padding()
                    .background(Color.black.opacity(0.75))
                    .foregroundColor(.white)
                    .font(.title)
                    .clipShape(Circle())
                    .padding([.trailing, .bottom])
                }
            }
        }
        .edgesIgnoringSafeArea(.all)
    }
}
/// MKMapView wrapper: syncs annotations in and reports the center coordinate out.
struct MapView: UIViewRepresentable {
    // Fixed scrape corruption: `@Binding`, not `#Binding`.
    @Binding var centerCoordinate: CLLocationCoordinate2D
    var annotations: [MKPointAnnotation]

    func makeUIView(context: Context) -> MKMapView {
        let mapView = MKMapView()
        mapView.delegate = context.coordinator
        return mapView
    }

    // Called whenever SwiftUI re-renders this representable with new values.
    func updateUIView(_ uiView: UIViewType, context: Context) {
        print("Updating UIView")
        // Cheap change detection: only resync when the counts differ.
        if annotations.count != uiView.annotations.count {
            uiView.removeAnnotations(uiView.annotations)
            uiView.addAnnotations(annotations)
        }
    }

    func makeCoordinator() -> Coordinator {
        return Coordinator(self)
    }

    class Coordinator: NSObject, MKMapViewDelegate {
        let parent: MapView

        init(_ parent: MapView) {
            self.parent = parent
        }

        // Push the map's center back into the SwiftUI binding as the user pans.
        func mapViewDidChangeVisibleRegion(_ mapView: MKMapView) {
            parent.centerCoordinate = mapView.centerCoordinate
        }
    }
}
// Sample annotation used for previews/testing.
extension MKPointAnnotation{
static var example: MKPointAnnotation{
let point = MKPointAnnotation()
point.coordinate = CLLocationCoordinate2D(latitude: 51.5, longitude: -0.13)
point.title = "London"
point.subtitle = "The home of 2012 Summer Olympics"
return point
}
}
Not sure if I understood your question but let me try.
First you have this state in your View:
@State var locations = [MKPointAnnotation]()
Next, you assign its value to:
MapView(centerCoordinate: $centerCoordinate, annotations: locations)
This means, every time you change locations, SwiftUI will re-render the MapView with new values.
Any UIViewRepresentable have to implement the func updateUIView in order to receive these changes when SwiftUI re-render the respective component.
So, every time your MapView being rendered, this function is going to be trigged:
// Quoted from the question: runs on every SwiftUI re-render of MapView.
func updateUIView(_ uiView: UIViewType, context: Context) {
print("Updating UIView")
if annotations.count != uiView.annotations.count{
uiView.removeAnnotations(uiView.annotations)
uiView.addAnnotations(annotations)
}
}
The uiView.annotations and annotations are not related to each other. When uiView.annotations changes, it only causes side effects in the view itself and doesn't update the given @State, because that could create an infinite rendering loop.

SwiftUI Button interact with Map

I'm totally new with Swift and SwiftUI and for a project group, I need to develop my first IOS app.
I can display a map with Mapbox but I don't know how to follow my user when I click on a button.
I don't know how to interact my button with my struct MapView
This is my code:
MapView.swift:
import Mapbox
// UIViewRepresentable wrapper around Mapbox's MGLMapView.
// The modifier-style helpers below mutate the shared mapView and return self
// so calls can be chained SwiftUI-style, e.g. MapView().zoomLevel(10).
struct MapView: UIViewRepresentable {
let mapView: MGLMapView = MGLMapView(frame: .zero)
func makeCoordinator() -> Coordinator {
Coordinator(self)
}
func makeUIView(context: UIViewRepresentableContext<MapView>) -> MGLMapView {
// The coordinator receives MGLMapViewDelegate callbacks.
mapView.delegate = context.coordinator
return mapView
}
func updateUIView(_ uiView: MGLMapView, context: UIViewRepresentableContext<MapView>) {
}
// Sets the map's style URL and returns self for chaining.
func styleURL(_ styleURL: URL) -> MapView {
mapView.styleURL = styleURL
return self
}
// Centers the map on the given coordinate.
func centerCoordinate(_ centerCoordinate: CLLocationCoordinate2D) -> MapView {
mapView.centerCoordinate = centerCoordinate
return self
}
// Sets the zoom level.
func zoomLevel(_ zoomLevel: Double) -> MapView {
mapView.zoomLevel = zoomLevel
return self
}
// Sets how the map follows the user (e.g. .follow).
func userTrackingMode(_ userTrackingMode: MGLUserTrackingMode) -> MapView {
mapView.userTrackingMode = userTrackingMode
return self
}
}
// Delegate object for MGLMapView; holds a reference back to the SwiftUI wrapper.
class Coordinator: NSObject, MGLMapViewDelegate {
var parent: MapView
init(_ parent: MapView) {
self.parent = parent
}
}
ContentView.swift:
import Mapbox
/// Full-screen Mapbox map with an overlaid button intended to toggle follow mode.
struct ContentView: View {
    // Fixed scrape corruption: `@Environment`, not `#Environment`.
    @Environment(\.colorScheme) var colorScheme: ColorScheme

    var body: some View {
        ZStack {
            MapView()
                .userTrackingMode(.follow)
                .edgesIgnoringSafeArea(.all)
            HStack(alignment: .top) {
                Spacer()
                VStack() {
                    Button(action: {
                        //ACTION TO CHANGE FOLLOW MODE
                    }) {
                        Image(systemName: "location.fill")
                            .frame(width: 40.0, height: 40.0)
                    }
                    .padding(.top, 60.0)
                    .padding(.trailing, 10.0)
                    .frame(width: 45.0, height: 80.0)
                    Spacer()
                }
            }
        }
    }
}
// Xcode canvas preview in dark mode.
struct ContentView_Previews: PreviewProvider {
static var previews: some View {
ContentView().environment(\.colorScheme, .dark)
}
}
Actually, I think in your case, since you have a reference to the map, you can try to interact with it directly (i.e. imperatively — it is imperative by nature, so there is no need to make a simple thing complex).
Like
...
let myMapHolder = MapView()
var body: some View {
ZStack {
myMapHolder
.userTrackingMode(.follow)
.edgesIgnoringSafeArea(.all)
...
VStack() {
Button(action: {
self.myMapHolder.mapView.userTrackingMode = _your_mode_
}) {
Image(systemName: "location.fill")