How to show different prefabs for different images in ARcore-Augmented Image scene using Unity? - unity3d

Hi, I am trying to augment different prefabs for different images — say around 20 models. Currently I am testing with 2 models for 2 images in the AugmentedImage sample scene. I have added the script AugmentedImageVisualizer.cs to each prefab and drag-and-dropped the two models onto the script. In AugmentedImageExampleController.cs I have made the following changes.
namespace GoogleARCore.Examples.AugmentedImage
{
using System.Collections.Generic;
using System.Runtime.InteropServices;
using GoogleARCore;
using UnityEngine;
using UnityEngine.UI;
/// <summary>
/// Controller for the AugmentedImage example: spawns a visualizer per tracked
/// image and toggles the fit-to-scan overlay.
/// </summary>
public class AugmentedImageExampleController : MonoBehaviour
{
    /// <summary>
    /// Prefabs for visualizing AugmentedImages, indexed by the image's database index.
    /// </summary>
    public List<AugmentedImageVisualizer> AugmentedImageVisualizerPrefab = new List<AugmentedImageVisualizer>();

    /// <summary>
    /// The overlay containing the fit to scan user guide.
    /// </summary>
    public GameObject FitToScanOverlay;

    // Active visualizers keyed by the image's database index.
    private Dictionary<int, AugmentedImageVisualizer> m_Visualizers
        = new Dictionary<int, AugmentedImageVisualizer>();

    // Scratch list reused every frame to avoid per-frame allocations.
    private List<AugmentedImage> m_TempAugmentedImages = new List<AugmentedImage>();

    /// <summary>
    /// The Unity Update method.
    /// </summary>
    public void Update()
    {
        // Exit the app when the 'back' button is pressed.
        if (Input.GetKey(KeyCode.Escape))
        {
            Application.Quit();
        }

        // Nothing to do unless motion tracking is running.
        if (Session.Status != SessionStatus.Tracking)
        {
            return;
        }

        // Get the augmented images that were updated this frame.
        Session.GetTrackables<AugmentedImage>(m_TempAugmentedImages, TrackableQueryFilter.Updated);

        // Create visualizers/anchors for newly tracking images and
        // tear down visualizers for images that stopped tracking.
        foreach (var image in m_TempAugmentedImages)
        {
            AugmentedImageVisualizer visualizer;
            bool hasVisualizer = m_Visualizers.TryGetValue(image.DatabaseIndex, out visualizer);

            if (!hasVisualizer && image.TrackingState == TrackingState.Tracking)
            {
                // The anchor keeps ARCore tracking this augmented image.
                Anchor anchor = image.CreateAnchor(image.CenterPose);
                visualizer = (AugmentedImageVisualizer)Instantiate(
                    AugmentedImageVisualizerPrefab[image.DatabaseIndex], anchor.transform);
                visualizer.Image = image;
                m_Visualizers.Add(image.DatabaseIndex, visualizer);
            }
            else if (hasVisualizer && image.TrackingState == TrackingState.Stopped)
            {
                m_Visualizers.Remove(image.DatabaseIndex);
                GameObject.Destroy(visualizer.gameObject);
            }
        }

        // The fit-to-scan overlay is shown only while no image is being tracked.
        bool anyTracking = false;
        foreach (var visualizer in m_Visualizers.Values)
        {
            if (visualizer.Image.TrackingState == TrackingState.Tracking)
            {
                anyTracking = true;
                break;
            }
        }
        FitToScanOverlay.SetActive(!anyTracking);
    }
}
}
My unity screen looks like below
Added Augmented Image Visualizer script to the prefabs to Rabbit prefab and Monkey prefab.Image given below
Is this how it should be done? The problem is that once a model appears it will not disappear, so when I show the next image another model comes on top of it. How do I hide the model when its image is no longer tracked?
In AugmentedImageExampleController.cs we are using the code below. Still, I don't understand why the models are not disappearing after tracking of the image is lost.
else if (image.TrackingState == TrackingState.Stopped && visualizer != null)
{
m_Visualizers.Remove(image.DatabaseIndex);
GameObject.Destroy(visualizer.gameObject);
}
The AugmentedImageVisualizer.cs code is given below. I have referred to this Link.
namespace GoogleARCore.Examples.AugmentedImage
{
using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;
using GoogleARCore;
using GoogleARCoreInternal;
using UnityEngine;
using UnityEngine.UI;
/// <summary>
/// Visualizes an AugmentedImage by toggling the model that matches the
/// image's database index.
/// </summary>
public class AugmentedImageVisualizer : MonoBehaviour
{
    /// <summary>
    /// The AugmentedImage to visualize.
    /// </summary>
    public AugmentedImage Image;

    /// <summary>
    /// Models to show, indexed by the augmented image's database index
    /// (Models[0] belongs to image 0 in the database, and so on).
    /// </summary>
    public GameObject[] Models;

    /// <summary>
    /// The Unity Update method. Shows the model only while its image is
    /// actively tracked, and hides it otherwise.
    /// </summary>
    public void Update()
    {
        // BUG FIX: the original dereferenced Image.DatabaseIndex inside branches
        // guarded by (Image == null || ...), which throws NullReferenceException
        // whenever Image is null. Bail out before touching the index.
        if (Image == null)
        {
            return;
        }

        // One check replaces the original's three redundant branches:
        // Stopped and Paused are both "not Tracking", so the model is hidden
        // for every state except Tracking and shown only while Tracking.
        Models[Image.DatabaseIndex].SetActive(Image.TrackingState == TrackingState.Tracking);
    }
}
}

The problem is that in your update function you always set both models active. But you should only activate the model for the image you are tracking! So, as said in the comment, you should use the AugmentedImage DatabaseIndex.
For example, your Models[0] is the model corresponding to the first image in the database and Models[1] corresponds to the second image.
So instead of:
// Wrong code, because you're always setting both models active
Models[0].SetActive(true);
Models[1].SetActive(true);
you can write:
// Only set the tracking Image active
Models[Image.DatabaseIndex].SetActive(true);
Another thing is in your if (Image != null && Image.TrackingState == TrackingState.Paused) and if (Image != null && Image.TrackingState == TrackingState.Stopped) you could write a return; after deactivating your model, so that you quit your Update function and don't set the model active again.

TrackingState is not working as intended.
Try using TrackingMethod wherever you are using TrackingState, in both the AugmentedImageExampleController and AugmentedImageVisualizer scripts.
TrackingMethod has 3 states: FullTracking, NotTracking and LastKnownPose.
for eg: the below if statement will be
if (image.TrackingState == TrackingState.Tracking && visualizer == null){}
change to
if (image.TrackingMethod == AugmentedImageTrackingMethod.FullTracking && visualizer == null){}

Here is complete working example in case some one is looking for this solution. https://github.com/darshanpv/DigiCard

Related

AR core loading prefabs dynamically in unity

I have recently started dabbling in Unity with ARcore, I am using the HelloARController.cs that comes with ARcore 1.5 but I want to load a bundle from my server, I have two scripts
one called LoadAsset.cs which loads a bundle from a webserver and I have another called HelloARcontroller.cs which is basically the main part of the app, this allows me to preset prefabs that will be loaded when the screen is clicked
What I would like to do instead of loading one of the "Andy" objects is for it to pull a prefab from the webserver and use that instead
I have managed so far to load a prefab from my server at runtime but placing it is another matter
namespace GoogleARCore.Examples.HelloAR
{
using System.Collections.Generic;
using GoogleARCore;
using GoogleARCore.Examples.Common;
using UnityEngine;
using System;
using System.Collections;
#if UNITY_EDITOR
// Set up touch input propagation while using Instant Preview in the editor.
using Input = InstantPreviewInput;
#endif
/// <summary>
/// Controls the HelloAR example.
/// </summary>
public class HelloARController : MonoBehaviour
{
public string BundleURL; // --> http://Myserver/public_http/Assets/AssetBundles/cube_prefab (path to the AssetBundle)
public string AssetName; // --> Cube_pref (name of the Asset prefab)
// Cache version number passed to WWW.LoadFromCacheOrDownload in DownloadAndCache().
public int version;
/// <summary>
/// The first-person camera being used to render the passthrough camera image (i.e. AR background).
/// </summary>
public Camera FirstPersonCamera;
/// <summary>
/// A prefab for tracking and visualizing detected planes.
/// </summary>
public GameObject DetectedPlanePrefab;
/// <summary>
/// A model to place when a raycast from a user touch hits a plane.
/// </summary>
public GameObject AndyPlanePrefab;
/// <summary>
/// A model to place when a raycast from a user touch hits a feature point.
/// </summary>
public GameObject AndyPointPrefab;
/// <summary>
/// A gameobject parenting UI for displaying the "searching for planes" snackbar.
/// </summary>
public GameObject SearchingForPlaneUI;
/// <summary>
/// The rotation in degrees need to apply to model when the Andy model is placed.
/// </summary>
private const float k_ModelRotation = 180.0f;
/// <summary>
/// A list to hold all planes ARCore is tracking in the current frame. This object is used across
/// the application to avoid per-frame allocations.
/// </summary>
private List<DetectedPlane> m_AllPlanes = new List<DetectedPlane>();
/// <summary>
/// True if the app is in the process of quitting due to an ARCore connection error, otherwise false.
/// </summary>
private bool m_IsQuitting = false;
/// <summary>
/// The Unity Start() method. Kicks off the asset-bundle download coroutine.
/// (The original comment said "Update() method", which was misplaced.)
/// </summary>
void Start()
{
StartCoroutine(DownloadAndCache());
}
/// <summary>
/// Downloads the AssetBundle at BundleURL (or loads it from cache when the same
/// version is already cached) and loads the prefab named AssetName from it.
/// NOTE(review): WWW / WWW.LoadFromCacheOrDownload is obsolete in newer Unity
/// versions; UnityWebRequestAssetBundle is the replacement — confirm before upgrading.
/// NOTE(review): the loaded GameObject 'testing' is never stored or instantiated,
/// so the downloaded prefab is discarded when the coroutine ends.
/// </summary>
IEnumerator DownloadAndCache()
{
// Load the AssetBundle file from Cache if it exists with the same version or download and store it in the cache
using (WWW www = WWW.LoadFromCacheOrDownload(BundleURL, version))
{
yield return www;
if (www.error != null)
throw new Exception("WWW download had an error:" + www.error);
AssetBundle bundle = www.assetBundle;
GameObject testing = bundle.LoadAsset(AssetName) as GameObject;
// Unload the AssetBundles compressed contents to conserve memory
bundle.Unload(false);
} // memory is freed from the web stream (www.Dispose() gets called implicitly)
}
I've declared all my variables and managed to get the Prefab to load up to this point ^^^^^
/// <summary>
/// The Unity Update() method: toggles the "searching for planes" UI, and on a
/// screen touch raycasts against detected geometry to place an Andy model
/// anchored at the hit pose.
/// </summary>
public void Update()
{
_UpdateApplicationLifecycle();
// Hide snackbar when currently tracking at least one plane.
Session.GetTrackables<DetectedPlane>(m_AllPlanes);
bool showSearchingUI = true;
for (int i = 0; i < m_AllPlanes.Count; i++)
{
if (m_AllPlanes[i].TrackingState == TrackingState.Tracking)
{
showSearchingUI = false;
break;
}
}
SearchingForPlaneUI.SetActive(showSearchingUI);
// If the player has not touched the screen, we are done with this update.
Touch touch;
if (Input.touchCount < 1 || (touch = Input.GetTouch(0)).phase != TouchPhase.Began)
{
return;
}
// Raycast against the location the player touched to search for planes.
TrackableHit hit;
TrackableHitFlags raycastFilter = TrackableHitFlags.PlaneWithinPolygon |
TrackableHitFlags.FeaturePointWithSurfaceNormal;
if (Frame.Raycast(touch.position.x, touch.position.y, raycastFilter, out hit))
{
// Use hit pose and camera pose to check if hittest is from the
// back of the plane, if it is, no need to create the anchor.
if ((hit.Trackable is DetectedPlane) &&
Vector3.Dot(FirstPersonCamera.transform.position - hit.Pose.position,
hit.Pose.rotation * Vector3.up) < 0)
{
Debug.Log("Hit at back of the current DetectedPlane");
}
else
{
// NOTE(review): the next line is explanatory prose from the question,
// not code — it will not compile as written.
This is where it loads the Andy prefabs, what I would like to do is swap the andy prefab out for the asset bundle
// Choose the Andy model for the Trackable that got hit.
GameObject prefab;
// NOTE(review): this mapping looks swapped — a FeaturePoint hit assigns
// AndyPlanePrefab and everything else gets AndyPointPrefab. Verify against
// the original sample, which pairs FeaturePoint with the point prefab.
if (hit.Trackable is FeaturePoint)
{
prefab = AndyPlanePrefab;
}
else
{
prefab = AndyPointPrefab;
}
// Instantiate Andy model at the hit pose.
var andyObject = Instantiate(prefab, hit.Pose.position, hit.Pose.rotation);
// Compensate for the hitPose rotation facing away from the raycast (i.e. camera).
andyObject.transform.Rotate(0, k_ModelRotation, 0, Space.Self);
// Create an anchor to allow ARCore to track the hitpoint as understanding of the physical
// world evolves.
var anchor = hit.Trackable.CreateAnchor(hit.Pose);
// Make Andy model a child of the anchor.
andyObject.transform.parent = anchor.transform;
}
}
}
/// <summary>
/// Check and update the application lifecycle: back-button exit, screen sleep
/// policy, and quitting on ARCore connection/permission errors.
/// </summary>
private void _UpdateApplicationLifecycle()
{
    // Exit the app when the 'back' button is pressed.
    if (Input.GetKey(KeyCode.Escape))
    {
        Application.Quit();
    }

    // Only allow the screen to sleep when not tracking.
    if (Session.Status == SessionStatus.Tracking)
    {
        Screen.sleepTimeout = SleepTimeout.NeverSleep;
    }
    else
    {
        const int lostTrackingSleepTimeout = 15;
        Screen.sleepTimeout = lostTrackingSleepTimeout;
    }

    // A quit is already scheduled — nothing more to check.
    if (m_IsQuitting)
    {
        return;
    }

    // Quit if ARCore was unable to connect and give Unity some time for the toast to appear.
    if (Session.Status == SessionStatus.ErrorPermissionNotGranted)
    {
        _ShowAndroidToastMessage("Camera permission is needed to run this application.");
        m_IsQuitting = true;
        Invoke("_DoQuit", 0.5f);
    }
    else if (Session.Status.IsError())
    {
        _ShowAndroidToastMessage("ARCore encountered a problem connecting. Please start the app again.");
        m_IsQuitting = true;
        Invoke("_DoQuit", 0.5f);
    }
}
/// <summary>
/// Actually quit the application (invoked via Invoke after the error toast is shown).
/// </summary>
private void _DoQuit() => Application.Quit();
/// <summary>
/// Show an Android toast message via the Unity player activity.
/// </summary>
/// <param name="message">Message string to show in the toast.</param>
private void _ShowAndroidToastMessage(string message)
{
    var unityPlayer = new AndroidJavaClass("com.unity3d.player.UnityPlayer");
    var unityActivity = unityPlayer.GetStatic<AndroidJavaObject>("currentActivity");
    if (unityActivity == null)
    {
        return;
    }

    var toastClass = new AndroidJavaClass("android.widget.Toast");
    // Toasts must be created and shown on the Android UI thread.
    unityActivity.Call("runOnUiThread", new AndroidJavaRunnable(() =>
    {
        var toast = toastClass.CallStatic<AndroidJavaObject>("makeText", unityActivity,
            message, 0);
        toast.Call("show");
    }));
}
}
}
My plan is to change the URL dynamically as thats just a public variable so that I can load a different prefab once the app is running, then change the url and load a different prefab.
Any help, advice greatly appreciated.

How to get screen click input from anywhere with custom editor attribute?

TL;DR: How do I implement Unity's 'color from screen' functionality but with vectors?
Ok so title is pretty simplified for what I'm trying to do:
Have the user click a button, then click a position on the screen to have that [world] position be saved as the vector. - This is mostly working, except it won't detect left clicks outside of the inspector.
Disable left click for everything else on the unity editor (so when you click a position it doesn't change focus to another GameObject). - This is the main problem.
Tracking the mouse and getting the world position was pretty easy, it's just a bool to save if the mouse is being tracked and a SerializedProperty to save which value the mouse position is being saved to.
Here's what my attribute looks like:
public class VectorPickerAttribute : PropertyAttribute {

    // Whether the picked point should be made relative to the object's transform.
    readonly bool relative;

    /// <summary>
    /// Works a lot like the color picker, except for vectors.
    /// </summary>
    /// <param name="relative">Make the final vector relative to the transform?</param>
    public VectorPickerAttribute(bool relative = false) => this.relative = relative;
}
Here is the PropertyDrawer:
// Draws Vector2 fields marked with [VectorPicker]: a small button next to the
// field starts tracking the mouse, writing the world-space mouse position into
// the property each OnGUI until a left click is detected.
[CustomPropertyDrawer(typeof(VectorPickerAttribute))]
public class VectorPickerDrawer : PropertyDrawer {
// True while the mouse position is being written into the property below.
bool trackMouse = false;
// The property that the picked value is written to.
SerializedProperty v;
public override void OnGUI(Rect position, SerializedProperty property, GUIContent label) {
// Only Vector2 properties are handled; anything else is silently skipped.
if(property.propertyType == SerializedPropertyType.Vector2) {
// Small square button at the right edge of the field.
Rect button = new Rect(position);
button.x = position.width - 2;
button.width = position.height;
bool pressed = GUI.Button(button, "");
if(pressed) {
trackMouse = true;
v = property;
}
// NOTE(review): Input.GetMouseButtonDown is the runtime input API; it does not
// reliably see clicks outside the inspector in editor GUI code — Event.current
// is the editor-safe way to detect clicks. Confirm against the fixed version below.
else if(Input.GetMouseButtonDown(0)) trackMouse = false;
// Only track the one property whose button was pressed.
bool tracking = trackMouse && v.propertyPath == property.propertyPath;
if(tracking) {
// Convert the GUI mouse position to screen, then into the world via the main camera.
property.vector2Value =
Camera.main.ScreenToWorldPoint(
GUIUtility.GUIToScreenPoint(
Event.current.mousePosition
));
}
// Disable the field while tracking so manual edits don't fight the picker.
GUI.enabled = !tracking;
EditorGUI.Vector2Field(position, label.text, property.vector2Value);
GUI.enabled = true;
// NOTE(review): SetDirty runs on every OnGUI even when nothing changed — confirm intended.
EditorUtility.SetDirty(property.serializedObject.targetObject);
}
}
}
And here's what it does so far:
You click the button on the right, and it will update the vector to the mouse position until it detects a left click with Input.GetMouseButtonDown(0).
Problems with this:
It will only detect a click when it's actually on the inspector window.
When you click outside the inspector window it will either not change anything or it will select something else so it will close the inspector (but since it saves the mouse position every OnGUI() that point where you clicked will be saved to the vector, so I guess it works??).
I've tried covering the screen with a blank window, but I couldn't get GUI.Window or GUI.ModalWindow to do anything in the PropertyDrawer. I've also tried using GUI.UnfocusWindow(), but either it doesn't work in PropertyDrawer or it's only meant for Unity's windows or something.
Core aspects:
overwrite SceneView.onSceneGUIDelegate in order to catch any mouse events on the SceneView
use ActiveEditorTracker.sharedTracker.isLocked to lock and unlock the inspector to prevent losing the focus (which would cause the OnGUI not getting called anymore)
use Selection.activeGameObject and set it to the GameObject the drawer is on in order to prevent losing the focus on the GameObject (especially in the moment ActiveEditorTracker.sharedTracker.isLocked is set to false it seems to automatically clear Selection.activeGameObject)
Allow reverting the value to previous using the Escape key
Use Event.current.Use(); and/or Event.current = null; (I just wanted to be very sure) in order to prevent the event to propagate and being handled by someone else
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
/// <summary>
/// Attribute for setting a vector by clicking on the screen in editor mode.
/// </summary>
public class VectorPickerAttribute : PropertyAttribute {

    /// <summary>When true, the final vector is made relative to the transform.</summary>
    public readonly bool relative;

    /// <summary>Works a lot like the color picker, except for vectors.</summary>
    /// <param name="relative">Make the final vector relative to the transform?</param>
    public VectorPickerAttribute(bool relative = false) {
        this.relative = relative;
    }
}
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEditor;
/// <summary>
/// Property drawer that lets the user pick a Vector2 by clicking in the SceneView,
/// locking the inspector while tracking so focus is not lost.
/// </summary>
[CustomPropertyDrawer(typeof(VectorPickerAttribute))]
public class VectorPickerDrawer : PropertyDrawer {
#region Variables
// True while the mouse is being tracked for the property below.
bool _trackMouse;
// The property whose value is being picked.
SerializedProperty _property;
// Target component; used to make the result relative to its transform.
MonoBehaviour script;
///<summary>Keep the currently selected object to avoid losing focus while/after tracking</summary>
GameObject _mySelection;
///<summary>For reverting if tracking canceled</summary>
Vector2 _originalPosition;
///<summary>Flag for doing Setup only once</summary>
bool _setup;
/// <summary>Mouse position from scene view into the world.</summary>
Vector2 worldPoint;
#endregion
/// <summary>
/// Catch a click event while over the SceneView
/// </summary>
/// <param name="sceneView">The current scene view => might not work anymore with multiple SceneViews</param>
private void UpdateSceneView(SceneView sceneView) {
Camera cam = SceneView.lastActiveSceneView.camera;
// Convert the GUI mouse position into a world point through the SceneView camera.
worldPoint = Event.current.mousePosition;
worldPoint.y = Screen.height - worldPoint.y - 36.0f; // ??? Why that offset?! NOTE(review): presumably the SceneView toolbar height — confirm per Unity version.
worldPoint = cam.ScreenToWorldPoint(worldPoint);
VectorPickerAttribute vectorPicker = attribute as VectorPickerAttribute;
if(script != null && vectorPicker.relative) worldPoint -= (Vector2)script.transform.position;
// get current event
var e = Event.current;
// Only check while tracking
if(_trackMouse) {
if((e.type == EventType.MouseDown || e.type == EventType.MouseUp) && e.button == 0) {
OnTrackingEnds(false, e);
}
else {
// Prevent losing focus
Selection.activeGameObject = _mySelection;
}
}
else {
// Tracking already ended: tear this callback down on the next real event.
// Skip if event is Layout or Repaint
if(e.type == EventType.Layout || e.type == EventType.Repaint) return;
// Prevent Propagation
Event.current.Use();
Event.current = null;
// Unlock Inspector
ActiveEditorTracker.sharedTracker.isLocked = false;
// Prevent losing focus
Selection.activeGameObject = _mySelection;
// Remove SceneView callback
SceneView.onSceneGUIDelegate -= UpdateSceneView;
}
}
/// <summary>
/// Called when ending Tracking
/// </summary>
/// <param name="revert">flag whether to revert to previous value or not</param>
/// <param name="e">event that caused the ending</param>
/// <returns>Returns the vector value of the property that we are modifying.</returns>
private Vector2 OnTrackingEnds(bool revert, Event e) {
// Consume the event so nothing else (e.g. object selection) reacts to the click.
e.Use();
Event.current = null;
//Debug.Log("Vector Picker finished");
if(revert) {
// restore previous value
_property.vector2Value = _originalPosition;
//Debug.Log("Reverted");
}
// disable tracking
_trackMouse = false;
// Apply changes
_property.serializedObject.ApplyModifiedProperties();
return _property.vector2Value;
}
public override void OnGUI(Rect position, SerializedProperty property, GUIContent label) {
script = (MonoBehaviour)property.serializedObject.targetObject;
// This drawer only supports Vector2 fields.
if(property.propertyType != SerializedPropertyType.Vector2) {
EditorGUI.HelpBox(position, "This Attribute requires Vector2", MessageType.Error);
return;
}
var e = Event.current;
if(!_setup) {
// store the selected Object (should be the one with this drawer active)
_mySelection = Selection.activeGameObject;
_property = property;
_setup = true;
}
// load current value into serialized properties
_property.serializedObject.Update();
//specific to the ONE property we are updating
bool trackingThis = _trackMouse && property.propertyPath == _property.propertyPath;
// Field is read-only while its value is being picked.
GUI.enabled = !trackingThis;
EditorGUI.PropertyField(position, property, label);
GUI.enabled = true;
// Write manually changed values to the serialized fields
_property.serializedObject.ApplyModifiedProperties();
if(!trackingThis) {
var button = new Rect(position) {
x = position.width - 2,
width = position.height
};
// if button wasn't pressed do nothing
if(!GUI.Button(button, "")) return;
// store current value in case of revert
_originalPosition = _property.vector2Value;
// enable tracking
_property = property;
_trackMouse = true;
// Lock the inspector so we cannot lose focus
ActiveEditorTracker.sharedTracker.isLocked = true;
// Prevent event propagation
e.Use();
//Debug.Log("Vector Picker started");
return;
}
// <<< This section is only reached if we are in tracking mode >>>
// Overwrite the onSceneGUIDelegate with a callback for the SceneView
// NOTE(review): plain assignment replaces any other subscribers of the delegate;
// '+=' would be less destructive, but the teardown above uses '-=' — confirm intent.
SceneView.onSceneGUIDelegate = UpdateSceneView;
// Set to world position
_property.vector2Value = worldPoint;
// Track position until either Mouse button 0 (to confirm) or Escape (to cancel) is clicked
var mouseUpDown = (e.type == EventType.MouseUp || e.type == EventType.MouseDown) && e.button == 0;
if(mouseUpDown) {
// End the tracking, don't revert
property.vector2Value = OnTrackingEnds(false, e);
}
else if(e.type == EventType.KeyUp && _trackMouse && e.keyCode == KeyCode.Escape) {
// Cancel tracking via Escape => revert value
property.vector2Value = OnTrackingEnds(true, e);
}
property.serializedObject.ApplyModifiedProperties();
//This fixes "randomly stops updating for no reason".
EditorUtility.SetDirty(property.serializedObject.targetObject);
}
}
I tried to explain everything in the comments. Ofcourse this still has some flaws and might not work in some special cases but I hope it gets in the correct direction.

Oculus and Unity: grab object makes avatar fall through the floor?

I just downloaded the basic Oculus sample framework for Unity (I'm on 2018.2) and am trying to combine the TeleportAvatar and AvatarWithGrab scenes - to have a teleporting avatar able to grab objects.
All is well — I dragged an object with the OVR Grabbable script (below) into the teleport scene, added OVRGrabbers to the avatar object and hit play. I can successfully pick up the object after teleporting, etc., but for some reason, after interacting with the grabbable object for a while, my character falls through the floor (maybe teleports below the floor?).
I even added a new floor to be sure there was a box collider to prevent falling; however, this had no effect.
I looked for any overlap between the controls and even turning off teleport, but the grabbable object script is causing the fall. Why is this?
Doing the grabbing motion with right hand (teleporting hand) doesn't cause any issues if I'm not near a grab object.
/// <summary>
/// An object that can be grabbed and thrown by OVRGrabber.
/// </summary>
public class OVRGrabbable : MonoBehaviour
{
    [SerializeField]
    protected bool m_allowOffhandGrab = true;
    [SerializeField]
    protected bool m_snapPosition = false;
    [SerializeField]
    protected bool m_snapOrientation = false;
    [SerializeField]
    protected Transform m_snapOffset;
    [SerializeField]
    protected Collider[] m_grabPoints = null;

    // Runtime grab state.
    protected bool m_grabbedKinematic = false;
    protected Collider m_grabbedCollider = null;
    protected OVRGrabber m_grabbedBy = null;

    /// <summary>If true, the object can currently be grabbed.</summary>
    public bool allowOffhandGrab
    {
        get { return m_allowOffhandGrab; }
    }

    /// <summary>If true, the object is currently grabbed.</summary>
    public bool isGrabbed
    {
        get { return m_grabbedBy != null; }
    }

    /// <summary>If true, the object's position will snap to match snapOffset when grabbed.</summary>
    public bool snapPosition
    {
        get { return m_snapPosition; }
    }

    /// <summary>If true, the object's orientation will snap to match snapOffset when grabbed.</summary>
    public bool snapOrientation
    {
        get { return m_snapOrientation; }
    }

    /// <summary>An offset relative to the OVRGrabber where this object can snap when grabbed.</summary>
    public Transform snapOffset
    {
        get { return m_snapOffset; }
    }

    /// <summary>Returns the OVRGrabber currently grabbing this object.</summary>
    public OVRGrabber grabbedBy
    {
        get { return m_grabbedBy; }
    }

    /// <summary>The transform at which this object was grabbed.</summary>
    public Transform grabbedTransform
    {
        get { return m_grabbedCollider.transform; }
    }

    /// <summary>The Rigidbody of the collider that was used to grab this object.</summary>
    public Rigidbody grabbedRigidbody
    {
        get { return m_grabbedCollider.attachedRigidbody; }
    }

    /// <summary>The contact point(s) where the object was grabbed.</summary>
    public Collider[] grabPoints
    {
        get { return m_grabPoints; }
    }

    /// <summary>
    /// Notifies the object that it has been grabbed.
    /// </summary>
    virtual public void GrabBegin(OVRGrabber hand, Collider grabPoint)
    {
        m_grabbedBy = hand;
        m_grabbedCollider = grabPoint;
        // While held, physics must not fight the hand, so the body becomes kinematic.
        Rigidbody body = gameObject.GetComponent<Rigidbody>();
        body.isKinematic = true;
    }

    /// <summary>
    /// Notifies the object that it has been released.
    /// </summary>
    virtual public void GrabEnd(Vector3 linearVelocity, Vector3 angularVelocity)
    {
        // Restore the kinematic state captured in Start() and hand over the throw velocity.
        Rigidbody body = gameObject.GetComponent<Rigidbody>();
        body.isKinematic = m_grabbedKinematic;
        body.velocity = linearVelocity;
        body.angularVelocity = angularVelocity;
        m_grabbedBy = null;
        m_grabbedCollider = null;
    }

    void Awake()
    {
        // Grab points already configured in the inspector — nothing to derive.
        if (m_grabPoints.Length != 0)
        {
            return;
        }

        Collider collider = this.GetComponent<Collider>();
        if (collider == null)
        {
            throw new ArgumentException("Grabbables cannot have zero grab points and no collider -- please add a grab point or collider.");
        }

        // Fall back to the object's own collider as the single grab point.
        m_grabPoints = new Collider[1] { collider };
    }

    protected virtual void Start()
    {
        // Remember the original kinematic state so GrabEnd can restore it.
        m_grabbedKinematic = GetComponent<Rigidbody>().isKinematic;
    }

    void OnDestroy()
    {
        if (m_grabbedBy != null)
        {
            // Notify the hand to release destroyed grabbables
            m_grabbedBy.ForceRelease(this);
        }
    }
}

Script runs in Unity Preview but not on HoloLens

I'm new to C#, HoloLens, and Unity. I want to add the voice command functionality from Holograms 101 to Holograms 230.
Having completed Holograms 230, I've taken the SpeechManager script from Holograms 101, added some Debug.Log commands, and dragged it onto an object in Unity's "Hierarchy" panel. When I click the "Play" button in Unity, my script runs (as verified by the fact that my Debug.Log appears in the status bar). But when I build and deploy it to the HoloLens (via "Debug -> Start Debugging" in Visual Studio), the Debug.Logs from my new script don't show up in Visual Studio's "Output" panel, even though Debug.Logs from other scripts do.
Here's my SpeechManager.cs:
using System.Collections;
using System.Collections.Generic;
using System.Linq;
using UnityEngine;
using UnityEngine.Windows.Speech;
public class SpeechManager : MonoBehaviour {

    // Recognizer driving the keyword callbacks; null until Start() runs.
    KeywordRecognizer keywordRecognizer = null;

    // Maps a spoken phrase to the action that should run when it is heard.
    Dictionary<string, System.Action> keywords = new Dictionary<string, System.Action>();

    // NOTE(review): for speech recognition to work on a HoloLens build, the
    // "Microphone" capability must be enabled in the UWP player settings —
    // confirm in the project's build configuration.
    void Start () {
        Debug.Log("!!Starting speech manager");

        keywords.Add("Place item", () =>
        {
            Debug.Log("!!Place item");
        });

        // Hand the recognizer the full phrase list, hook the callback, and start listening.
        keywordRecognizer = new KeywordRecognizer(keywords.Keys.ToArray());
        keywordRecognizer.OnPhraseRecognized += KeywordRecognizer_OnPhraseRecognized;
        keywordRecognizer.Start();
    }

    private void KeywordRecognizer_OnPhraseRecognized(PhraseRecognizedEventArgs args)
    {
        // Look up the recognized phrase; unknown phrases are silently ignored.
        System.Action keywordAction;
        if (!keywords.TryGetValue(args.text, out keywordAction))
        {
            return;
        }
        keywordAction.Invoke();
    }
}
Here's another script, attached to the same object in Unity, that functions correctly and triggers Debug.Logs:
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.Windows.Speech;
using Academy.HoloToolkit.Unity;
/// <summary>
/// The SurfaceManager class allows applications to scan the environment for a specified amount of time
/// and then process the Spatial Mapping Mesh (find planes, remove vertices) after that time has expired.
/// </summary>
public class PlaySpaceManager : Singleton<PlaySpaceManager>
{
// Inspector-tunable scanning configuration for the spatial mapping pass.
[Tooltip("When checked, the SurfaceObserver will stop running after a specified amount of time.")]
public bool limitScanningByTime = true;
[Tooltip("How much time (in seconds) that the SurfaceObserver will run after being started; used when 'Limit Scanning By Time' is checked.")]
public float scanTime = 30.0f;
[Tooltip("Material to use when rendering Spatial Mapping meshes while the observer is running.")]
public Material defaultMaterial;
[Tooltip("Optional Material to use when rendering Spatial Mapping meshes after the observer has been stopped.")]
public Material secondaryMaterial;
[Tooltip("Minimum number of floor planes required in order to exit scanning/processing mode.")]
public uint minimumFloors = 1;
[Tooltip("Minimum number of wall planes required in order to exit scanning/processing mode.")]
public uint minimumWalls = 1;
/// <summary>
/// Indicates if processing of the surface meshes is complete.
/// </summary>
private bool meshesProcessed = false;
/// <summary>
/// GameObject initialization: applies the scanning material and subscribes to
/// the plane-creation-complete event.
/// </summary>
private void Start()
{
// Update surfaceObserver and storedMeshes to use the same material during scanning.
SpatialMappingManager.Instance.SetSurfaceMaterial(defaultMaterial);
// Register for the MakePlanesComplete event.
SurfaceMeshesToPlanes.Instance.MakePlanesComplete += SurfaceMeshesToPlanes_MakePlanesComplete;
}
/// <summary>
/// Called once per frame. Waits for the configured scan time to elapse, then
/// stops the surface observer and converts the scanned meshes into planes.
/// </summary>
private void Update()
{
    // Nothing to do once processing has finished, or if scanning is not time-limited.
    if (meshesProcessed || !limitScanningByTime)
    {
        return;
    }

    // Still within the scanning window — let the observer keep collecting data.
    if ((Time.time - SpatialMappingManager.Instance.StartTime) < scanTime)
    {
        return;
    }

    // Scanning time is up: stop the observer if it is still running...
    if (SpatialMappingManager.Instance.IsObserverRunning())
    {
        SpatialMappingManager.Instance.StopObserver();
        Debug.Log("!!Stop Observer!!");
    }

    // ...then generate planes and mark processing as complete.
    CreatePlanes();
    meshesProcessed = true;
}
/// <summary>
/// Handler for the SurfaceMeshesToPlanes MakePlanesComplete event.
/// </summary>
/// <param name="source">Source of the event.</param>
/// <param name="args">Args for the event.</param>
private void SurfaceMeshesToPlanes_MakePlanesComplete(object source, System.EventArgs args)
{
/* TODO: 3.a DEVELOPER CODING EXERCISE 3.a */
// Collection of floor and table planes that we can use to set horizontal items on.
List<GameObject> horizontal = new List<GameObject>();
// Collection of wall planes that we can use to set vertical items on.
List<GameObject> vertical = new List<GameObject>();
// 3.a: Get all floor and table planes by calling
// SurfaceMeshesToPlanes.Instance.GetActivePlanes().
// Assign the result to the 'horizontal' list.
horizontal = SurfaceMeshesToPlanes.Instance.GetActivePlanes(PlaneTypes.Table | PlaneTypes.Floor);
// 3.a: Get all wall planes by calling
// SurfaceMeshesToPlanes.Instance.GetActivePlanes().
// Assign the result to the 'vertical' list.
vertical = SurfaceMeshesToPlanes.Instance.GetActivePlanes(PlaneTypes.Wall);
// Check to see if we have enough horizontal planes (minimumFloors)
// and vertical planes (minimumWalls), to set holograms on in the world.
if (horizontal.Count >= minimumFloors && vertical.Count >= minimumWalls)
{
// We have enough floors and walls to place our holograms on...
// 3.a: Let's reduce our triangle count by removing triangles
// from SpatialMapping meshes that intersect with our active planes.
// Call RemoveVertices().
// Pass in all activePlanes found by SurfaceMeshesToPlanes.Instance.
RemoveVertices(SurfaceMeshesToPlanes.Instance.ActivePlanes);
// 3.a: We can indicate to the user that scanning is over by
// changing the material applied to the Spatial Mapping meshes.
// Call SpatialMappingManager.Instance.SetSurfaceMaterial().
// Pass in the secondaryMaterial.
SpatialMappingManager.Instance.SetSurfaceMaterial(secondaryMaterial);
// 3.a: We are all done processing the mesh, so we can now
// initialize a collection of Placeable holograms in the world
// and use horizontal/vertical planes to set their starting positions.
// Call SpaceCollectionManager.Instance.GenerateItemsInWorld().
// Pass in the lists of horizontal and vertical planes that we found earlier.
SpaceCollectionManager.Instance.GenerateItemsInWorld(horizontal, vertical);
}
else
{
// We do not have enough floors/walls to place our holograms on...
// 3.a: Re-enter scanning mode so the user can find more surfaces by
// calling StartObserver() on the SpatialMappingManager.Instance.
SpatialMappingManager.Instance.StartObserver();
// 3.a: Re-process spatial data after scanning completes by
// re-setting meshesProcessed to false.
meshesProcessed = false;
}
}
/// <summary>
/// Creates planes from the spatial mapping surfaces.
/// </summary>
private void CreatePlanes()
{
// Generate planes based on the spatial map.
SurfaceMeshesToPlanes surfaceToPlanes = SurfaceMeshesToPlanes.Instance;
if (surfaceToPlanes != null && surfaceToPlanes.enabled)
{
surfaceToPlanes.MakePlanes();
}
}
/// <summary>
/// Removes triangles from the spatial mapping surfaces.
/// </summary>
/// <param name="boundingObjects"></param>
private void RemoveVertices(IEnumerable<GameObject> boundingObjects)
{
RemoveSurfaceVertices removeVerts = RemoveSurfaceVertices.Instance;
if (removeVerts != null && removeVerts.enabled)
{
removeVerts.RemoveSurfaceVerticesWithinBounds(boundingObjects);
}
}
/// <summary>
/// Called when the GameObject is unloaded.
/// </summary>
private void OnDestroy()
{
if (SurfaceMeshesToPlanes.Instance != null)
{
SurfaceMeshesToPlanes.Instance.MakePlanesComplete -= SurfaceMeshesToPlanes_MakePlanesComplete;
}
}
}

MonoGame: model moving along a fixed path disappears after a few seconds

So I'm trying to create an interactive environment using a 3D model. I have the model and the camera moving along the Z-axis by a fixed increment each frame, but after about 3 seconds the model just disappears. I'm not sure what's happening; any help is very much appreciated.
My Game code is posted below.
namespace model_viewer
{
    /// <summary>
    /// This is the main type for your game.
    /// Moves a ship model and the camera forward along +Z at the same rate,
    /// with the camera trailing 3 units behind and looking at the ship.
    /// </summary>
    public class Game1 : Game
    {
        GraphicsDeviceManager graphics;
        SpriteBatch spriteBatch;

        // Loads the ship
        Model Ship;

        // Moves the ship and the camera together
        float moveCamera;
        float moveShip;

        // Moves the ship in the user's direction
        float keyMoveX;
        float keyMoveY;

        public Game1()
            : base()
        {
            graphics = new GraphicsDeviceManager(this);
            Content.RootDirectory = "Content";
        }

        /// <summary>
        /// Allows the game to perform any initialization it needs to before starting to run.
        /// Starts the camera 3 units behind the ship (which starts at Z = 0).
        /// </summary>
        protected override void Initialize()
        {
            moveCamera = -3;
            base.Initialize();
        }

        /// <summary>
        /// LoadContent will be called once per game and is the place to load
        /// all of your content.
        /// </summary>
        protected override void LoadContent()
        {
            // Create a new SpriteBatch, which can be used to draw textures.
            spriteBatch = new SpriteBatch(GraphicsDevice);
            Ship = Content.Load<Model>("Graphics/Ship");
        }

        /// <summary>
        /// UnloadContent will be called once per game and is the place to unload
        /// all content.
        /// </summary>
        protected override void UnloadContent()
        {
            // TODO: Unload any non ContentManager content here
        }

        /// <summary>
        /// Advances the camera and ship by the same fixed increment so the
        /// camera keeps a constant 3-unit offset behind the ship.
        /// </summary>
        /// <param name="gameTime">Provides a snapshot of timing values.</param>
        protected override void Update(GameTime gameTime)
        {
            if (GamePad.GetState(PlayerIndex.One).Buttons.Back == ButtonState.Pressed || Keyboard.GetState().IsKeyDown(Keys.Escape))
                Exit();

            moveCamera += 0.005f;
            moveShip += 0.005f;
            base.Update(gameTime);
        }

        /// <summary>
        /// This is called when the game should draw itself.
        /// </summary>
        /// <param name="gameTime">Provides a snapshot of timing values.</param>
        protected override void Draw(GameTime gameTime)
        {
            GraphicsDevice.Clear(Color.CornflowerBlue);

            Matrix proj = Matrix.CreatePerspectiveFieldOfView(MathHelper.PiOver2, 1, 0.1f, 10000.0f);

            // BUG FIX: the view matrix previously looked at the fixed point
            // Vector3.Zero. Because both camera and ship translate along +Z,
            // the camera eventually passed the origin and rotated away from
            // the ship, making it "disappear". Target the ship's current
            // position instead so it stays centered in view.
            Matrix view = Matrix.CreateLookAt(
                new Vector3(0, 0, moveCamera),
                new Vector3(0, 0, moveShip),
                Vector3.Up);

            // Normalize the model to unit size, face it away from the camera,
            // and place it at its current Z position.
            float scale = 1.0f / Ship.Meshes[0].BoundingSphere.Radius;
            Matrix world = Matrix.CreateScale(scale) * Matrix.CreateRotationY(MathHelper.ToRadians(180)) * Matrix.CreateTranslation(new Vector3(0, 0, moveShip));

            Ship.Draw(world, view, proj);
            base.Draw(gameTime);
        }
    }
}
I found the answer: my 'view' matrix was targeting a point that wasn't moving with my model, so as the camera advanced it rotated to keep looking at that fixed point and turned away from the model.