Unity Object Detection: Barracuda, MobileNet and Webcam

I am trying to run the following pre-trained ONNX MobileNet model in Unity using Barracuda:
https://github.com/onnx/models/tree/master/vision/classification/mobilenet
with my webcam as the camera source, using this script to test the system:
using UnityEngine;
using UnityEngine.UI;
using Unity.Barracuda;
using System.IO;
using System.Linq;
public class Webcam : MonoBehaviour
{
public NNModel mob_net;
public Model model;
private IWorker worker;
int current_cam_index = 0;
WebCamTexture tex;
public RawImage display;
private bool brain_on = false;
private const int SIZE = 224;
public void start_cam()
{
model = ModelLoader.Load(mob_net);
worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, model);
if (tex != null)
{
stop_cam();
}
else
{
WebCamDevice device = WebCamTexture.devices[current_cam_index];
tex = new WebCamTexture(device.name);
display.texture = tex;
tex.Play();
}
}
public void stop_cam()
{
display.texture = null;
tex.Stop();
tex = null;
}
void crop_and_normalize_inference(WebCamTexture src)
{
int x = Mathf.FloorToInt(display.transform.position.x);
int y = Mathf.FloorToInt(display.transform.position.y);
Color[] pix = src.GetPixels(x, y, SIZE, SIZE);
Texture2D dest = new Texture2D(SIZE, SIZE);
dest.SetPixels(pix);
dest.Apply();
float[] floats = new float[224 * 224 * 3];
for (int i = 0; i < pix.Length; ++i)
{
var color = pix[i];
floats[i * 3 + 0] = (color.r - 127) / 127.5f;
floats[i * 3 + 1] = (color.g - 127) / 127.5f;
floats[i * 3 + 2] = (color.b - 127) / 127.5f;
}
Tensor in_tensor = new Tensor(1, 224, 224, 3, floats);
worker.Execute(in_tensor);
Tensor out_tensor = worker.PeekOutput("MobilenetV2/Predictions/Reshape_1");
var max = Mathf.Max(out_tensor.ToReadOnlyArray());
var arr = out_tensor.ToReadOnlyArray();
var index = System.Array.IndexOf(arr, max);
string line = File.ReadLines(@"D:\Unity\WebCam\Cam\Assets\Scenes\mobile_net.txt").Skip(index).Take(1).First();
Debug.Log(line);
in_tensor.Dispose();
out_tensor.Dispose();
worker.Dispose();
}
public void brain()
{
brain_on = !brain_on;
}
private void Update()
{
if (brain_on)
{
crop_and_normalize_inference(tex);
brain_on = false;
}
}
}
But when I run it I get an error which says:
ArgumentException: Can only specify one unknown dimension
My guess is that either Unity doesn't support the model, or that for some reason my input tensor shape is incorrect...
Any help would be massively appreciated,
K

Solution: Set the Barracuda package to version 1.3.0.
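A minimal sketch of how to pin that version, assuming the package comes from the Unity Package Manager under the ID com.unity.barracuda (check your own Packages/manifest.json for the exact name):
{
  "dependencies": {
    "com.unity.barracuda": "1.3.0"
  }
}
After Unity re-resolves the packages, re-import the .onnx asset and try the scene again.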

Related

Graphics.DrawMesh has a weird offset while scaling in Unity, why?

I'm currently implementing an after-image effect and I've run into a problem with Graphics.DrawMesh. The code is shown below:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class AfterImage3DByCombine : MonoBehaviour
{
public class AfterImage
{
public Mesh mesh;
public Material material;
// public Matrix4x4 matrix;
public float duration;
public float time;
}
protected SkinnedMeshRenderer[] skinRenderers;
protected MeshFilter[] filters;
protected int filtersCount = 0;
public bool IncludeMeshFilter = true;
public Material EffectMaterial;
public float Duration = 5;
public float Interval = 0.2f;
public float FadeoutTime = 1;
private float mTime = 5;
private List<AfterImage> mAfterImageList = new List<AfterImage>();
protected virtual void Awake()
{
skinRenderers = GetComponentsInChildren<SkinnedMeshRenderer>();
if (IncludeMeshFilter)
{
filters = GetComponentsInChildren<MeshFilter>();
filtersCount = filters.Length;
}
}
//call from another place to have after image effect
public void Play()
{
if (skinRenderers.Length + filtersCount <= 0)
{
return;
}
mTime = Duration;
StartCoroutine(AddAfterImage());
}
IEnumerator AddAfterImage()
{
while (mTime > 0)
{
CreateImage();
yield return new WaitForSeconds(Interval);
mTime -= Interval;
}
yield return null;
}
void CreateImage()
{
CombineInstance[] combineInstances = new CombineInstance[skinRenderers.Length + filtersCount];
int index = 0;
for (int i = 0; i < skinRenderers.Length; i++)
{
var render = skinRenderers[i];
var mesh = new Mesh();
render.BakeMesh(mesh);
combineInstances[index] = new CombineInstance
{
mesh = mesh,
transform = render.gameObject.transform.localToWorldMatrix,
subMeshIndex = 0
};
index++;
}
for (int i = 0; i < filtersCount; i++)
{
var render = filters[i];
var temp = (render.sharedMesh != null) ? render.sharedMesh : render.mesh;
var mesh = (Mesh)Instantiate(temp);
combineInstances[index] = new CombineInstance
{
mesh = mesh,
transform = render.gameObject.transform.localToWorldMatrix,
subMeshIndex = 0
};
index++;
}
Mesh combinedMesh = new Mesh();
combinedMesh.CombineMeshes(combineInstances, true, true);
mAfterImageList.Add(new AfterImage
{
mesh = combinedMesh,
material = new Material(EffectMaterial),
time = FadeoutTime,
duration = FadeoutTime,
});
}
void LateUpdate()
{
bool needRemove = false;
foreach (var image in mAfterImageList)
{
image.time -= Time.deltaTime;
if (image.material.HasProperty("_Color"))
{
Color color = Color.red;
color.a = Mathf.Max(0, image.time / image.duration);
image.material.SetColor("_Color", color);
}
Matrix4x4 mat = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, Vector3.one * 2f);
//public static void DrawMesh(Mesh mesh, Matrix4x4 matrix, Material material, int layer, Camera camera, int submeshIndex, MaterialPropertyBlock properties, ShadowCastingMode castShadows);
Graphics.DrawMesh(image.mesh, Matrix4x4.identity, image.material, gameObject.layer, null, 0, null, false);
if (image.time <= 0)
{
needRemove = true;
}
}
if (needRemove)
{
mAfterImageList.RemoveAll(x => x.time <= 0);
}
}
}
Since my prefab is scaled to 0.5 while it's running, I pass a matrix with two times scaling, Matrix4x4 mat = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, Vector3.one * 2f);, into
Graphics.DrawMesh.
Then the mesh drawn by Graphics.DrawMesh isn't in its original position; there is an offset between the original mesh and the drawn mesh.
And if I pass Matrix4x4.identity into Graphics.DrawMesh, the drawn mesh has 0.5 times scaling, which looks smaller than the original mesh.
Why is there an offset, and how can I eliminate it without changing the prefab's scale?
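One observation, assuming the combined vertices end up in world space (the CombineInstance transforms are localToWorldMatrix): TRS(Vector3.zero, Quaternion.identity, Vector3.one * 2f) scales about the world origin, so every vertex that is not at the origin is also translated, which shows up as an offset. A minimal sketch of scaling about the model's own pivot instead (pivot here is a hypothetical reference point such as the character root's world position), not a definitive fix:
// Scale by 2 around 'pivot' rather than around the world origin:
// move the pivot to the origin, scale, then move it back.
Vector3 pivot = transform.position; // hypothetical: the prefab root's world position
Matrix4x4 scaleAboutPivot =
    Matrix4x4.TRS(pivot, Quaternion.identity, Vector3.one * 2f) *
    Matrix4x4.TRS(-pivot, Quaternion.identity, Vector3.one);
Graphics.DrawMesh(image.mesh, scaleAboutPivot, image.material, gameObject.layer, null, 0, null, false);
With this matrix the pivot stays fixed, so the drawn mesh is scaled in place instead of being pushed away from the origin.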

How to hide and show a 3D object in augmented reality with Unity?

I created the augmented reality scene with a User Defined Target (through the Vuforia core samples).
The augmented reality works fine, but I am unable to hide and show the
3D object in Unity. I used SetActive(false) on the object, but it does not work in the Android application. I want to implement this on Android.
using System;
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.Networking;
using System.Net;
using System.IO;
using System.Text;
using System.Xml.Linq;
using UnityEngine.SceneManagement;
public class first : MonoBehaviour {
public UnityEngine.UI.Text responseText;
public GameObject silo1;
public GameObject silo2;
public GameObject silo3;
public GameObject silo4;
public GameObject silo5;
public Color colorStart = Color.red;
public Color colorEnd = Color.green;
public float duration = 1.0F;
private float xS = 0;
private float yS = 0;
private float zS = 0;
private float xO = 0;
private float yO = 0;
private float zO = 0;
public void Request()
{
try
{
silo1.SetActive(true);
silo2.SetActive(false);
silo3.SetActive(false);
silo4.SetActive(false);
silo5.SetActive(false);
/////////////////////////////////////////////////////////////////////////////////
string url = "http://dev-lafargeholcim.online.indeff.com/Thingworx/Things/UnityApplicationThing/Services/displayThingWorxData";
var request = UnityWebRequest.Post(url, "");
request.SetRequestHeader("Content-Type", "application/json");
request.SetRequestHeader("Accept", "text/csv");
request.SetRequestHeader("appKey", "d0b8e896-8690-416c-95d5-965165986234");
StartCoroutine(onResponse(request));
//////////////////////////////////////////////////////////////Post request//////////////////////////
//Naming to the game object
//silo.name = "silo";
// Debug.Log("Mesh Length:"+ silo.GetComponent<Renderer>().material.mainTexture.width);
// Destroy(silo);
float lerp = Mathf.PingPong(Time.time, duration) / duration;
silo1.GetComponent<Renderer>().material.color = Color.Lerp(colorStart, colorEnd, lerp);
if (yS >= 3) { return; }
else
{
duration = duration + 1;
xS = silo1.transform.localScale.x;
yS = silo1.transform.localScale.y;
zS = silo1.transform.localScale.z;
xO = silo1.transform.position.x;
yO = silo1.transform.position.y;
zO = silo1.transform.position.z;
transform.position = silo1.transform.position - Vector3.down * -10f;
yS = yS + Convert.ToSingle(0.3);
xS = xS;
zS = zS;
yO = yO;
xO = xO;
zO = zO + Convert.ToSingle(0.03);
silo1.transform.localScale = new Vector3(xS, yS, zS);
silo1.transform.position = new Vector3(xO, yO, zO);
Debug.Log("Silo Transformation\t" + silo1.transform.localScale + "\nTrasform Position\t" + silo1.transform.position);
}
}
catch (Exception e) { Debug.Log("ERROR : " + e.Message); }
}
private IEnumerator onResponse(UnityWebRequest req)
{
Debug.Log("Response function get called");
yield return req.SendWebRequest();
if (req.isNetworkError)
Debug.Log("Network error has occured: " + req.GetResponseHeader(""));
else
Debug.Log("Success " + req.downloadHandler.text);
responseText.text = req.downloadHandler.text;
byte[] results = req.downloadHandler.data;
Debug.Log("Second Success\t" + responseText.text);
}
}

Switching active states of two GameObjects in Unity3D

With Unity3D I am trying to create a scene with an alpha texture as a silhouette, which is added when looking up and removed when looking down.
Currently I have the exposure of an equirectangular image changing on look-up, but my silhouette object throws an error saying it has not been assigned to an instance.
As you can see from the console, it is eventually recognised, but I cannot set its active state. This is the current state of my code applied to the scene:
using UnityEngine;
using System.Collections;
public class switchScript : MonoBehaviour {
public Cardboard cb;
public Renderer leftEyeDay;
public Renderer rightEyeDay;
private GameObject[] dayObjects;
public GameObject nightObject;
void Start () {
MeshFilter filter = GetComponent(typeof (MeshFilter)) as MeshFilter;
if (filter != null) {
Mesh mesh = filter.mesh;
Vector3[] normals = mesh.normals;
for (int i=0;i<normals.Length;i++)
normals[i] = -normals[i];
mesh.normals = normals;
for (int m=0;m<mesh.subMeshCount;m++)
{
int[] triangles = mesh.GetTriangles(m);
for (int i=0;i<triangles.Length;i+=3)
{
int temp = triangles[i + 0];
triangles[i + 0] = triangles[i + 1];
triangles[i + 1] = temp;
}
mesh.SetTriangles(triangles, m);
}
}
}
// Update is called once per frame
void Update () {
float xAngle = cb.HeadPose.Orientation.eulerAngles.x;
if (isLookingUp (xAngle)) {
var exposureValue = getExposureValue (xAngle);
leftEyeDay.material.SetFloat ("_Exposure", exposureValue);
rightEyeDay.material.SetFloat ("_Exposure", exposureValue);
toggleNight ();
} else {
leftEyeDay.material.SetFloat ("_Exposure", 1F);
rightEyeDay.material.SetFloat ("_Exposure", 1F);
toggleNight ();
}
}
public bool isLookingUp (float xAngle) {
return xAngle > 270 && xAngle < 340;
}
public float getExposureValue (float xAngle) {
var _xAngle = Mathf.Clamp (xAngle, 320, 340);
return ScaleValue (320.0F, 340.0F, 0.3F, 1.0F, _xAngle);
}
public float ScaleValue (float from1, float to1, float from2, float to2, float v) {
return from2 + (v - from1) * (to2 - from2) / (to1 - from1);
}
void toggleDay() {
print (nightObject);
nightObject = GameObject.FindGameObjectWithTag ("Night");
nightObject.SetActive (false);
}
void toggleNight() {
print (nightObject);
nightObject = GameObject.FindGameObjectWithTag ("Night");
nightObject.SetActive (true);
}
}
GameObject.FindGameObjectWithTag("Night") returns null when the whole object is inactive. Just toggle the scripts on that object instead of deactivating the whole GameObject.
OK, if anyone needs this, here is an easy way to create the scene switch:
private GameObject silhouetteObject;
void Awake() {
silhouetteObject = GameObject.FindGameObjectWithTag ("Night");
silhouetteObject.GetComponent<Renderer>().enabled = false;
}
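Building on that, a minimal sketch of what toggleDay and toggleNight could look like with the cached silhouetteObject field from the Awake snippet above (only the Renderer is toggled, so the object itself stays active and the reference keeps working):
void toggleDay() {
    // Hide the silhouette without deactivating the GameObject.
    silhouetteObject.GetComponent<Renderer>().enabled = false;
}
void toggleNight() {
    // Show the silhouette again.
    silhouetteObject.GetComponent<Renderer>().enabled = true;
}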

Merge textures at Runtime

Is there any way to "bake" one texture into another, other than using SetPixels()?
Right now I'm using something like this, but it's too slow:
public static Texture2D CombineTextures(Texture2D aBaseTexture, Texture2D aToCopyTexture, int x, int y)
{
int aWidth = aBaseTexture.width;
int aHeight = aBaseTexture.height;
int bWidth = aToCopyTexture.width;
int bHeight = aToCopyTexture.height;
Texture2D aReturnTexture = new Texture2D(aWidth, aHeight, TextureFormat.RGBA32, false);
Color[] aBaseTexturePixels = aBaseTexture.GetPixels();
Color[] aCopyTexturePixels = aToCopyTexture.GetPixels();
int aPixelLength = aBaseTexturePixels.Length;
for(int y1 = y, y2 = 0; y1 < aHeight && y2 < bHeight ; y1++, y2++)
{
for(int x1 = x, x2 = 0 ; x1 < aWidth && x2 < bWidth; x1++, x2++)
{
aBaseTexturePixels[x1 + y1*aWidth] = Color.Lerp(aBaseTexturePixels[x1 + y1*aWidth], aCopyTexturePixels[x2 + y2*bWidth], aCopyTexturePixels[x2 + y2*bWidth].a);
}
}
aReturnTexture.SetPixels(aBaseTexturePixels);
aReturnTexture.Apply(false);
return aReturnTexture;
}
The problem is that I need to display a lot of sprites on a 2D surface (blood, enemy corpses, etc.), and just instantiating every sprite will greatly reduce fps.
If you are concerned about an fps drop when instantiating prefabs, you should definitely build an object pooling system. So you will have a system that:
Instantiates all objects in the pool and keeps them far away from the main camera
Lets you "borrow" an object from the pool once you need it
Returns the object to the pool once it is not needed anymore (for example, when the sprite goes out of the camera view)
Baking it all into one texture isn't the best practice; you would need a huge amount of RAM for it. Consider the steps above, it's a very common practice.
Good example here:
using UnityEngine;
using System.Collections;
using System.Collections.Generic;
using System;
using System.Linq;
public class BackgroundPool : MonoBehaviour
{
public static BackgroundPool instance;
public List<BackgroundSection> sectionsLibrary = new List<BackgroundSection>();
public int poolSize = 4;
public List<BackgroundSection> pool = new List<BackgroundSection>();
void Awake()
{
instance = this;
DateTime startGenTime = DateTime.Now;
//generateSectionsPool
for (int i=0; i<sectionsLibrary.Count; i++)
{
for (int j=0; j<poolSize; j++)
{
if (j == 0)
{
sectionsLibrary[i].positionInPool = sectionsLibrary[i].transform.position;
pool.Add(sectionsLibrary[i]);
}
else
{
BackgroundSection section = (BackgroundSection)Instantiate(sectionsLibrary[i]);
section.transform.parent = this.transform;
section.transform.position = new Vector3((-(ExtensionMethods.GetBounds(sectionsLibrary[i].gameObject).extents.x * 2) * j) + sectionsLibrary[i].transform.position.x,
sectionsLibrary[i].transform.position.y);
section.transform.localEulerAngles = Vector3.zero;
section.gameObject.name = sectionsLibrary[i].gameObject.name + ":" + j.ToString();
section.positionInPool = section.transform.position;
pool.Add(section);
}
}
}
Debug.Log("Background Pool generated in: " + (DateTime.Now - startGenTime).TotalSeconds.ToString() + " s");
}
public BackgroundSection GetPiece(Scenery scenery, SceneryLayer _layer)
{
List<BackgroundSection> allScenery = new List<BackgroundSection>();
foreach (BackgroundSection section in pool) { if (section.scenery == scenery) allScenery.Add(section); }
List<BackgroundSection> matchingPieces = new List<BackgroundSection>();
foreach (BackgroundSection section in allScenery) { if (section.sceneryLayer == _layer) matchingPieces.Add(section); }
if (matchingPieces.Count > 0)
{
// Note: the int overload of Random.Range excludes the max value, so use Count (not Count-1) to allow the last element.
BackgroundSection pickedSection = matchingPieces[UnityEngine.Random.Range(0, matchingPieces.Count)];
pool.Remove(pickedSection);
return pickedSection;
}
else
{
Debug.LogError("Cann't get background piece matching criteria, scenery: " + scenery + ", layer" + _layer);
return null;
}
}
public void ReturnPiece(BackgroundSection section)
{
pool.Add(section);
section.transform.parent = this.transform;
section.transform.position = section.positionInPool;
}
}
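For completeness, a hypothetical spawner sketching how the pool above would typically be used; the Scenery/SceneryLayer fields and the spawn position are placeholders for whatever the game provides:
using UnityEngine;
public class BackgroundSpawner : MonoBehaviour
{
    public Scenery scenery;           // set in the Inspector (placeholder)
    public SceneryLayer sceneryLayer; // set in the Inspector (placeholder)
    private BackgroundSection current;
    void SpawnSection(Vector3 position)
    {
        // Borrow a pre-instantiated section instead of calling Instantiate at runtime.
        current = BackgroundPool.instance.GetPiece(scenery, sceneryLayer);
        if (current != null)
            current.transform.position = position;
    }
    void DespawnSection()
    {
        // Hand the section back once it has scrolled out of the camera view.
        if (current != null)
        {
            BackgroundPool.instance.ReturnPiece(current);
            current = null;
        }
    }
}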

Draw a GraphicsPath in a PDF

I am trying to create a PDF based on some vector art I have in my C# application. I have two major problems when I try to map the points and types from a GraphicsPath.
Some paths are just plain missing.
When a sub-path is an internal boundary I need to indicate that somehow; i.e., the circle inside the letter d is filled in.
I'm referencing iTextSharp 5.5.2 from NuGet. I'm only using AddString here because I want an easy way to demonstrate creating a complex path in this example. For my actual use case I won't be using a path to place text in the PDF.
using iTextSharp.text;
using iTextSharp.text.pdf;
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Drawing.Drawing2D;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace PdfGen
{
class StackQuestion
{
public static void Main()
{
string filename = @"d:\itext.pdf";
using (var doc = new Document())
using (var fs = new FileStream(filename, FileMode.Create))
{
var writer = PdfWriter.GetInstance(doc, fs);
doc.Open();
writer.DirectContent.SetColorFill(BaseColor.BLACK);
var path = new GraphicsPath(FillMode.Winding);
path.AddString("Hello World", FontFamily.GenericSerif,
(int)FontStyle.Regular, 25f, RectangleF.Empty,
StringFormat.GenericDefault);
AddPath(path, writer.DirectContent);
doc.Close();
}
System.Diagnostics.Process.Start(filename);
}
static void AddPath(GraphicsPath path, PdfContentByte to)
{
var view = to.PdfDocument.PageSize;
path.FillMode = System.Drawing.Drawing2D.FillMode.Winding;
var d = path.PathData;
for (int i = 0; i < d.Points.Length && i < d.Types.Length; i++)
{
var t = (PathPointType)d.Types[i];
var p = Fix(d.Points[i], view);
if (Match(t, PathPointType.Bezier))
{
var p2 = Fix(d.Points[++i], view);
if (d.Types.Length > i + 1 &&
Match((PathPointType)d.Types[i + 1],
PathPointType.Bezier3))
{
var p3 = Fix(d.Points[++i], view);
to.CurveTo(p.X, p.Y, p2.X, p2.Y, p3.X, p3.Y);
}
else
{
to.CurveTo(p.X, p.Y, p2.X, p2.Y);
}
}
if (Match(t, PathPointType.Line))
{
to.LineTo(p.X, p.Y);
}
if (Match(t, PathPointType.CloseSubpath))
{
to.ClosePath();
to.EoFill();
}
if (t == PathPointType.Start)
{
to.NewPath();
to.MoveTo(p.X, p.Y);
}
}
}
static bool Match(PathPointType type, PathPointType match)
{
return (type & match) == match;
}
static System.Drawing.PointF Fix(System.Drawing.PointF pt,
iTextSharp.text.Rectangle view)
{
return new System.Drawing.PointF(pt.X, view.Height - pt.Y);
}
}
}
I'm posting an answer to myself in case anyone else needs a simple function to plot out a GraphicsPath in iTextSharp. I had two problems with the sample code in my question:
As mkl pointed out, I was trying to fill too often.
I failed to notice that PathPointType.Line is a valid mask within PathPointType.Bezier, so the code was drawing a line back to the origin after every curve.
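For reference, a minimal illustration of that second point, based on the System.Drawing.Drawing2D.PathPointType values (Line is 1 and Bezier is 3, so the Bezier bits contain the Line bit), which is why the updated code uses else if for the line case:
using System;
using System.Drawing.Drawing2D;
class PathPointTypeMaskDemo
{
    static void Main()
    {
        // Bezier is 3 (binary 011) and Line is 1, so a Bezier point also
        // satisfies a bitwise test for Line and must not be treated as a line too.
        bool bezierMatchesLine =
            (PathPointType.Bezier & PathPointType.Line) == PathPointType.Line;
        Console.WriteLine(bezierMatchesLine); // prints True
    }
}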
Updated code:
using iTextSharp.text;
using iTextSharp.text.pdf;
using System;
using System.Drawing;
using System.Drawing.Drawing2D;
using System.IO;
namespace PdfGen
{
class StackQuestion
{
public static void Main()
{
string filename = @"d:\itext.pdf";
using (var doc = new Document())
using (var fs = new FileStream(filename, FileMode.Create))
{
var writer = PdfWriter.GetInstance(doc, fs);
doc.Open();
writer.DirectContent.SetColorFill(BaseColor.BLACK);
var path = new GraphicsPath(FillMode.Winding);
path.AddString("Hello World", FontFamily.GenericSansSerif,
(int)FontStyle.Regular, 90f, PointF.Empty,
StringFormat.GenericDefault);
AddPath(path, writer.DirectContent);
writer.DirectContent.EoFill();
doc.Close();
}
System.Diagnostics.Process.Start(filename);
}
static void AddPath(GraphicsPath path, PdfContentByte to)
{
var view = to.PdfDocument.PageSize;
var d = path.PathData;
to.NewPath();
PointF? start = null;
for (int i = 0; i < d.Points.Length && i < d.Types.Length; i++)
{
var t = (PathPointType)d.Types[i];
var p = Fix(d.Points[i], view);
if (Match(t, PathPointType.Bezier))
{
var p2 = Fix(d.Points[++i], view);
if (d.Types.Length > i + 1 &&
Match((PathPointType)d.Types[i + 1],
PathPointType.Bezier))
{
var p3 = Fix(d.Points[++i], view);
to.CurveTo(p.X, p.Y, p2.X, p2.Y, p3.X, p3.Y);
}
else
{
to.CurveTo(p.X, p.Y, p2.X, p2.Y);
}
}
else if (Match(t, PathPointType.Line))
{
to.LineTo(p.X, p.Y);
}
if (Match(t, PathPointType.CloseSubpath))
{
if (start != null)
to.LineTo(start.Value.X, start.Value.Y);
start = null;
to.ClosePath();
}
if (t == PathPointType.Start)
{
if (start != null)
to.LineTo(start.Value.X, start.Value.Y);
start = p;
to.MoveTo(p.X, p.Y);
}
}
}
static bool Match(PathPointType type, PathPointType match)
{
return (type & match) == match;
}
static System.Drawing.PointF Fix(System.Drawing.PointF pt,
iTextSharp.text.Rectangle view)
{
return new System.Drawing.PointF(pt.X, view.Height - pt.Y);
}
}
}