public class testEmguCV : MonoBehaviour
{
    private Capture capture;

    void Start()
    {
        capture = new Capture();
    }

    void Update()
    {
        Image<Gray, Byte> currentFrame = capture.QueryGrayFrame();
        Bitmap bitmapCurrentFrame = currentFrame.ToBitmap();
        MemoryStream m = new MemoryStream();
        bitmapCurrentFrame.Save(m, bitmapCurrentFrame.RawFormat);
        Texture2D camera = new Texture2D(400, 400);
        if (currentFrame != null)
        {
            camera.LoadImage(m.ToArray());
            renderer.material.mainTexture = camera;
        }
    }
}
I used the code above to convert the camera feed from an EmguCV capture to a Texture2D in Unity, but I am having a problem with bitmapCurrentFrame.Save(m, bitmapCurrentFrame.RawFormat);
It gives the following errors:
ArgumentNullException: Argument cannot be null.
Parameter name: encoder
System.Drawing.Image.Save (System.IO.Stream stream, System.Drawing.Imaging.ImageCodecInfo encoder, System.Drawing.Imaging.EncoderParameters encoderParams)
System.Drawing.Image.Save (System.IO.Stream stream, System.Drawing.Imaging.ImageFormat format)
(wrapper remoting-invoke-with-check) System.Drawing.Image:Save (System.IO.Stream, System.Drawing.Imaging.ImageFormat)
WebcamUsingEmgucv.Update () (at Assets/WebcamUsingEmgucv.cs:51)
After several hours of thinking and searching, I don't know what is going on. Please help.
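For what it's worth, the "Parameter name: encoder" hint usually means no codec is registered for the format being saved: a Bitmap created in memory reports ImageFormat.MemoryBmp as its RawFormat, and MemoryBmp has no encoder. A minimal sketch of a workaround, assuming the per-frame PNG encode is acceptable, is to pass an explicit format:

    // Save in an explicit format; the RawFormat of an in-memory Bitmap is
    // MemoryBmp, which has no encoder and triggers the ArgumentNullException.
    bitmapCurrentFrame.Save(m, System.Drawing.Imaging.ImageFormat.Png);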
I used your example in our project, thanks! But I modified it to:
void Update()
{
    if (capture == null)
    {
        Debug.LogError("Capture is null");
        return;
    }

    Image<Gray, Byte> currentFrame = capture.QueryGrayFrame();
    if (currentFrame == null)
    {
        return; // check before using the frame, not after
    }

    MemoryStream m = new MemoryStream();
    currentFrame.Bitmap.Save(m, currentFrame.Bitmap.RawFormat);
    Texture2D camera = new Texture2D(400, 400);
    camera.LoadImage(m.ToArray());
    renderer.material.mainTexture = camera;
}
And it works! FPS averages ~30-35. Good luck!
Try to use this:
https://github.com/neutmute/emgucv/blob/3ceb85cba71cf957d5e31ae0a70da4bbf746d0e8/Emgu.CV/PInvoke/Unity/TextureConvert.cs
It has something like this:
public static Texture2D ImageToTexture2D<TColor, TDepth>(Image<TColor, TDepth> image, bool correctForVerticleFlip)
    where TColor : struct, IColor
    where TDepth : new()
{
    Size size = image.Size;

    if (typeof(TColor) == typeof(Rgb) && typeof(TDepth) == typeof(Byte))
    {
        Texture2D texture = new Texture2D(size.Width, size.Height, TextureFormat.RGB24, false);
        byte[] data = new byte[size.Width * size.Height * 3];
        GCHandle dataHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
        using (Image<Rgb, byte> rgb = new Image<Rgb, byte>(size.Width, size.Height, size.Width * 3, dataHandle.AddrOfPinnedObject()))
        {
            rgb.ConvertFrom(image);
            if (correctForVerticleFlip)
                CvInvoke.cvFlip(rgb, rgb, FLIP.VERTICAL);
        }
        dataHandle.Free();
        texture.LoadRawTextureData(data);
        texture.Apply();
        return texture;
    }
    else //if (typeof(TColor) == typeof(Rgba) && typeof(TDepth) == typeof(Byte))
    {
        Texture2D texture = new Texture2D(size.Width, size.Height, TextureFormat.RGBA32, false);
        byte[] data = new byte[size.Width * size.Height * 4];
        GCHandle dataHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
        using (Image<Rgba, byte> rgba = new Image<Rgba, byte>(size.Width, size.Height, size.Width * 4, dataHandle.AddrOfPinnedObject()))
        {
            rgba.ConvertFrom(image);
            if (correctForVerticleFlip)
                CvInvoke.cvFlip(rgba, rgba, FLIP.VERTICAL);
        }
        dataHandle.Free();
        texture.LoadRawTextureData(data);
        texture.Apply();
        return texture;
    }
}
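A call site might look like this (a sketch, assuming a capture field and renderer as in the snippets above; converting to Rgb first takes the RGB24 fast path):

    using (Image<Bgr, Byte> frame = capture.QueryFrame())
    using (Image<Rgb, Byte> rgbFrame = frame.Convert<Rgb, Byte>())
    {
        Texture2D tex = TextureConvert.ImageToTexture2D(rgbFrame, true);
        renderer.material.mainTexture = tex;
    }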
If you don't want to use interop, you can also use something like
cameraframe.Convert<Rgb,byte>().Data.Cast<byte>().ToArray<byte>()
and use it instead of the interop section.
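For example (a sketch, assuming a 640x480 frame and using System.Linq for Cast; the row width in bytes must match what RGB24 expects):

    byte[] raw = cameraframe.Convert<Rgb, byte>().Data.Cast<byte>().ToArray<byte>();
    Texture2D tex = new Texture2D(640, 480, TextureFormat.RGB24, false);
    tex.LoadRawTextureData(raw);
    tex.Apply();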
Both solutions worked for me. Just remember to destroy the texture before replacing it. I had memory leak issues before I did that.
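The destroy-before-replace pattern, as a minimal sketch:

    Texture2D newTex = TextureConvert.ImageToTexture2D(rgbFrame, true);
    Texture2D oldTex = renderer.material.mainTexture as Texture2D;
    renderer.material.mainTexture = newTex;
    if (oldTex != null) Destroy(oldTex); // otherwise each frame leaks one texture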
I'm working on a script where I basically have to take a low-quality screenshot about every 30 milliseconds. The script is attached to a camera.
What I want to do is reduce the render texture size. The way the code is right now, changing either W or H just gets me a SECTION of what the camera sees, instead of a reduced-size version. So my question is: how can I resize or downsample what is read into the screenshot (Texture2D) so that it is still a representation of the entire screen?
public class CameraRenderToImage : MonoBehaviour
{
    private RemoteRenderServer rrs;

    void Start()
    {
        TimeStamp.SetStart();
        Camera.onPostRender += OnPostRenderCallback;
    }

    void OnPostRenderCallback(Camera cam)
    {
        if (TimeStamp.HasMoreThanThisEllapsed(30))
        {
            TimeStamp.SetStart();
            int W = Screen.width;
            int H = Screen.height;
            Texture2D screenshot = new Texture2D(W, H, TextureFormat.RGB24, false);
            screenshot.ReadPixels(new Rect(0, 0, W, H), 0, 0);
            byte[] bytes = screenshot.EncodeToPNG();
            System.IO.File.WriteAllBytes("check_me_out.png", bytes);
            TimeStamp.Tok("Encode to PNG and Save");
        }
    }

    // Remove the onPostRender callback
    void OnDestroy()
    {
        Camera.onPostRender -= OnPostRenderCallback;
    }
}
If you need to resize your render texture from a script, you can refer to the following code snippet:
void Resize(RenderTexture renderTexture, int width, int height)
{
    if (renderTexture)
    {
        renderTexture.Release();
        renderTexture.width = width;
        renderTexture.height = height;
    }
}
To make it possible to resize the render texture you first need to make sure it is released.
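Usage might then look like this (myRenderTexture being a stand-in for whatever render texture your camera targets):

    // Quarter resolution; Release() inside Resize makes the size change take effect.
    Resize(myRenderTexture, Screen.width / 4, Screen.height / 4);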
To get a Texture2D:
private Texture2D ToCompressedTexture(ref RenderTexture renderTexture)
{
    var texture = new Texture2D(renderTexture.width, renderTexture.height, TextureFormat.ARGB32, false);
    var previousTarget = RenderTexture.active;
    RenderTexture.active = renderTexture;
    texture.ReadPixels(new Rect(0, 0, renderTexture.width, renderTexture.height), 0, 0);
    RenderTexture.active = previousTarget;
    texture.Compress(false);
    texture.Apply(false, true);
    renderTexture.Release();
    renderTexture = null;
    return texture;
}
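Applied to the original question, one way to get a reduced-size capture of the entire view (rather than a cropped section) is to render the camera into a smaller temporary RenderTexture and read from that. A sketch, assuming it replaces the body of OnPostRenderCallback above:

    int w = Screen.width / 4;   // quarter size, full field of view
    int h = Screen.height / 4;
    RenderTexture small = RenderTexture.GetTemporary(w, h, 24);

    // Render the whole view, scaled down to the small target.
    cam.targetTexture = small;
    cam.Render();
    cam.targetTexture = null;

    // Read the downsampled pixels back.
    RenderTexture previous = RenderTexture.active;
    RenderTexture.active = small;
    Texture2D screenshot = new Texture2D(w, h, TextureFormat.RGB24, false);
    screenshot.ReadPixels(new Rect(0, 0, w, h), 0, 0);
    RenderTexture.active = previous;
    RenderTexture.ReleaseTemporary(small);

    System.IO.File.WriteAllBytes("check_me_out.png", screenshot.EncodeToPNG());
    Destroy(screenshot); // avoid leaking one Texture2D per capture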
When I create meshes and textures in Unity on each frame (30 fps), it feels like Unity doesn't release this data from memory after use.
Here is my code:
private bool UpdateFrame(int frameIdx)
{
    bool result = true;
    int readyBuffSize = DecoderAPI.stream_get_ready_buffer_size(m_stream);
    if (m_currMeshFrameIndex != frameIdx
        && readyBuffSize > 0)
    {
        m_currMeshFrameIndex = frameIdx;
        IntPtr frame = DecoderAPI.stream_get_next_frame_obj(m_stream);
        if (frame == IntPtr.Zero)
        {
            result = false;
        }
        else
        {
            long sequentialFrameIdx = DecoderAPI.get_sequential_number(frame);
            DebugMethod("UNITY UpdateFrame", $"readyBuffSize :: {readyBuffSize}");
            DebugMethod("UNITY UpdateFrame", $"sequentialFrameIdx :: {sequentialFrameIdx}");
            Mesh releaseFormer = m_meshFilter.mesh;
            m_meshFilter.mesh = CreateMesh(frame);
            Texture2D texture = CreateTexture(frame);
            m_meshRenderer.material.SetTexture("_MainTex", texture);
            DecoderAPI.stream_release_frame_obj(m_stream, frame);
            Destroy(releaseFormer); // does not seem to help: even when there are no more allocations in C++ the process grows endlessly
        }
    }
    return result;
}
private Mesh CreateMesh(IntPtr frame)
{
    Mesh mesh = new Mesh();

    // Vertices
    int vertexCount = DecoderAPI.frame_get_vertex_count(frame);
    byte[] xyzBytes = new byte[vertexCount * 3 * 4];
    IntPtr xyz = DecoderAPI.frame_get_vertex_xyz(frame);
    Vector3[] vertices = new Vector3[vertexCount];
    GCHandle handle = GCHandle.Alloc(vertices, GCHandleType.Pinned);
    IntPtr pointer = handle.AddrOfPinnedObject();
    Marshal.Copy(xyz, xyzBytes, 0, xyzBytes.Length);
    Marshal.Copy(xyzBytes, 0, pointer, xyzBytes.Length);
    handle.Free();
    mesh.vertices = vertices;

    // Faces
    int faceCount = DecoderAPI.frame_face_count(frame);
    int trisArrSize = faceCount * 3;
    int[] tris = new int[trisArrSize];
    IntPtr indices = DecoderAPI.frame_face_indices(frame);
    Marshal.Copy(indices, tris, 0, trisArrSize);
    mesh.triangles = tris;

    mesh.RecalculateNormals();

    // UV
    int uvCount = DecoderAPI.frame_get_uv_count(frame);
    IntPtr uvData = DecoderAPI.frame_get_uv_data(frame);
    int uvArrSize = uvCount * 2;
    float[] uvArr = new float[uvArrSize];
    Vector2[] uv = new Vector2[uvCount];
    Marshal.Copy(uvData, uvArr, 0, uvArrSize);
    for (int i = 0; i < uvCount; i++)
    {
        Vector2 result = new Vector2(uvArr[i * 2], uvArr[i * 2 + 1]) * new Vector2(1, -1);
        uv[i] = result;
    }
    mesh.uv = uv;

    if (vertexCount != uvCount)
    {
        long frameId = DecoderAPI.get_sequential_number(frame);
        DebugMethod("UNITY CreateMesh", $"HERE : in frame id :: {frameId}, vertexCount : {vertexCount}, uvCount : {uvCount}");
    }

    return mesh;
}
private Texture2D CreateTexture(IntPtr frame)
{
    IntPtr textureObj = DecoderAPI.frame_get_texture_obj(frame);
    DecoderAPI.TextureInfo textureInfo = DecoderAPI.texture_get_info(textureObj);
    int width = textureInfo.width;
    int height = textureInfo.height;
    int channels = textureInfo.channels;
    int stride = textureInfo.stride;
    //DecoderAPI.ColorType colorType = textureInfo.color_type;
    IntPtr pixels = textureInfo.pixels;
    Texture2D texture = new Texture2D(width, height, TextureFormat.RGB24, false);
    //Texture2D texture = new Texture2D(width, height, TextureFormat.DXT5, false);
    texture.LoadRawTextureData(pixels, width * channels * height);
    texture.Apply();
    return texture;
}
So, what I do is create a mesh and a texture for each frame, use them, and then I expect Unity to release them from memory afterwards, but no. I found that Destroy(releaseFormer) should help, but it makes no difference: I see in Task Manager that memory grows endlessly.
As a test, I start my C++ code, generate (let's say) 100 frames, then stop it (so the C++ side allocates nothing more), and the memory still grows. Even if Unity doesn't release data I no longer need, I loaded 100 frames and that's it, so why does memory continue to grow?
The question is: how do I release from memory all the frames that I no longer need?
EDIT
I have changed this method and added Destroy calls in the proper order:
private bool UpdateFrame(int frameIdx)
{
    bool result = true;
    int readyBuffSize = -1;
    if (m_stream != IntPtr.Zero)
    {
        readyBuffSize = DecoderAPI.stream_get_ready_buffer_size(m_stream);
    }
    if (m_currMeshFrameIndex != frameIdx
        && readyBuffSize > 0)
    {
        m_currMeshFrameIndex = frameIdx;
        IntPtr frame = DecoderAPI.stream_get_next_frame_obj(m_stream);
        if (frame == IntPtr.Zero)
        {
            result = false;
        }
        else
        {
            long sequentialFrameIdx = DecoderAPI.frame_get_sequential_number(frame);
            DebugMethod("UNITY UpdateFrame", $"readyBuffSize :: {readyBuffSize}");
            DebugMethod("UNITY UpdateFrame", $"sequentialFrameIdx :: {sequentialFrameIdx}");
            if (m_meshFilter.mesh != null)
            {
                Destroy(m_meshFilter.mesh);
            }
            m_meshFilter.mesh = CreateMesh(frame);
            if (m_texture != null)
            {
                Destroy(m_texture);
            }
            m_texture = CreateTexture(frame);
            m_meshRenderer.material.SetTexture("_MainTex", m_texture);
            if (m_stream != IntPtr.Zero)
            {
                DecoderAPI.stream_release_frame_obj(m_stream, frame);
            }
        }
    }
    return result;
}
releaseFormer is the mesh, right? Did you try calling Destroy on the texture object itself?
Another thread suggested Resources.UnloadUnusedAssets()
Personally, I'd try to do this with a RenderTexture, especially if the texture size doesn't change too often, though that might not be possible for your use case.
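Along the same lines, a sketch of reusing a single Texture2D for the life of the stream (assuming the frame size stays constant) instead of allocating one per frame:

    // Allocate once, e.g. in Start():
    m_texture = new Texture2D(width, height, TextureFormat.RGB24, false);

    // Then per frame, overwrite the pixel data in place instead of calling new:
    m_texture.LoadRawTextureData(pixels, width * channels * height);
    m_texture.Apply();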
I'm developing a Vuforia app for HoloLens using Unity.
This app displays a simple 3D object when an image target is detected.
I'm also using the fm Exhibition Tool Pack hololens from the Unity Asset Store in order to stream the app running on the HoloLens to a PC.
Everything works fine, but when I stream the app to the PC I see the 3D Unity scene instead of the room.
So I've tried to get the webcam texture and attach it to a cube inside the scene, but the Vuforia ARCamera somehow conflicts with it and I can't see anything on the cube. When I run the app inside the Unity simulator, however, I see myself on the cube.
Is there a way to get the webcam Texture2D from Vuforia and attach it to a GameObject inside the scene? Maybe with the Vuforia.Image class? I don't know how it works.
The scripts below are compatible with FMETP STREAM and have been tested on mobile.
using UnityEngine;
using System.Collections;
using Vuforia;
using UnityEngine.UI;

public class VuforiaCamAccess : MonoBehaviour
{
    private bool mAccessCameraImage = true;
    public RawImage rawImage;
    public GameObject Mesh;
    private Texture2D texture;

#if UNITY_EDITOR
    private Vuforia.PIXEL_FORMAT mPixelFormat = Vuforia.PIXEL_FORMAT.GRAYSCALE;
#else
    private Vuforia.PIXEL_FORMAT mPixelFormat = Vuforia.PIXEL_FORMAT.RGB888;
#endif

    private bool mFormatRegistered = false;

    void Start()
    {
#if UNITY_EDITOR
        texture = new Texture2D(Screen.width, Screen.height, TextureFormat.R8, false);
#else
        texture = new Texture2D(Screen.width, Screen.height, TextureFormat.RGB24, false);
#endif
        // Register Vuforia life-cycle callbacks:
        Vuforia.VuforiaARController.Instance.RegisterVuforiaStartedCallback(OnVuforiaStarted);
        Vuforia.VuforiaARController.Instance.RegisterOnPauseCallback(OnPause);
        Vuforia.VuforiaARController.Instance.RegisterTrackablesUpdatedCallback(OnTrackablesUpdated);
    }

    private void OnVuforiaStarted()
    {
        // Try to register the camera image format
        if (CameraDevice.Instance.SetFrameFormat(mPixelFormat, true))
        {
            Debug.Log("Successfully registered pixel format " + mPixelFormat.ToString());
            mFormatRegistered = true;
        }
        else
        {
            Debug.LogError("Failed to register pixel format " + mPixelFormat.ToString() +
                "\n the format may be unsupported by your device;" +
                "\n consider using a different pixel format.");
            mFormatRegistered = false;
        }
    }

    private void OnPause(bool paused)
    {
        if (paused)
        {
            Debug.Log("App was paused");
            UnregisterFormat();
        }
        else
        {
            Debug.Log("App was resumed");
            RegisterFormat();
        }
    }

    private void OnTrackablesUpdated()
    {
        // Skip if we are still loading the previous image into the Texture2D
        if (LoadingTexture) return;

        if (mFormatRegistered)
        {
            if (mAccessCameraImage)
            {
                Vuforia.Image image = CameraDevice.Instance.GetCameraImage(mPixelFormat);
                //if (image != null && image.IsValid())
                if (image != null)
                {
                    byte[] pixels = image.Pixels;
                    int width = image.Width;
                    int height = image.Height;
                    StartCoroutine(SetTexture(pixels, width, height));
                }
            }
        }
    }

    bool LoadingTexture = false;

    IEnumerator SetTexture(byte[] pixels, int width, int height)
    {
        if (!LoadingTexture)
        {
            LoadingTexture = true;
            if (pixels != null && pixels.Length > 0)
            {
                if (texture.width != width || texture.height != height)
                {
#if UNITY_EDITOR
                    texture = new Texture2D(width, height, TextureFormat.R8, false);
#else
                    texture = new Texture2D(width, height, TextureFormat.RGB24, false);
#endif
                }
                texture.LoadRawTextureData(pixels);
                texture.Apply();

                if (rawImage != null)
                {
                    rawImage.texture = texture;
                    rawImage.material.mainTexture = texture;
                }
                if (Mesh != null) Mesh.GetComponent<Renderer>().material.mainTexture = texture;
            }
            yield return null;
            LoadingTexture = false;
        }
    }

    private void UnregisterFormat()
    {
        Debug.Log("Unregistering camera pixel format " + mPixelFormat.ToString());
        CameraDevice.Instance.SetFrameFormat(mPixelFormat, false);
        mFormatRegistered = false;
    }

    private void RegisterFormat()
    {
        if (CameraDevice.Instance.SetFrameFormat(mPixelFormat, true))
        {
            Debug.Log("Successfully registered camera pixel format " + mPixelFormat.ToString());
            mFormatRegistered = true;
        }
        else
        {
            Debug.LogError("Failed to register camera pixel format " + mPixelFormat.ToString());
            mFormatRegistered = false;
        }
    }
}
I'm trying to build a program that takes your photo and places it on a different background, such as a monument. So far, I was able to turn the camera on when the project starts with this code:
webcamTexture = new WebCamTexture();
rawImage.texture = webcamTexture;
rawImage.material.mainTexture = webcamTexture;
webcamTexture.Play();

Texture2D PhotoTaken = new Texture2D(webcamTexture.width, webcamTexture.height);
PhotoTaken.SetPixels(webcamTexture.GetPixels());
PhotoTaken.Apply();
However, I can't take a screenshot or photo because it always comes out all black. I've tried different code but nothing works. Can someone please help? Thanks.
EDIT
After some tries, this is the code I have:
WebCamTexture webcamTexture;
public RawImage rawImage;

void Start()
{
    webcamTexture = new WebCamTexture();
    rawImage.texture = webcamTexture;
    rawImage.material.mainTexture = webcamTexture;
    webcamTexture.Play();

    RenderTexture texture = new RenderTexture(webcamTexture.width, webcamTexture.height, 0);
    Graphics.Blit(webcamTexture, texture);

    Button btn = yourButton.GetComponent<Button>();
    btn.onClick.AddListener(OnClick);
}

public IEnumerator Coroutine()
{
    yield return new WaitForEndOfFrame();
}

public void OnClick()
{
    var width = 767;
    var height = 575;
    Texture2D texture = new Texture2D(width, height);
    texture.ReadPixels(new Rect(0, 0, width, height), 0, 0);
    texture.Apply();

    // Encode texture into PNG
    var bytes = texture.EncodeToPNG();
    //Destroy(texture);
    File.WriteAllBytes(Application.dataPath + "/../SavedScreen.png", bytes);
}
And with this next code the screenshot is taken, but it captures the whole screen, not just a part of it.
void Start()
{
    // Set the playback framerate!
    // (real time doesn't influence time anymore)
    Time.captureFramerate = frameRate;

    // Find a folder that doesn't exist yet by appending numbers!
    realFolder = folder;
    int count = 1;
    while (System.IO.Directory.Exists(realFolder))
    {
        realFolder = folder + count;
        count++;
    }
    // Create the folder
    System.IO.Directory.CreateDirectory(realFolder);
}

void Update()
{
    // name is "realFolder/shot 0005.png"
    var name = string.Format("{0}/shot {1:D04}.png", realFolder, Time.frameCount);

    // Capture the screenshot
    Application.CaptureScreenshot(name, sizeMultiplier);
}
You can take a screenshot like this in Unity:
Application.CaptureScreenshot("Screenshot.png");
Reference
EDIT 1
To take a screenshot of a specific part of the screen, use the following script:
var width = 400;
var height = 300;
var startX = 200;
var startY = 100;
var tex = new Texture2D(width, height, TextureFormat.RGB24, false);
tex.ReadPixels(new Rect(startX, startY, width, height), 0, 0);
tex.Apply();

// Encode texture into PNG
var bytes = tex.EncodeToPNG();
Destroy(tex);
File.WriteAllBytes(Application.dataPath + "/../SavedScreen.png", bytes);
Reference
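One more detail worth checking in the EDIT code above: ReadPixels should run after rendering has finished, and the WaitForEndOfFrame coroutine is declared but never used. A sketch of wiring it into the button handler (also waiting until the webcam has delivered a frame, which is a common cause of all-black captures):

    public void OnClick()
    {
        StartCoroutine(TakePhoto());
    }

    IEnumerator TakePhoto()
    {
        // Wait until the webcam actually produces frames, then until frame end.
        while (!webcamTexture.didUpdateThisFrame)
            yield return null;
        yield return new WaitForEndOfFrame();

        Texture2D photo = new Texture2D(webcamTexture.width, webcamTexture.height);
        photo.SetPixels(webcamTexture.GetPixels());
        photo.Apply();
        File.WriteAllBytes(Application.dataPath + "/../SavedScreen.png", photo.EncodeToPNG());
        Destroy(photo);
    }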
I use EmguCV to open the webcam in Unity, but its FPS is much too low. This is my code:
private Texture2D texture;
private Capture capture;
private Color32[] color = new Color32[640 * 480];

// Use this for initialization
void Start()
{
    texture = new Texture2D(640, 480);
    capture = new Capture();
}

// Update is called once per frame
void Update()
{
    Image<Bgr, Byte> currentFrame = capture.QueryFrame();
    Bitmap bitmapCurrentFrame = currentFrame.ToBitmap();
    Image<Bgra, Byte> img = new Image<Bgra, Byte>(bitmapCurrentFrame);
    for (int y = 0; y < 480; y++)
    {
        for (int x = 0; x < 640; x++)
        {
            int index = y + x * 480;
            print(index + ";" + x + ";" + y);
            //byte b = img.Data[x, y, 0];
            color[index].r = img.Data[x, y, 2];
            color[index].g = img.Data[x, y, 1];
            color[index].b = img.Data[x, y, 0];
            color[index].a = 0xff;
        }
    }
    texture.SetPixels32(color);
    texture.Apply(false);
    renderer.material.mainTexture = texture;
}
I don't know why the FPS is so low... and why my boss likes EmguCV with Unity instead of Unity's WebCamTexture...
Thank you for reading. I hope I can get some answers.
Look at Texture2D.LoadRawTextureData. It has no proper docs, so here's a snippet:
Texture2D tex = new Texture2D(width, height, format, false, true);
tex.LoadRawTextureData(buffer);
tex.Apply(false, true);
The buffer must be in the correct hardware format. For the format variable, look at the list of TextureFormat values that Unity accepts.
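Applied to the EmguCV question above (a minimal sketch, assuming a Capture as before; BGRA32 matches the byte order of an Image<Bgra, byte>, so no per-pixel loop is needed, and dropping the per-pixel print() alone removes hundreds of thousands of log calls per frame):

    // texture created once as: new Texture2D(640, 480, TextureFormat.BGRA32, false)
    using (Image<Bgr, Byte> frame = capture.QueryFrame())
    using (Image<Bgra, Byte> bgra = frame.Convert<Bgra, Byte>())
    {
        texture.LoadRawTextureData(bgra.Bytes); // raw BGRA bytes, no managed loop
        texture.Apply(false);
        renderer.material.mainTexture = texture;
    }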