How to release memory in Unity?

When I create a mesh and a texture in Unity on every frame (30 fps), it looks like Unity does not release that data from memory after use.
Here is my code:
private bool UpdateFrame(int frameIdx)
{
    bool result = true;
    int readyBuffSize = DecoderAPI.stream_get_ready_buffer_size(m_stream);
    if (m_currMeshFrameIndex != frameIdx
        && readyBuffSize > 0)
    {
        m_currMeshFrameIndex = frameIdx;
        IntPtr frame = DecoderAPI.stream_get_next_frame_obj(m_stream);
        if (frame == IntPtr.Zero)
        {
            result = false;
        }
        else
        {
            long sequentialFrameIdx = DecoderAPI.get_sequential_number(frame);
            DebugMethod("UNITY UpdateFrame", $"readyBuffSize :: {readyBuffSize}");
            DebugMethod("UNITY UpdateFrame", $"sequentialFrameIdx :: {sequentialFrameIdx}");
            Mesh releaseFormer = m_meshFilter.mesh;
            m_meshFilter.mesh = CreateMesh(frame);
            Texture2D texture = CreateTexture(frame);
            m_meshRenderer.material.SetTexture("_MainTex", texture);
            DecoderAPI.stream_release_frame_obj(m_stream, frame);
            Destroy(releaseFormer); // does not seem to help: even when there are no more allocations in C++ the process grows endlessly
        }
    }
    return result;
}
private Mesh CreateMesh(IntPtr frame)
{
    Mesh mesh = new Mesh();
    // Vertices***
    int vertexCount = DecoderAPI.frame_get_vertex_count(frame);
    byte[] xyzBytes = new byte[vertexCount * 3 * 4];
    IntPtr xyz = DecoderAPI.frame_get_vertex_xyz(frame);
    Vector3[] vertices = new Vector3[vertexCount];
    GCHandle handle = GCHandle.Alloc(vertices, GCHandleType.Pinned);
    IntPtr pointer = handle.AddrOfPinnedObject();
    Marshal.Copy(xyz, xyzBytes, 0, xyzBytes.Length);
    Marshal.Copy(xyzBytes, 0, pointer, xyzBytes.Length);
    handle.Free();
    mesh.vertices = vertices;
    //***
    // Faces***
    int faceCount = DecoderAPI.frame_face_count(frame);
    int trisArrSize = faceCount * 3;
    int[] tris = new int[trisArrSize];
    IntPtr indices = DecoderAPI.frame_face_indices(frame);
    Marshal.Copy(indices, tris, 0, trisArrSize);
    mesh.triangles = tris;
    //***
    mesh.RecalculateNormals();
    // UV***
    int uvCount = DecoderAPI.frame_get_uv_count(frame);
    IntPtr uvData = DecoderAPI.frame_get_uv_data(frame);
    int uvArrSize = uvCount * 2;
    float[] uvArr = new float[uvArrSize];
    Vector2[] uv = new Vector2[uvCount];
    Marshal.Copy(uvData, uvArr, 0, uvArrSize);
    for (int i = 0; i < uvCount; i++)
    {
        Vector2 result = new Vector2(uvArr[i * 2], uvArr[i * 2 + 1]) * new Vector2(1, -1);
        uv[i] = result;
    }
    mesh.uv = uv;
    //***
    if (vertexCount != uvCount)
    {
        long frameId = DecoderAPI.get_sequential_number(frame);
        DebugMethod("UNITY CreateMesh", $"HERE : in frame id :: {frameId}, vertexCount : {vertexCount}, uvCount : {uvCount}");
    }
    return mesh;
}
private Texture2D CreateTexture(IntPtr frame)
{
    IntPtr textureObj = DecoderAPI.frame_get_texture_obj(frame);
    DecoderAPI.TextureInfo textureInfo = DecoderAPI.texture_get_info(textureObj);
    int width = textureInfo.width;
    int height = textureInfo.height;
    int channels = textureInfo.channels;
    int stride = textureInfo.stride;
    //DecoderAPI.ColorType colorType = textureInfo.color_type;
    IntPtr pixels = textureInfo.pixels;
    Texture2D texture = new Texture2D(width, height, TextureFormat.RGB24, false);
    //Texture2D texture = new Texture2D(width, height, TextureFormat.DXT5, false);
    texture.LoadRawTextureData(pixels, width * channels * height);
    texture.Apply();
    return texture;
}
So, what I do is create a mesh and a texture for each frame, use them, and then expect Unity to release them from memory afterwards, but it doesn't. OK, I found that Destroy(releaseFormer) should help, but the result is the same: in Task Manager I see memory grow endlessly.
As a test I started my C++ code, generated (let's say) 100 frames, then stopped it (so my C++ side allocates nothing), and memory still kept growing. Even if Unity doesn't release data I no longer need, I loaded 100 frames and that is it, so why does memory continue to grow?
The question is: how do I release all those frames I no longer need from memory?
EDIT
I have changed this method and added Destroy calls in the proper order:
private bool UpdateFrame(int frameIdx)
{
    bool result = true;
    int readyBuffSize = -1;
    if (m_stream != IntPtr.Zero)
    {
        readyBuffSize = DecoderAPI.stream_get_ready_buffer_size(m_stream);
    }
    if (m_currMeshFrameIndex != frameIdx
        && readyBuffSize > 0)
    {
        m_currMeshFrameIndex = frameIdx;
        IntPtr frame = DecoderAPI.stream_get_next_frame_obj(m_stream);
        if (frame == IntPtr.Zero)
        {
            result = false;
        }
        else
        {
            long sequentialFrameIdx = DecoderAPI.frame_get_sequential_number(frame);
            DebugMethod("UNITY UpdateFrame", $"readyBuffSize :: {readyBuffSize}");
            DebugMethod("UNITY UpdateFrame", $"sequentialFrameIdx :: {sequentialFrameIdx}");
            if (m_meshFilter.mesh != null)
            {
                Destroy(m_meshFilter.mesh);
            }
            m_meshFilter.mesh = CreateMesh(frame);
            if (m_texture != null)
            {
                Destroy(m_texture);
            }
            m_texture = CreateTexture(frame);
            m_meshRenderer.material.SetTexture("_MainTex", m_texture);
            if (m_stream != IntPtr.Zero)
            {
                DecoderAPI.stream_release_frame_obj(m_stream, frame);
            }
        }
    }
    return result;
}

releaseFormer is the mesh, right? Did you try calling Destroy on the texture object itself?
Another thread suggested Resources.UnloadUnusedAssets().
Personally I'd try to do this with a RenderTexture, especially if the texture size doesn't change too often, though that might not be possible for your use case.
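A pattern that avoids the per-frame allocation churn entirely is to create the Mesh and the Texture2D once and overwrite their contents every frame. Below is a minimal editorial sketch of that idea, not code from the question; m_mesh, m_tex and UploadFrame are illustrative names, and it assumes RGB24 pixel data from the decoder shown above:

// Reuse one Mesh and one Texture2D instead of allocating new ones per frame.
private Mesh m_mesh;      // hypothetical cached mesh
private Texture2D m_tex;  // hypothetical cached texture

private void UploadFrame(Vector3[] vertices, int[] tris, Vector2[] uv,
                         IntPtr pixels, int width, int height, int channels)
{
    if (m_mesh == null)
    {
        m_mesh = new Mesh();
        m_meshFilter.mesh = m_mesh;
    }
    m_mesh.Clear();               // wipes old data but keeps the native object
    m_mesh.vertices = vertices;
    m_mesh.triangles = tris;
    m_mesh.uv = uv;
    m_mesh.RecalculateNormals();

    // Recreate the texture only when its size actually changes.
    if (m_tex == null || m_tex.width != width || m_tex.height != height)
    {
        if (m_tex != null) Destroy(m_tex);
        m_tex = new Texture2D(width, height, TextureFormat.RGB24, false);
        m_meshRenderer.material.SetTexture("_MainTex", m_tex);
    }
    m_tex.LoadRawTextureData(pixels, width * height * channels);
    m_tex.Apply(false);
}

With this shape nothing new is handed to Unity's native object table each frame, so if the process still grows, the growth is coming from somewhere other than the meshes and textures.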

Related

Graphics.DrawMesh has weird offset while scaling in Unity, why?

I'm currently implementing an after-image effect and I've run into a problem with Graphics.DrawMesh. The code is shown below:
public class AfterImage3DByCombine : MonoBehaviour
{
    public class AfterImage
    {
        public Mesh mesh;
        public Material material;
        // public Matrix4x4 matrix;
        public float duration;
        public float time;
    }

    protected SkinnedMeshRenderer[] skinRenderers;
    protected MeshFilter[] filters;
    protected int filtersCount = 0;
    public bool IncludeMeshFilter = true;
    public Material EffectMaterial;
    public float Duration = 5;
    public float Interval = 0.2f;
    public float FadeoutTime = 1;
    private float mTime = 5;
    private List<AfterImage> mAfterImageList = new List<AfterImage>();

    protected virtual void Awake()
    {
        skinRenderers = GetComponentsInChildren<SkinnedMeshRenderer>();
        if (IncludeMeshFilter)
        {
            filters = GetComponentsInChildren<MeshFilter>();
            filtersCount = filters.Length;
        }
    }
    // call from another place to have after image effect
    public void Play()
    {
        if (skinRenderers.Length + filtersCount <= 0)
        {
            return;
        }
        mTime = Duration;
        StartCoroutine(AddAfterImage());
    }

    IEnumerator AddAfterImage()
    {
        while (mTime > 0)
        {
            CreateImage();
            yield return new WaitForSeconds(Interval);
            mTime -= Interval;
        }
        yield return null;
    }
    void CreateImage()
    {
        CombineInstance[] combineInstances = new CombineInstance[skinRenderers.Length + filtersCount];
        int index = 0;
        for (int i = 0; i < skinRenderers.Length; i++)
        {
            var render = skinRenderers[i];
            var mesh = new Mesh();
            render.BakeMesh(mesh);
            combineInstances[index] = new CombineInstance
            {
                mesh = mesh,
                transform = render.gameObject.transform.localToWorldMatrix,
                subMeshIndex = 0
            };
            index++;
        }
        for (int i = 0; i < filtersCount; i++)
        {
            var render = filters[i];
            var temp = (render.sharedMesh != null) ? render.sharedMesh : render.mesh;
            var mesh = (Mesh)Instantiate(temp);
            combineInstances[index] = new CombineInstance
            {
                mesh = mesh,
                transform = render.gameObject.transform.localToWorldMatrix,
                subMeshIndex = 0
            };
            index++;
        }
        Mesh combinedMesh = new Mesh();
        combinedMesh.CombineMeshes(combineInstances, true, true);
        mAfterImageList.Add(new AfterImage
        {
            mesh = combinedMesh,
            material = new Material(EffectMaterial),
            time = FadeoutTime,
            duration = FadeoutTime,
        });
    }
    void LateUpdate()
    {
        bool needRemove = false;
        foreach (var image in mAfterImageList)
        {
            image.time -= Time.deltaTime;
            if (image.material.HasProperty("_Color"))
            {
                Color color = Color.red;
                color.a = Mathf.Max(0, image.time / image.duration);
                image.material.SetColor("_Color", color);
            }
            Matrix4x4 mat = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, Vector3.one * 2f);
            // public static void DrawMesh(Mesh mesh, Matrix4x4 matrix, Material material, int layer, Camera camera, int submeshIndex, MaterialPropertyBlock properties, ShadowCastingMode castShadows);
            Graphics.DrawMesh(image.mesh, Matrix4x4.identity, image.material, gameObject.layer, null, 0, null, false);
            if (image.time <= 0)
            {
                needRemove = true;
            }
        }
        if (needRemove)
        {
            mAfterImageList.RemoveAll(x => x.time <= 0);
        }
    }
}
Since my prefab is scaled by 0.5 while it's running, I pass a matrix with a 2x scale, Matrix4x4 mat = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, Vector3.one * 2f);, into Graphics.DrawMesh.
But then the mesh drawn by Graphics.DrawMesh isn't in its original position; there is an offset between the original mesh and the drawn mesh.
And if I pass Matrix4x4.identity into Graphics.DrawMesh, the drawn mesh has 0.5 times scaling, which looks smaller than the original mesh.
Why is there an offset, and how can I eliminate it without changing the prefab's scale?
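One plausible cause (an editorial note, not from the original thread): a TRS matrix with zero translation scales vertices about the world origin, not about the mesh's pivot, so any mesh away from the origin drifts when scaled. Scaling about the object's own position instead would look roughly like this sketch, where transform is the effect object's transform:

// Scale about the pivot p rather than the world origin: T(p) * S * T(-p).
Vector3 p = transform.position;
Matrix4x4 mat = Matrix4x4.TRS(p, Quaternion.identity, Vector3.one * 2f)
              * Matrix4x4.Translate(-p);
Graphics.DrawMesh(image.mesh, mat, image.material, gameObject.layer);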

How to apply a texture to a 3D model dynamically?

I have a file containing an array of vertices, indices, UVs, textures and so on; in short, everything needed to draw a model with a texture on it. For example, it should be a 3D cube with a texture like wood.
What I have so far is: I can present the vertices of the cube (I see my model), but I don't know how to apply a texture to it.
Here is my code:
public void Start()
{
    m_stream = DecoderAPI.create_stream_decoder_obj();
    string pathToFile = "path_to_my_file";
    bool isInitialized = DecoderAPI.stream_init_model(m_stream, pathToFile);
    if (isInitialized)
    {
        m_curFrame = DecoderAPI.stream_get_frame_obj(m_stream, 1);
        MeshRenderer meshRenderer = gameObject.AddComponent<MeshRenderer>();
        meshRenderer.sharedMaterial = new Material(Shader.Find("Standard"));
        Mesh mesh = new Mesh();
        // Vertices***
        int vertexCount = DecoderAPI.frame_get_vertex_count(m_curFrame);
        int xyzArrSize = vertexCount * 3;
        float[] xyzArray = new float[xyzArrSize];
        IntPtr xyz = DecoderAPI.frame_get_vertex_xyz(m_curFrame);
        Marshal.Copy(xyz, xyzArray, 0, xyzArrSize);
        Vector3[] vertices = new Vector3[vertexCount];
        for (int i = 0; i < vertexCount; i++)
        {
            vertices[i] = new Vector3(xyzArray[i * 3], xyzArray[i * 3 + 1], xyzArray[i * 3 + 2]);
        }
        mesh.vertices = vertices;
        //***
        // Faces***
        int faceCount = DecoderAPI.frame_face_count(m_curFrame);
        int trisArrSize = faceCount * 3;
        int[] tris = new int[trisArrSize];
        IntPtr indices = DecoderAPI.frame_face_indices(m_curFrame);
        Marshal.Copy(indices, tris, 0, trisArrSize);
        mesh.triangles = tris;
        //***
        mesh.RecalculateNormals();
        MeshFilter meshFilter = gameObject.AddComponent<MeshFilter>();
        meshFilter.mesh = mesh;
        // TEXTURE ****
        int uvCount = DecoderAPI.frame_get_uv_count(m_curFrame);
        IntPtr uvData = DecoderAPI.frame_get_uv_data(m_curFrame);
        IntPtr textureObj = DecoderAPI.frame_get_texture_obj(m_curFrame);
        DecoderAPI.TextureInfo textureInfo = DecoderAPI.texture_get_info(textureObj);
        int width = textureInfo.width;
        int height = textureInfo.height;
        int channels = textureInfo.channels;
        int stride = textureInfo.stride;
        DecoderAPI.ColorType color_type = textureInfo.color_type;
        IntPtr pixels = textureInfo.pixels;
        // HOW TO APPLY THIS TEXTURE DATA TO MY MODEL????
        //***
        DecoderAPI.frame_release(m_curFrame);
    }
}
I found this answer: https://answers.unity.com/questions/390878/how-do-i-apply-a-texture-to-a-3d-model.html
but I need to know how to apply it dynamically.
Any suggestions? Or maybe some links to tutorials?
EDIT
public void Start()
{
    m_stream = DecoderAPI.create_stream_decoder_obj();
    string pathToFile = "my_path_to_file";
    bool isInitialized = DecoderAPI.stream_init_model(m_stream, pathToFile);
    if (isInitialized)
    {
        m_curFrame = DecoderAPI.stream_get_frame_obj(m_stream, 1);
        MeshRenderer meshRenderer = gameObject.AddComponent<MeshRenderer>();
        meshRenderer.sharedMaterial = new Material(Shader.Find("Standard"));
        Mesh mesh = new Mesh();
        // Vertices***
        int vertexCount = DecoderAPI.frame_get_vertex_count(m_curFrame);
        int xyzArrSize = vertexCount * 3;
        float[] xyzArray = new float[xyzArrSize];
        IntPtr xyz = DecoderAPI.frame_get_vertex_xyz(m_curFrame);
        Marshal.Copy(xyz, xyzArray, 0, xyzArrSize);
        Vector3[] vertices = new Vector3[vertexCount];
        for (int i = 0; i < vertexCount; i++)
        {
            vertices[i] = new Vector3(xyzArray[i * 3], xyzArray[i * 3 + 1], xyzArray[i * 3 + 2]);
        }
        mesh.vertices = vertices;
        //***
        // Faces***
        int faceCount = DecoderAPI.frame_face_count(m_curFrame);
        int trisArrSize = faceCount * 3;
        int[] tris = new int[trisArrSize];
        IntPtr indices = DecoderAPI.frame_face_indices(m_curFrame);
        Marshal.Copy(indices, tris, 0, trisArrSize);
        mesh.triangles = tris;
        //***
        mesh.RecalculateNormals();
        // UV***
        int uvCount = DecoderAPI.frame_get_uv_count(m_curFrame);
        IntPtr uvData = DecoderAPI.frame_get_uv_data(m_curFrame);
        int uvArrSize = uvCount * 2;
        float[] uvArr = new float[uvArrSize];
        Vector2[] uv = new Vector2[uvCount];
        Marshal.Copy(uvData, uvArr, 0, uvArrSize);
        for (int i = 0; i < uvCount; i++)
        {
            uv[i] = new Vector2(uvArr[i * 2], uvArr[i * 2 + 1]);
        }
        mesh.uv = uv;
        //***
        MeshFilter meshFilter = gameObject.AddComponent<MeshFilter>();
        meshFilter.mesh = mesh;
        // TEXTURE ****
        IntPtr textureObj = DecoderAPI.frame_get_texture_obj(m_curFrame);
        DecoderAPI.TextureInfo textureInfo = DecoderAPI.texture_get_info(textureObj);
        int width = textureInfo.width;
        int height = textureInfo.height;
        int channels = textureInfo.channels;
        int stride = textureInfo.stride;
        DecoderAPI.ColorType color_type = textureInfo.color_type;
        IntPtr pixels = textureInfo.pixels;
        Texture2D texture = new Texture2D(width, height);
        texture.LoadRawTextureData(pixels, width * channels * height);
        texture.Apply();
        meshRenderer.material.SetTexture("_MainText", texture);
        //***
        DecoderAPI.frame_release(m_curFrame);
    }
}
But now I am getting this error:
UnityException: LoadRawTextureData: not enough data provided (will result in overread).
UnityEngine.Texture2D.LoadRawTextureData (System.IntPtr data, System.Int32 size) (at <a9810827dce3444a8e5c4e9f3f5e0828>:0)
Model.Start () (at Assets/Scripts/Model.cs:98)
What am I doing wrong?
First off, your code to construct the cube is missing the UV part, so even if you assign the texture to the material the result is undefined. Look at the code samples on the Mesh manual page about adding the UVs as well: https://docs.unity3d.com/ScriptReference/Mesh.html
Once you have the UVs, all you have to do is set the texture using SetTexture (see https://docs.unity3d.com/ScriptReference/Material.SetTexture.html).
On a separate note, in your code you are using sharedMaterial instead of material: that is not advisable unless you have many objects all using the same material and you want to change them all.
EDIT:
To get a texture from a pixel buffer, you create a Texture2D object of the given size and colour type, then you apply the data like this:
myTexture.LoadRawTextureData(myPixels);
myTexture.Apply();
~Pino
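A likely reason for the overread error above (an editorial aside): new Texture2D(width, height) defaults to the 4-channel RGBA32 format, so LoadRawTextureData expects width * height * 4 bytes, while the decoder supplies width * height * channels. Picking the format from the channel count avoids the mismatch; the R8 case for single-channel data is an assumption:

// Choose the Texture2D format that matches the decoder's channel count.
TextureFormat fmt = channels == 4 ? TextureFormat.RGBA32
                  : channels == 3 ? TextureFormat.RGB24
                  : TextureFormat.R8;   // 1 channel (assumption)
Texture2D texture = new Texture2D(width, height, fmt, false);
texture.LoadRawTextureData(pixels, width * height * channels);
texture.Apply();
meshRenderer.material.SetTexture("_MainTex", texture); // _MainTex, not _MainText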
If you have a texture and a MeshRenderer, it works like this:
void SetYourTexture()
{
    MeshRenderer yourMeshRenderer = GetComponent<MeshRenderer>();
    // If you only need one texture on the material, Unity understands _MainTex as the main texture.
    yourMeshRenderer.material.SetTexture("_MainTex", yourTexture);
}

Microphone gain and spectrum values do not sync in Unity

I am making a simple voice visualization program. My goals are:
Playback microphone input
Visualize voice spectrum and gain in real time
Here is my code:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;

public class VisualizeVoice : MonoBehaviour
{
    private const int NUM_SPECTRUM_SAMPLES = 256;
    private const int NUM_SPECTRUM_BARS = 32;
    private const int NUM_PCM_SAMPLES = 16000;
    private const float BAR_DROP_SPEED = 1e-3f;
    private const int NUM_SAMPLES_TO_AVERAGE = 8;
    private string _deviceName;
    private float[] _spectrumData = new float[NUM_SPECTRUM_SAMPLES];
    private float[] _fPCMData = new float[NUM_PCM_SAMPLES];
    private float _gain = 0;
    private AudioClip _audio; // Audio from microphone
    private AudioSource _playback; // To play the audio from microphone
    // For visualization
    private GameObject[] _spectrumBars = new GameObject[NUM_SPECTRUM_BARS];
    private GameObject _gainBar;

    // Start is called before the first frame update
    void Start()
    {
        if (Microphone.devices.Length == 0) {
            Debug.LogError("No Microphone");
            return;
        }
        _deviceName = Microphone.devices[0];
        Debug.Log("Current microphone is " + _deviceName);
        if ((_playback = this.GetComponent<AudioSource>()) == null) {
            _playback = this.gameObject.AddComponent<AudioSource>();
        }
        _playback.loop = true;
        _playback.bypassEffects = true;
        _playback.bypassListenerEffects = true;
        _playback.bypassReverbZones = true;
        _playback.priority = 0;
        _playback.pitch = 1;
        _playback.clip = _audio = Microphone.Start(_deviceName, true, 1, AudioSettings.outputSampleRate);
        // Sync microphone and playback, but it always fails
        float waitTime = 0;
        while (!(Microphone.GetPosition(_deviceName) > 0) && waitTime <= 2)
            waitTime += Time.deltaTime;
        if (waitTime > 2) {
            Debug.LogError("time out waiting for microphone");
        }
        _playback.Play();
        InitVisualization();
    }

    // Update is called once per frame
    void Update()
    {
        // Get PCM data and calculate gain
        var audioPosition = Microphone.GetPosition(_deviceName);
        _audio.GetData(_fPCMData, audioPosition);
        UpdateGain();
        // Get spectrum data
        _playback.GetSpectrumData(_spectrumData, 0, FFTWindow.BlackmanHarris);
        // Update visualization
        UpdateVisualization();
    }

    private void InitVisualization()
    {
        // Initialize spectrum bars
        for (int ibar = 0; ibar < NUM_SPECTRUM_BARS; ibar++) {
            _spectrumBars[ibar] = GameObject.CreatePrimitive(PrimitiveType.Cube);
            _spectrumBars[ibar].transform.parent = this.transform;
            _spectrumBars[ibar].transform.localPosition = new Vector3(ibar, 0, 0);
            _spectrumBars[ibar].transform.localScale = new Vector3(1, 0, 1);
        }
        // Initialize gain bar
        _gainBar = GameObject.CreatePrimitive(PrimitiveType.Cube);
        _gainBar.transform.parent = this.transform;
        _gainBar.transform.localPosition = new Vector3(-5, 0, 0);
        _gainBar.transform.localScale = new Vector3(4, 0, 1);
        // Overall dimension
        this.transform.localScale = new Vector3(0.2f, 10.0f, 0.2f);
    }

    private void UpdateVisualization()
    {
        // Update spectrum bars
        int nSamplesPerBar = NUM_SPECTRUM_SAMPLES / NUM_SPECTRUM_BARS;
        for (int ibar = 0; ibar < NUM_SPECTRUM_BARS; ibar++) {
            // Calculate value of each bar
            float value = 0;
            for (int isample = 0; isample < nSamplesPerBar; isample++) {
                value += _spectrumData[ibar * nSamplesPerBar + isample];
            }
            value /= nSamplesPerBar;
            // Use current value if increasing, or slowly drop previous value if decreasing
            float prevValue = _spectrumBars[ibar].transform.localScale.y;
            if (value < prevValue)
                value = prevValue - BAR_DROP_SPEED;
            // Y scale is set to value
            _spectrumBars[ibar].transform.localScale = new Vector3(1, value, 1);
        }
        // Update gain bar
        _gainBar.transform.localScale = new Vector3(4, _gain, 1);
    }

    private void UpdateGain()
    {
        _gain = 0;
        for (int i = 0; i < NUM_SAMPLES_TO_AVERAGE; i++) {
            _gain += Mathf.Abs(_fPCMData[NUM_PCM_SAMPLES - i - 1]);
        }
        _gain /= NUM_SAMPLES_TO_AVERAGE;
    }
}
Here are my questions:
I can't use while (!(Microphone.GetPosition(_deviceName) > 0)); to avoid the latency from microphone to speaker. If I use it, my application just freezes. If I add code to allow a time-out, it times out every time.
The gain bar seems unrelated to my voice. I don't know if my calculation is right.
I'm not sure if I need to average over multiple samples when calculating gain, and how many samples I should average over. I need this gain value later to detect silent moments and cut the audio data.
To 1.:
You can. Unity allows you to define Start as a coroutine:
private IEnumerator Start()
{
    ...
}
This way you can use a non-blocking
while (!(Microphone.GetPosition(_deviceName) > 0))
{
    yield return null;
}
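Putting it together, a minimal editorial sketch of the coroutine version, with an RMS window as one common way to get a steadier gain for questions 2 and 3 (the 1024-sample window is an arbitrary assumption):

private IEnumerator Start()
{
    // ... device selection and AudioSource setup as in the question ...
    _playback.clip = _audio = Microphone.Start(_deviceName, true, 1, AudioSettings.outputSampleRate);

    // Non-blocking wait until the microphone starts delivering samples.
    float timeout = 2f;
    while (Microphone.GetPosition(_deviceName) <= 0 && timeout > 0f)
    {
        timeout -= Time.deltaTime;
        yield return null;
    }
    if (timeout <= 0f)
    {
        Debug.LogError("time out waiting for microphone");
        yield break;
    }
    _playback.Play();
    InitVisualization();
}

// Root-mean-square over a short window; steadier than averaging
// a handful of absolute sample values.
private float ComputeRmsGain(float[] pcm, int windowSize = 1024)
{
    float sum = 0f;
    for (int i = pcm.Length - windowSize; i < pcm.Length; i++)
        sum += pcm[i] * pcm[i];
    return Mathf.Sqrt(sum / windowSize);
}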

Unity3d code size differs from UI size

I have a square in Unity with a size of 500x500 (slightly transparent).
Additionally, I want to take a snapshot of this area, so I created the following code for taking a picture (the area has a size of 500x500):
using System.IO;
using UnityEngine;

public class CameraShot : MonoBehaviour
{
    private int width = 500;
    private int height = 500;
    private string saveFolder = "/picture";
    private string saveType = ".png";

    public void MakePicture()
    {
        var texture = SetClipping();
        EncodeAndSave(texture);
    }

    private Texture2D SetClipping()
    {
        int x = (Screen.width - width) / 2;
        int y = (Screen.height - height) / 2;
        Debug.Log("[AR_Recognition] CameraShot: " + Screen.dpi);
        var tex = new Texture2D(width, height, TextureFormat.RGB24, false);
        Rect rect = new Rect(x, y, width, height);
        tex.ReadPixels(rect, 0, 0);
        tex.Apply();
        return tex;
    }

    private void EncodeAndSave(Texture2D texture)
    {
        int count = 1;
        var bytes = texture.EncodeToPNG();
        Destroy(texture);
        string savePath = Application.persistentDataPath + saveFolder;
        while (true)
        {
            // Check the actual file name; checking only the folder prefix
            // would never match, so the counter would never advance.
            if (File.Exists(savePath + count + saveType))
            {
                count++;
            }
            else
            {
                File.WriteAllBytes(savePath + count + saveType, bytes);
                Debug.Log("[AR_Recognition] CameraShot: " + savePath + count + saveType);
                break;
            }
        }
    }
}
Now I have the following issue: the picture taken does not match the 500x500 square, and I cannot figure out why.
Thank you.
EDIT: The Canvas information
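One thing worth checking (editorial note, since the Canvas screenshot is not included here): a 500x500 rect in canvas units only equals 500x500 screen pixels when the Canvas Scaler applies no scaling; with "Scale With Screen Size" the on-screen square can be larger or smaller than 500 pixels. Reading the square's actual pixel rect sidesteps this. A sketch for a Screen Space - Overlay canvas, where squareRect is the RectTransform of the square (an assumed name):

// World corners of an overlay-canvas RectTransform are already in pixels.
Vector3[] corners = new Vector3[4];
squareRect.GetWorldCorners(corners);
Rect pixelRect = new Rect(corners[0].x, corners[0].y,
                          corners[2].x - corners[0].x,
                          corners[2].y - corners[0].y);
var tex = new Texture2D((int)pixelRect.width, (int)pixelRect.height, TextureFormat.RGB24, false);
tex.ReadPixels(pixelRect, 0, 0);
tex.Apply();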

EmguCV Image to Texture2D in Unity

public class testEmguCV : MonoBehaviour
{
    private Capture capture;

    void Start()
    {
        capture = new Capture();
    }

    void Update()
    {
        Image<Gray, Byte> currentFrame = capture.QueryGrayFrame();
        Bitmap bitmapCurrentFrame = currentFrame.ToBitmap();
        MemoryStream m = new MemoryStream();
        bitmapCurrentFrame.Save(m, bitmapCurrentFrame.RawFormat);
        Texture2D camera = new Texture2D(400, 400);
        if (currentFrame != null)
        {
            camera.LoadImage(m.ToArray());
            renderer.material.mainTexture = camera;
        }
    }
}
I used the above code to convert the camera feed from an EmguCV camera to a Texture2D in Unity, but I am having a problem with bitmapCurrentFrame.Save(m, bitmapCurrentFrame.RawFormat);
It gives the following errors:
ArgumentNullException: Argument cannot be null. Parameter name: encoder
System.Drawing.Image.Save (System.IO.Stream stream, System.Drawing.Imaging.ImageCodecInfo encoder, System.Drawing.Imaging.EncoderParameters encoderParams)
System.Drawing.Image.Save (System.IO.Stream stream, System.Drawing.Imaging.ImageFormat format)
(wrapper remoting-invoke-with-check) System.Drawing.Image:Save (System.IO.Stream,System.Drawing.Imaging.ImageFormat)
WebcamUsingEmgucv.Update () (at Assets/WebcamUsingEmgucv.cs:51)
After several hours of thinking and searching I don't know what is going on. Please help.
I used your example in our project, thanks! But I modified it to:
void Update()
{
    if (capture == null)
    {
        Debug.LogError("Capture is null");
        return;
    }
    Image<Gray, Byte> currentFrame = capture.QueryGrayFrame();
    MemoryStream m = new MemoryStream();
    currentFrame.Bitmap.Save(m, currentFrame.Bitmap.RawFormat);
    Texture2D camera = new Texture2D(400, 400);
    if (currentFrame != null)
    {
        camera.LoadImage(m.ToArray());
        renderer.material.mainTexture = camera;
    }
}
And it works! FPS averages ~30-35. Good luck!
Try to use this:
https://github.com/neutmute/emgucv/blob/3ceb85cba71cf957d5e31ae0a70da4bbf746d0e8/Emgu.CV/PInvoke/Unity/TextureConvert.cs
it has something like this:
public static Texture2D ImageToTexture2D<TColor, TDepth>(Image<TColor, TDepth> image, bool correctForVerticleFlip)
    where TColor : struct, IColor
    where TDepth : new()
{
    Size size = image.Size;
    if (typeof(TColor) == typeof(Rgb) && typeof(TDepth) == typeof(Byte))
    {
        Texture2D texture = new Texture2D(size.Width, size.Height, TextureFormat.RGB24, false);
        byte[] data = new byte[size.Width * size.Height * 3];
        GCHandle dataHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
        using (Image<Rgb, byte> rgb = new Image<Rgb, byte>(size.Width, size.Height, size.Width * 3, dataHandle.AddrOfPinnedObject()))
        {
            rgb.ConvertFrom(image);
            if (correctForVerticleFlip)
                CvInvoke.cvFlip(rgb, rgb, FLIP.VERTICAL);
        }
        dataHandle.Free();
        texture.LoadRawTextureData(data);
        texture.Apply();
        return texture;
    }
    else //if (typeof(TColor) == typeof(Rgba) && typeof(TDepth) == typeof(Byte))
    {
        Texture2D texture = new Texture2D(size.Width, size.Height, TextureFormat.RGBA32, false);
        byte[] data = new byte[size.Width * size.Height * 4];
        GCHandle dataHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
        using (Image<Rgba, byte> rgba = new Image<Rgba, byte>(size.Width, size.Height, size.Width * 4, dataHandle.AddrOfPinnedObject()))
        {
            rgba.ConvertFrom(image);
            if (correctForVerticleFlip)
                CvInvoke.cvFlip(rgba, rgba, FLIP.VERTICAL);
        }
        dataHandle.Free();
        texture.LoadRawTextureData(data);
        texture.Apply();
        return texture;
    }
    //return null;
}
If you don't want to use interop, you can also use something like
cameraframe.Convert<Rgb,byte>().Data.Cast<byte>().ToArray<byte>()
and use it instead of the interop section.
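For completeness, a hedged sketch of that non-interop path; it assumes the image rows are tightly packed (no stride padding) and requires System.Linq for Cast/ToArray:

// Flatten the EmguCV image data and upload it as raw RGB24 pixels.
byte[] data = cameraframe.Convert<Rgb, byte>().Data.Cast<byte>().ToArray();
Texture2D tex = new Texture2D(cameraframe.Width, cameraframe.Height,
                              TextureFormat.RGB24, false);
tex.LoadRawTextureData(data);
tex.Apply();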
Both solutions worked for me. Just remember to destroy the texture before replacing it; I had memory leak issues before I did that.