I have a file containing arrays of vertices, indices, UVs, textures and so on; in short, everything needed to draw a model with a texture on it. For example, it should be a 3D cube with a texture like wood.
So, what I have for now is: I can display the vertices of the cube (I see my model), but I don't know how to apply a texture to it.
Here is my code:
public void Start()
{
m_stream = DecoderAPI.create_stream_decoder_obj();
string pathToFile = "path_to_my_file";
bool isInitialized = DecoderAPI.stream_init_model(m_stream, pathToFile);
if (isInitialized)
{
m_curFrame = DecoderAPI.stream_get_frame_obj(m_stream, 1);
MeshRenderer meshRenderer = gameObject.AddComponent<MeshRenderer>();
meshRenderer.sharedMaterial = new Material(Shader.Find("Standard"));
Mesh mesh = new Mesh();
//Vertices***
int vertexCount = DecoderAPI.frame_get_vertex_count(m_curFrame);
int xyzArrSize = vertexCount * 3;
float[] xyzArray = new float[xyzArrSize];
IntPtr xyz = DecoderAPI.frame_get_vertex_xyz(m_curFrame);
Marshal.Copy(xyz, xyzArray, 0, xyzArrSize);
Vector3[] vertices = new Vector3[vertexCount];
for (int i = 0; i < vertexCount; i++)
{
vertices[i] = new Vector3(xyzArray[i * 3], xyzArray[i * 3 + 1], xyzArray[i * 3 + 2]);
}
mesh.vertices = vertices;
//***
//Faces***
int faceCount = DecoderAPI.frame_face_count(m_curFrame);
int trisArrSize = faceCount * 3;
int[] tris = new int[trisArrSize];
IntPtr indices = DecoderAPI.frame_face_indices(m_curFrame);
Marshal.Copy(indices, tris, 0, trisArrSize);
mesh.triangles = tris;
//***
mesh.RecalculateNormals();
MeshFilter meshFilter = gameObject.AddComponent<MeshFilter>();
meshFilter.mesh = mesh;
//TEXTURE ****
int uvCount = DecoderAPI.frame_get_uv_count(m_curFrame);
IntPtr uvData = DecoderAPI.frame_get_uv_data(m_curFrame);
IntPtr textureObj = DecoderAPI.frame_get_texture_obj(m_curFrame);
DecoderAPI.TextureInfo textureInfo = DecoderAPI.texture_get_info(textureObj);
int width = textureInfo.width;
int height = textureInfo.height;
int channels = textureInfo.channels;
int stride = textureInfo.stride;
DecoderAPI.ColorType color_type = textureInfo.color_type;
IntPtr pixels = textureInfo.pixels;
// HOW DO I APPLY THIS TEXTURE DATA TO MY MODEL?
//***
DecoderAPI.frame_release(m_curFrame);
}
}
I found this answer: https://answers.unity.com/questions/390878/how-do-i-apply-a-texture-to-a-3d-model.html
but I need to know how to apply it dynamically.
Any suggestions? Or maybe some links to tutorials?
EDIT
public void Start()
{
m_stream = DecoderAPI.create_stream_decoder_obj();
string pathToFile = "my_path_to_file";
bool isInitialized = DecoderAPI.stream_init_model(m_stream, pathToFile);
if (isInitialized)
{
m_curFrame = DecoderAPI.stream_get_frame_obj(m_stream, 1);
MeshRenderer meshRenderer = gameObject.AddComponent<MeshRenderer>();
meshRenderer.sharedMaterial = new Material(Shader.Find("Standard"));
Mesh mesh = new Mesh();
//Vertices***
int vertexCount = DecoderAPI.frame_get_vertex_count(m_curFrame);
int xyzArrSize = vertexCount * 3;
float[] xyzArray = new float[xyzArrSize];
IntPtr xyz = DecoderAPI.frame_get_vertex_xyz(m_curFrame);
Marshal.Copy(xyz, xyzArray, 0, xyzArrSize);
Vector3[] vertices = new Vector3[vertexCount];
for (int i = 0; i < vertexCount; i++)
{
vertices[i] = new Vector3(xyzArray[i * 3], xyzArray[i * 3 + 1], xyzArray[i * 3 + 2]);
}
mesh.vertices = vertices;
//***
//Faces***
int faceCount = DecoderAPI.frame_face_count(m_curFrame);
int trisArrSize = faceCount * 3;
int[] tris = new int[trisArrSize];
IntPtr indices = DecoderAPI.frame_face_indices(m_curFrame);
Marshal.Copy(indices, tris, 0, trisArrSize);
mesh.triangles = tris;
//***
mesh.RecalculateNormals();
//UV***
int uvCount = DecoderAPI.frame_get_uv_count(m_curFrame);
IntPtr uvData = DecoderAPI.frame_get_uv_data(m_curFrame);
int uvArrSize = uvCount * 2;
float[] uvArr = new float[uvArrSize];
Vector2[] uv = new Vector2[uvCount];
Marshal.Copy(uvData, uvArr, 0, uvArrSize);
for(int i = 0; i < uvCount; i++)
{
uv[i] = new Vector2(uvArr[i * 2], uvArr[i * 2 + 1]);
}
mesh.uv = uv;
//***
MeshFilter meshFilter = gameObject.AddComponent<MeshFilter>();
meshFilter.mesh = mesh;
//TEXTURE ****
IntPtr textureObj = DecoderAPI.frame_get_texture_obj(m_curFrame);
DecoderAPI.TextureInfo textureInfo = DecoderAPI.texture_get_info(textureObj);
int width = textureInfo.width;
int height = textureInfo.height;
int channels = textureInfo.channels;
int stride = textureInfo.stride;
DecoderAPI.ColorType color_type = textureInfo.color_type;
IntPtr pixels = textureInfo.pixels;
Texture2D texture = new Texture2D(width, height);
texture.LoadRawTextureData(pixels, width * channels *height);
texture.Apply();
meshRenderer.material.SetTexture("_MainText", texture);
//***
DecoderAPI.frame_release(m_curFrame);
}
}
But now I am getting this error:
UnityException: LoadRawTextureData: not enough data provided (will result in overread).
UnityEngine.Texture2D.LoadRawTextureData (System.IntPtr data, System.Int32 size) (at <a9810827dce3444a8e5c4e9f3f5e0828>:0)
Model.Start () (at Assets/Scripts/Model.cs:98)
What am I doing wrong?
First off, your code to construct the cube is missing the UV part, so even if you assign the texture to the material the result is undefined. Look at the code samples on the Mesh manual page about adding the UVs as well: https://docs.unity3d.com/ScriptReference/Mesh.html
Once you have the UVs, all you have to do is set the texture using SetTexture (see https://docs.unity3d.com/ScriptReference/Material.SetTexture.html).
On a separate note, in your code you are using sharedMaterial instead of material: that is not advisable unless you have many objects all using the same material and you want to change them all.
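For example, a minimal sketch of the UV part for a single quad face (illustrative values; for a cube, each vertex needs its own UV entry, which usually means 24 vertices so each face can be mapped independently):
// One UV coordinate per vertex, in the same order as mesh.vertices.
Vector2[] uv = new Vector2[]
{
    new Vector2(0, 0), // bottom-left
    new Vector2(1, 0), // bottom-right
    new Vector2(0, 1), // top-left
    new Vector2(1, 1)  // top-right
};
mesh.uv = uv;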
EDIT:
To get a texture from a pixel buffer you create a Texture2D object of the given size and colour type, then you apply the data like this:
myTexture.LoadRawTextureData(myPixels);
myTexture.Apply();
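A slightly fuller sketch, assuming the decoder delivers tightly packed pixels (the TextureFormat has to match the channel count from your TextureInfo, otherwise LoadRawTextureData reads the wrong number of bytes, which is exactly the "not enough data provided" error above):
// Assumption: 3 channels = RGB24, 4 channels = RGBA32, rows tightly packed.
TextureFormat format = (channels == 4) ? TextureFormat.RGBA32 : TextureFormat.RGB24;
Texture2D myTexture = new Texture2D(width, height, format, false);
myTexture.LoadRawTextureData(pixels, width * height * channels); // size in bytes
myTexture.Apply();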
~Pino
If you have a texture and a MeshRenderer, it works like this:
void SetYourTexture()
{
MeshRenderer yourMeshRenderer = GetComponent<MeshRenderer>();
//If you only need one texture on the material, Unity understands _MainTex as the mainTexture;
yourMeshRenderer.material.SetTexture("_MainTex", yourTexture);
}
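Equivalently, since the property name must be spelled exactly _MainTex for the built-in shaders, you can use the mainTexture shorthand:
// mainTexture writes to the material's main texture property (_MainTex on built-in shaders).
yourMeshRenderer.material.mainTexture = yourTexture;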
Related
I'm learning about mesh rendering in Unity.
I followed the documentation and was able to draw a blue quad on the screen.
However, I can't figure out how to render things so that the entire monitor is used.
I think what I'm missing is setting some sort of projection so that the coordinate systems match. But how can I do that? Ideally I would like to do:
x_start = 0;
x_end = W;
y_start = 0;
y_end = H;
And have this quad cover the entire screen.
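Something like the following is what I imagine, assuming an orthographic projection is the right tool (a sketch, not my working code):
// Map world x in [0, W] and y in [0, H] to the full viewport.
// Assumes the camera sits at the origin looking down +z (identity view),
// and the quad's z (999 below) lies inside the near/far range.
Camera.main.orthographic = true;
Camera.main.projectionMatrix = Matrix4x4.Ortho(0, W, 0, H, -1, 1000);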
Here is my actual code:
using System.IO;
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.XR;
public class MeshRendererTest : MonoBehaviour
{
// Start is called before the first frame update
static Material mat;
float z;
float W;
float H;
Matrix4x4 projectionMatrix;
void Start(){
W = 2480.0f;
H = 2416.0f;
//projectionMatrix = Matrix4x4.Ortho(0, (int)W, (int)H, 0, -1, 100);
Shader shader = Shader.Find("Hidden/Internal-Colored");
//Shader shader2 = Shader.Find("Standard");
//Shader shader2 = Shader.Find("Unlit/UnlitAlphaWithFade");
mat = new Material(shader);
mat.hideFlags = HideFlags.HideAndDontSave;
//texMat.hideFlags = HideFlags.HideAndDontSave;
// Turn backface culling off
mat.SetInt("_Cull", (int)UnityEngine.Rendering.CullMode.Off);
//texMat.SetInt("_Cull", (int)UnityEngine.Rendering.CullMode.Off);
// Turn off depth writes
mat.SetInt("_ZWrite", 0);
//texMat.SetInt("_ZWrite", 0);
z = 999.0f;
mat.SetColor("_Color", Color.blue);
mat.SetPass(0);
MeshRenderer meshRenderer = gameObject.AddComponent<MeshRenderer>();
if (meshRenderer == null){
Debug.Log("Mesh Renderer is NUll");
return;
}
//meshRenderer.sharedMaterial = new Material(Shader.Find("Standard"));
meshRenderer.sharedMaterial = mat;
MeshFilter meshFilter = gameObject.AddComponent<MeshFilter>();
if (meshFilter == null){
Debug.Log("Mesh Filter is NUll");
return;
}
Mesh mesh = new Mesh();
float x_start = 0;
float x_end = x_start + 100;
float y_start = 0;
float y_end = y_start + 600;
Vector3[] vertices = new Vector3[4]
{
new Vector3(x_start, y_start, z),
new Vector3(x_end, y_start, z),
new Vector3(x_start, y_end, z),
new Vector3(x_end, y_end, z)
};
mesh.vertices = vertices;
int[] tris = new int[6]
{
// lower left triangle
0, 2, 1,
// upper right triangle
2, 3, 1
};
mesh.triangles = tris;
Vector3[] normals = new Vector3[4]
{
-Vector3.forward,
-Vector3.forward,
-Vector3.forward,
-Vector3.forward
};
mesh.normals = normals;
Vector2[] uv = new Vector2[4]
{
new Vector2(0, 0),
new Vector2(1, 0),
new Vector2(0, 1),
new Vector2(1, 1)
};
mesh.uv = uv;
meshFilter.mesh = mesh;
}
void OnRenderObject(){
TestStuff();
}
void TestStuff(){
Camera.main.ResetProjectionMatrix();
Matrix4x4 newProj = Matrix4x4.identity;
newProj = newProj * transform.localToWorldMatrix;
newProj = newProj * Camera.main.projectionMatrix;
newProj = newProj * projectionMatrix;
Camera.main.projectionMatrix = newProj;
}
}
I'm currently implementing an after-image effect and I've run into a problem with Graphics.DrawMesh. The code is shown below:
public class AfterImage3DByCombine : MonoBehaviour
{
public class AfterImage
{
public Mesh mesh;
public Material material;
// public Matrix4x4 matrix;
public float duration;
public float time;
}
protected SkinnedMeshRenderer[] skinRenderers;
protected MeshFilter[] filters;
protected int filtersCount = 0;
public bool IncludeMeshFilter = true;
public Material EffectMaterial;
public float Duration = 5;
public float Interval = 0.2f;
public float FadeoutTime = 1;
private float mTime = 5;
private List<AfterImage> mAfterImageList = new List<AfterImage>();
protected virtual void Awake()
{
skinRenderers = GetComponentsInChildren<SkinnedMeshRenderer>();
if (IncludeMeshFilter)
{
filters = GetComponentsInChildren<MeshFilter>();
filtersCount = filters.Length;
}
}
// called from elsewhere to trigger the after-image effect
public void Play()
{
if (skinRenderers.Length + filtersCount <= 0)
{
return;
}
mTime = Duration;
StartCoroutine(AddAfterImage());
}
IEnumerator AddAfterImage()
{
while (mTime > 0)
{
CreateImage();
yield return new WaitForSeconds(Interval);
mTime -= Interval;
}
yield return null;
}
void CreateImage()
{
CombineInstance[] combineInstances = new CombineInstance[skinRenderers.Length + filtersCount];
int index = 0;
for (int i = 0; i < skinRenderers.Length; i++)
{
var render = skinRenderers[i];
var mesh = new Mesh();
render.BakeMesh(mesh);
combineInstances[index] = new CombineInstance
{
mesh = mesh,
transform = render.gameObject.transform.localToWorldMatrix,
subMeshIndex = 0
};
index++;
}
for (int i = 0; i < filtersCount; i++)
{
var render = filters[i];
var temp = (render.sharedMesh != null) ? render.sharedMesh : render.mesh;
var mesh = (Mesh)Instantiate(temp);
combineInstances[index] = new CombineInstance
{
mesh = mesh,
transform = render.gameObject.transform.localToWorldMatrix,
subMeshIndex = 0
};
index++;
}
Mesh combinedMesh = new Mesh();
combinedMesh.CombineMeshes(combineInstances, true, true);
mAfterImageList.Add(new AfterImage
{
mesh = combinedMesh,
material = new Material(EffectMaterial),
time = FadeoutTime,
duration = FadeoutTime,
});
}
void LateUpdate()
{
bool needRemove = false;
foreach (var image in mAfterImageList)
{
image.time -= Time.deltaTime;
if (image.material.HasProperty("_Color"))
{
Color color = Color.red;
color.a = Mathf.Max(0, image.time / image.duration);
image.material.SetColor("_Color", color);
}
Matrix4x4 mat = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, Vector3.one * 2f);
//public static void DrawMesh(Mesh mesh, Matrix4x4 matrix, Material material, int layer, Camera camera, int submeshIndex, MaterialPropertyBlock properties, ShadowCastingMode castShadows);
Graphics.DrawMesh(image.mesh, Matrix4x4.identity, image.material, gameObject.layer, null, 0, null, false);
if (image.time <= 0)
{
needRemove = true;
}
}
if (needRemove)
{
mAfterImageList.RemoveAll(x => x.time <= 0);
}
}
}
Since my prefab is scaled by 0.5 while it's running, I pass a matrix with two times scaling, Matrix4x4 mat = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, Vector3.one * 2f);, into Graphics.DrawMesh.
Then the mesh drawn by Graphics.DrawMesh isn't in its original position; there is an offset between the original mesh and the drawn mesh.
And if I pass Matrix4x4.identity into Graphics.DrawMesh, the drawn mesh has 0.5 times scaling, which looks smaller than the original mesh.
Why is there an offset, and how can I eliminate it without changing the prefab's scale?
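The offset is what a pure scale matrix produces: it scales positions around the world origin, so a mesh not centered at the origin moves. A sketch of scaling around the object's own pivot instead (pivot is my assumption for the point that should stay fixed, e.g. the prefab root's position):
// Scale by 2 around 'pivot' instead of around the world origin:
// translate the pivot to the origin, scale, then translate back.
Vector3 pivot = transform.position;
Matrix4x4 mat = Matrix4x4.Translate(pivot)
              * Matrix4x4.Scale(Vector3.one * 2f)
              * Matrix4x4.Translate(-pivot);
Graphics.DrawMesh(image.mesh, mat, image.material, gameObject.layer, null, 0, null, false);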
Good afternoon! I'm trying to sew up two meshes. I do it as follows: first I convert the sprite into a mesh, then I duplicate the resulting mesh, shift it along the z axis, invert it, and then sew the two together. But I ran into a problem: it stitches rectangular meshes well, but circular meshes end up with defects on the sides. So, how can I sew up these sides? (Materials and code below.)
public class ConvertSpriteInMesh : MonoBehaviour
{
public Sprite sprite;
private MeshDraft meshDraft = new MeshDraft();
private Mesh mesh;
void Start()
{
GetComponent<MeshFilter>().mesh = SpriteToMesh(sprite);
SewingUp();
}
/// <summary>
/// Sewing up the meshes
/// </summary>
private void SewingUp()
{
mesh = GetComponent<MeshFilter>().mesh;
meshDraft = new MeshDraft(mesh);
int leftVertical = mesh.vertices.Length / 2; // getting the beginning of the left vertical of the mesh
int index = mesh.vertices.Length;
for (int i = 0; i < leftVertical - 1; i++)
{
meshDraft.AddQuad(mesh.vertices[i], mesh.vertices[i+1], mesh.vertices[i + leftVertical + 1],mesh.vertices[i+leftVertical],
index);
index += 4;
}
GetComponent<MeshFilter>().mesh = meshDraft.ToMesh(); // assign the resulting mesh
}
/// <summary>
/// Convert Sprite to Mesh
/// </summary>
/// <param name="_sprite"></param>
/// <returns></returns>
private Mesh SpriteToMesh(Sprite _sprite)
{
// declaring variables
Mesh mesh = new Mesh();
Vector3[] _vertices;
int[] _triangle;
// assigning values
_vertices = Array.ConvertAll(_sprite.vertices, i => (Vector3)i);
_triangle = Array.ConvertAll(_sprite.triangles, i => (int)i);
// changing the size of the array
Array.Resize(ref _vertices, _vertices.Length * 2);
Array.Resize(ref _triangle, _triangle.Length * 2);
// adding another side
for (int i = 0; i < _vertices.Length / 2; i++)
{
_vertices[_vertices.Length / 2 + i] = new Vector3(_vertices[i].x, _vertices[i].y, 0.5f);
}
for (int i = 0; i < _triangle.Length / 2; i++)
{
_triangle[_triangle.Length / 2 + i] = _triangle[i] + (_vertices.Length / 2);
}
// invert the second side
for(int i = _triangle.Length / 2; i < _triangle.Length; i += 3) {
var temp = _triangle[i];
_triangle[i] = _triangle[i + 1];
_triangle[i + 1] = temp;
}
// assigning the mesh
mesh.vertices = _vertices;
mesh.triangles = _triangle;
mesh.RecalculateBounds();
mesh.RecalculateNormals();
return mesh;
}
}
public partial class MeshDraft {
public string name = "";
public List<Vector3> vertices = new List<Vector3>();
public List<int> triangles = new List<int>();
public List<Vector3> normals = new List<Vector3>();
public List<Vector4> tangents = new List<Vector4>();
public List<Vector2> uv = new List<Vector2>();
public List<Vector2> uv2 = new List<Vector2>();
public List<Vector2> uv3 = new List<Vector2>();
public List<Vector2> uv4 = new List<Vector2>();
public List<Color> colors = new List<Color>();
public MeshDraft(Mesh mesh) {
name = mesh.name;
vertices.AddRange(mesh.vertices);
triangles.AddRange(mesh.triangles);
normals.AddRange(mesh.normals);
tangents.AddRange(mesh.tangents);
uv.AddRange(mesh.uv);
uv2.AddRange(mesh.uv2);
uv3.AddRange(mesh.uv3);
uv4.AddRange(mesh.uv4);
colors.AddRange(mesh.colors);
}
public void AddQuad(Vector3 v0, Vector3 v1, Vector3 v2, Vector3 v3, int index, Color color = default(Color)) {
vertices.Add(v0);
vertices.Add(v1);
vertices.Add(v2);
vertices.Add(v3);
Vector3 normal0 = Vector3.Cross(v2 - v1, v3 - v1).normalized;
Vector3 normal1 = Vector3.Cross(v1 - v0, v2 - v0).normalized;
normals.Add(normal0);
normals.Add(normal0);
normals.Add(normal1);
normals.Add(normal1);
colors.Add(color);
colors.Add(color);
colors.Add(color);
colors.Add(color);
triangles.Add(index);
triangles.Add(index + 1);
triangles.Add(index + 2);
triangles.Add(index);
triangles.Add(index + 2);
triangles.Add(index + 3);
}
public Mesh ToMesh() {
var mesh = new Mesh { name = name };
mesh.SetVertices(vertices);
mesh.SetTriangles(triangles, 0);
mesh.SetNormals(normals);
mesh.SetTangents(tangents);
mesh.SetUVs(0, uv);
mesh.SetUVs(1, uv2);
mesh.SetUVs(2, uv3);
mesh.SetUVs(3, uv4);
mesh.SetColors(colors);
return mesh;
}
}
Successful stitching (screen)
Bad stitching (screen)
I was given an answer on another forum; for anyone interested, I'll leave the link here: https://www.cyberforum.ru/unity/thread2823987.html
I see that when I create a mesh and textures in Unity on each frame (30 fps), it feels like Unity doesn't release this data from memory after use.
Here is my code:
private bool UpdateFrame(int frameIdx)
{
bool result = true;
int readyBuffSize = DecoderAPI.stream_get_ready_buffer_size(m_stream);
if (m_currMeshFrameIndex != frameIdx
&& readyBuffSize > 0)
{
m_currMeshFrameIndex = frameIdx;
IntPtr frame = DecoderAPI.stream_get_next_frame_obj(m_stream);
if (frame == IntPtr.Zero)
{
result = false;
}
else
{
long sequentialFrameIdx = DecoderAPI.get_sequential_number(frame);
DebugMethod("UNITY UpdateFrame", $"readyBuffSize :: {readyBuffSize}");
DebugMethod("UNITY UpdateFrame", $"sequentialFrameIdx :: {sequentialFrameIdx}");
Mesh releaseFormer = m_meshFilter.mesh;
m_meshFilter.mesh = CreateMesh(frame);
Texture2D texture = CreateTexture(frame);
m_meshRenderer.material.SetTexture("_MainTex", texture);
DecoderAPI.stream_release_frame_obj(m_stream, frame);
Destroy(releaseFormer); // does not seem to help: even when there are no more allocations in C++ the process grows endlessly
}
}
return result;
}
private Mesh CreateMesh(IntPtr frame)
{
Mesh mesh = new Mesh();
//Vertices***
int vertexCount = DecoderAPI.frame_get_vertex_count(frame);
byte[] xyzBytes = new byte[vertexCount * 3 * 4];
IntPtr xyz = DecoderAPI.frame_get_vertex_xyz(frame);
Vector3[] vertices = new Vector3[vertexCount];
GCHandle handle = GCHandle.Alloc(vertices, GCHandleType.Pinned);
IntPtr pointer = handle.AddrOfPinnedObject();
Marshal.Copy(xyz, xyzBytes, 0, xyzBytes.Length);
Marshal.Copy(xyzBytes, 0, pointer, xyzBytes.Length);
handle.Free();
mesh.vertices = vertices;
//***
//Faces***
int faceCount = DecoderAPI.frame_face_count(frame);
int trisArrSize = faceCount * 3;
int[] tris = new int[trisArrSize];
IntPtr indices = DecoderAPI.frame_face_indices(frame);
Marshal.Copy(indices, tris, 0, trisArrSize);
mesh.triangles = tris;
//***
mesh.RecalculateNormals();
//UV***
int uvCount = DecoderAPI.frame_get_uv_count(frame);
IntPtr uvData = DecoderAPI.frame_get_uv_data(frame);
int uvArrSize = uvCount * 2;
float[] uvArr = new float[uvArrSize];
Vector2[] uv = new Vector2[uvCount];
Marshal.Copy(uvData, uvArr, 0, uvArrSize);
for (int i = 0; i < uvCount; i++)
{
Vector2 result = new Vector2(uvArr[i * 2], uvArr[i * 2 + 1]) * new Vector2(1, -1);
uv[i] = result;
}
mesh.uv = uv;
//***
if (vertexCount != uvCount)
{
long frameId = DecoderAPI.get_sequential_number(frame);
DebugMethod("UNITY CrteateMesh", $"HERE : in frame id :: {frameId}, vertexCount : {vertexCount}, uvCount : {uvCount}");
}
return mesh;
}
private Texture2D CreateTexture(IntPtr frame)
{
IntPtr textureObj = DecoderAPI.frame_get_texture_obj(frame);
DecoderAPI.TextureInfo textureInfo = DecoderAPI.texture_get_info(textureObj);
int width = textureInfo.width;
int height = textureInfo.height;
int channels = textureInfo.channels;
int stride = textureInfo.stride;
//DecoderAPI.ColorType colorType = textureInfo.color_type;
IntPtr pixels = textureInfo.pixels;
Texture2D texture = new Texture2D(width, height, TextureFormat.RGB24, false);
//Texture2D texture = new Texture2D(width, height, TextureFormat.DXT5, false);
texture.LoadRawTextureData(pixels, width * channels * height);
texture.Apply();
return texture;
}
So, what I do is create a mesh and texture for each frame, use them, and then I expect Unity to release them from memory after use, but no. OK, I found that this Destroy(releaseFormer) call should help, but it's still the same: I see in Task Manager that memory grows endlessly...
As a test I tried this: I start my C++ code and generate, let's say, 100 frames, then I stop it (so my C++ side doesn't allocate anything), and I still see that memory keeps growing. What I expect is: OK, even if Unity doesn't release the data I no longer need, I loaded 100 frames and that's it, so why does memory continue to grow?
The question is: how do I release from memory all the frames that I no longer need?
EDIT
I have changed this method and added Destroy in the proper order:
private bool UpdateFrame(int frameIdx)
{
bool result = true;
int readyBuffSize = -1;
if (m_stream != IntPtr.Zero)
{
readyBuffSize = DecoderAPI.stream_get_ready_buffer_size(m_stream);
}
if (m_currMeshFrameIndex != frameIdx
&& readyBuffSize > 0)
{
m_currMeshFrameIndex = frameIdx;
IntPtr frame = DecoderAPI.stream_get_next_frame_obj(m_stream);
if (frame == IntPtr.Zero)
{
result = false;
}
else
{
long sequentialFrameIdx = DecoderAPI.frame_get_sequential_number(frame);
DebugMethod("UNITY UpdateFrame", $"readyBuffSize :: {readyBuffSize}");
DebugMethod("UNITY UpdateFrame", $"sequentialFrameIdx :: {sequentialFrameIdx}");
if (m_meshFilter.mesh != null)
{
Destroy(m_meshFilter.mesh);
}
m_meshFilter.mesh = CreateMesh(frame);
if (m_texture != null)
{
Destroy(m_texture);
}
m_texture = CreateTexture(frame);
m_meshRenderer.material.SetTexture("_MainTex", m_texture);
if (m_stream != IntPtr.Zero)
{
DecoderAPI.stream_release_frame_obj(m_stream, frame);
}
}
}
return result;
}
releaseFormer is the mesh, right? Did you try calling Destroy on the texture object itself?
Another thread suggested Resources.UnloadUnusedAssets().
Personally, I'd try doing this with a RenderTexture, especially if the texture size doesn't change too often, though that might not be possible for your use case.
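Building on those comments, a sketch of an allocate-once, refill-per-frame pattern (my assumptions: texture dimensions stay constant between frames, pixels are RGB24, and ApplyFrame is a hypothetical helper; the DecoderAPI calls are the ones from the question):
// Allocated once and reused; refilled every frame instead of recreated.
private Mesh m_mesh;
private Texture2D m_texture;

private void ApplyFrame(IntPtr frame) // hypothetical helper
{
    if (m_mesh == null) { m_mesh = new Mesh(); m_meshFilter.mesh = m_mesh; }
    m_mesh.Clear(); // keeps the allocation, drops the old geometry
    // ... fill m_mesh.vertices / triangles / uv exactly as in CreateMesh ...

    IntPtr textureObj = DecoderAPI.frame_get_texture_obj(frame);
    DecoderAPI.TextureInfo info = DecoderAPI.texture_get_info(textureObj);
    if (m_texture == null)
    {
        m_texture = new Texture2D(info.width, info.height, TextureFormat.RGB24, false);
        m_meshRenderer.material.mainTexture = m_texture;
    }
    m_texture.LoadRawTextureData(info.pixels, info.width * info.height * info.channels);
    m_texture.Apply(); // uploads in place, no new Texture2D per frame
}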
Yesterday others on Stack Overflow helped me figure out how to recolor a mesh triangle red by clicking on it. It works great; the only problem is that the 3 vertices that get recolored are shared between triangles. This results in coloration that looks rather smeared. I'm really hoping there's a way to color only a single face (or normal, if you will).
I've attached the following script to my mesh; it uses a raycast to determine the surface coordinate and translates a green cube there. The gif below better illustrates the problem.
Once again, any help or insight into this would be greatly appreciated. Thanks!
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class MyRayDraw : MonoBehaviour
{
public GameObject cube;
private MeshRenderer meshRenderer;
Mesh mesh;
Vector3[] vertices;
Color[] colorArray;
private void Start()
{
mesh = transform.GetComponent<MeshFilter>().mesh;
vertices = mesh.vertices;
colorArray = new Color[vertices.Length];
for (int k = 0; k < vertices.Length; k++)
{
colorArray[k] = Color.white;
}
mesh.colors = colorArray;
}
void Update()
{
if (Input.GetMouseButtonDown(0))
{
Ray ray = Camera.main.ScreenPointToRay(Input.mousePosition);
if (Physics.Raycast(ray, out RaycastHit hit))
{
Snap(hit.point); // Moves the green cube
int[] triangles = mesh.triangles;
var vertIndex1 = triangles[hit.triangleIndex * 3 + 0];
var vertIndex2 = triangles[hit.triangleIndex * 3 + 1];
var vertIndex3 = triangles[hit.triangleIndex * 3 + 2];
colorArray[vertIndex1] = Color.red;
colorArray[vertIndex2] = Color.red;
colorArray[vertIndex3] = Color.red;
mesh.colors = colorArray;
}
else
{
Debug.Log("no hit");
}
}
}
}
As you say, the issue is that the vertices are shared between triangles, but coloring is always vertex based.
The idea for a solution is:
for each vertex of the hit triangle, check whether it is used by other triangles
if so, copy its position to create a new, separate vertex
update the triangle to use the newly created vertex indices
(optionally) use RecalculateNormals to make the triangles face outward without having to care about the order of the provided vertices
using System.Linq;
using UnityEngine;
public class MyRayDraw : MonoBehaviour
{
public GameObject cube;
// Better to reference those already in the Inspector
[SerializeField] private MeshFilter meshFilter;
[SerializeField] private MeshRenderer meshRenderer;
[SerializeField] private MeshCollider meshCollider;
private Mesh _mesh;
private void Awake()
{
if (!meshFilter) meshFilter = GetComponent<MeshFilter>();
if (!meshRenderer) meshRenderer = GetComponent<MeshRenderer>();
if (!meshCollider) meshCollider = GetComponent<MeshCollider>();
_mesh = meshFilter.mesh;
// create a new colors array and initialize every vertex to white
var colors = new Color[_mesh.vertices.Length];
for (var k = 0; k < colors.Length; k++)
{
colors[k] = Color.white;
}
_mesh.colors = colors;
}
private void Update()
{
if (!Input.GetMouseButtonDown(0)) return;
var ray = Camera.main.ScreenPointToRay(Input.mousePosition);
if (Physics.Raycast(ray, out var hit))
{
Debug.Log(hit.triangleIndex);
//cube.transform.position = hit.point;
// Get current vertices, triangles and colors
var vertices = _mesh.vertices;
var triangles = _mesh.triangles;
var colors = _mesh.colors;
// Get the vert indices for this triangle
var vert1Index = triangles[hit.triangleIndex * 3 + 0];
var vert2Index = triangles[hit.triangleIndex * 3 + 1];
var vert3Index = triangles[hit.triangleIndex * 3 + 2];
// Get the positions for the vertices
var vert1Pos = vertices[vert1Index];
var vert2Pos = vertices[vert2Index];
var vert3Pos = vertices[vert3Index];
// Now, for all three vertices, we first check whether any other triangle is using them
// by simply counting how often the indices appear in the triangles list
var vert1Occurrences = 0;
var vert2Occurrences = 0;
var vert3Occurrences = 0;
foreach (var index in triangles)
{
if (index == vert1Index) vert1Occurrences++;
else if (index == vert2Index) vert2Occurrences++;
else if (index == vert3Index) vert3Occurrences++;
}
// Create copied Lists so we can dynamically add entries
var newVertices = vertices.ToList();
var newColors = colors.ToList();
// Now if a vertex is shared we need to add a new individual vertex
// and also an according entry for the color array
// and update the vertex index
// otherwise we will simply use the vertex we already have
if (vert1Occurrences > 1)
{
newVertices.Add(vert1Pos);
newColors.Add(new Color());
vert1Index = newVertices.Count - 1;
}
if (vert2Occurrences > 1)
{
newVertices.Add(vert2Pos);
newColors.Add(new Color());
vert2Index = newVertices.Count - 1;
}
if (vert3Occurrences > 1)
{
newVertices.Add(vert3Pos);
newColors.Add(new Color());
vert3Index = newVertices.Count - 1;
}
// Update the indices of the hit triangle to use the (possibly) new
// vertices instead
triangles[hit.triangleIndex * 3 + 0] = vert1Index;
triangles[hit.triangleIndex * 3 + 1] = vert2Index;
triangles[hit.triangleIndex * 3 + 2] = vert3Index;
// color these vertices
newColors[vert1Index] = Color.red;
newColors[vert2Index] = Color.red;
newColors[vert3Index] = Color.red;
// write everything back
_mesh.vertices = newVertices.ToArray();
_mesh.triangles = triangles;
_mesh.colors = newColors.ToArray();
_mesh.RecalculateNormals();
}
else
{
Debug.Log("no hit");
}
}
}
Note, however, that this works for simple vertex coloring but might not for complex textures with UV mapping: you would also have to update mesh.uv when duplicating vertices.
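A sketch of that extra bookkeeping, assuming the same duplication branches as above (newUVs mirrors newVertices and newColors; shown for the first vertex only):
// Alongside newVertices/newColors, also copy the UVs of duplicated vertices.
var newUVs = _mesh.uv.ToList();
if (vert1Occurrences > 1)
{
    newVertices.Add(vert1Pos);
    newColors.Add(new Color());
    newUVs.Add(newUVs[vert1Index]); // duplicate the UV entry too
    vert1Index = newVertices.Count - 1;
}
// ... same for vert2Index / vert3Index ...
_mesh.uv = newUVs.ToArray();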