Generate mesh from one-color texture - unity3d

I wrote some code that lets the user draw and then generates a sprite from that drawing. So I get a sprite with a white background and my drawing (which is in a different color).
My question: how can I remove the white background at runtime (with C# code)?
My problem: I want to generate a mesh from the drawing, but with the white background I only get 4 vertices (the four corners of the sprite), and I want all the vertices of the actual shape I drew on the sprite (so many more than 4).
My current idea is to convert the drawing to a transparent background and then use Unity's sprite packer to generate a mesh from it.
My project: it's a game where the player can create their own circuit: the user draws a black-and-white sprite, then I convert it into a mesh with a collider and generate the new game circuit.
I already thought about clearing all the white pixels, but I don't think I would get many vertices with that technique.
Thanks for your help,
Axel

using System.IO;
using UnityEngine.UI;
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEditor;
using UnityEngine.Networking;
public class scri : MonoBehaviour
{
    // For saving the mesh ------------------------
    public KeyCode saveKey = KeyCode.F12;
    public string saveName = "SavedMesh";

    // Concerning the mesher ----------------------
    public GameObject mesher;                      // required reference
    public List<Vector3> vertices;
    public List<int> triangles;
    public Vector3 point0;
    public Vector3 point1;
    public Vector3 point2;
    public Vector3 point3;
    public int loop;
    public float size;
    public Mesh meshFilterMesh;
    public Mesh meshColliderMesh;

    // Sprite work
    public Color[] pixels;
    public Texture2D newTexture;
    public Texture2D oldTexture;                   // required reference
    private Sprite mySprite;
    private SpriteRenderer spriteRenderer;
    public int pathCount;
    public GameObject displayerComponent;          // required reference
    public PolygonCollider2D polygonColliderAdded; // required reference

    void Start()
    {
        // Mesher
        vertices = new List<Vector3>();
        triangles = new List<int>();
        meshFilterMesh = mesher.GetComponent<MeshFilter>().mesh;
        meshColliderMesh = mesher.GetComponent<MeshCollider>().sharedMesh;
        size = 10; // length of the mesh in the Z direction
        loop = 0;

        // Sprite
        pixels = oldTexture.GetPixels();
        newTexture = new Texture2D(oldTexture.width, oldTexture.height, TextureFormat.ARGB32, false);
        spriteRenderer = gameObject.AddComponent<SpriteRenderer>();
        ConvertSpriteAndCreateCollider(pixels);
        BrowseColliderToCreateMesh(polygonColliderAdded);
    }

    void Update()
    {
        // Save when F12 is pressed
        if (Input.GetKeyDown(saveKey)) { SaveAsset(); }
    }

    public void ConvertSpriteAndCreateCollider(Color[] pixels)
    {
        for (int i = 0; i < pixels.Length; i++)
        {
            // Clear all black pixels (black is the circuit, white is the walls)
            if (pixels[i].r == 0 && pixels[i].g == 0 && pixels[i].b == 0 && pixels[i].a == 1)
            {
                pixels[i] = Color.clear;
            }
        }

        // Set a new texture with this pixel list
        newTexture.SetPixels(pixels);
        newTexture.Apply();

        // Create a sprite from this texture
        mySprite = Sprite.Create(newTexture, new Rect(0, 0, newTexture.width, newTexture.height), new Vector2(10.0f, 10.0f), 10.0f, 0, SpriteMeshType.Tight, new Vector4(0, 0, 0, 0), false);

        // Add it to our displayerComponent
        displayerComponent.GetComponent<SpriteRenderer>().sprite = mySprite;

        // Add the polygon collider to our displayer component and get its path count
        polygonColliderAdded = displayerComponent.AddComponent<PolygonCollider2D>();
    }

    // Walk every path of the collider and call MakeMesh for each segment
    public void BrowseColliderToCreateMesh(PolygonCollider2D polygonColliderAdded)
    {
        // Browse all paths of the collider
        pathCount = polygonColliderAdded.pathCount;
        for (int i = 0; i < pathCount; i++)
        {
            Vector2[] path = polygonColliderAdded.GetPath(i);

            // Browse all points of the path
            for (int j = 1; j < path.Length; j++)
            {
                if (j != (path.Length - 1)) // if we aren't at the last point
                {
                    point0 = new Vector3(path[j - 1].x, path[j - 1].y, 0);
                    point1 = new Vector3(path[j - 1].x, path[j - 1].y, size);
                    point2 = new Vector3(path[j].x, path[j].y, size);
                    point3 = new Vector3(path[j].x, path[j].y, 0);
                    MakeMesh(point0, point1, point2, point3);
                }
                else // if we are at the last point, we need to close the loop with the first point
                {
                    point0 = new Vector3(path[j - 1].x, path[j - 1].y, 0);
                    point1 = new Vector3(path[j - 1].x, path[j - 1].y, size);
                    point2 = new Vector3(path[j].x, path[j].y, size);
                    point3 = new Vector3(path[j].x, path[j].y, 0);
                    MakeMesh(point0, point1, point2, point3);

                    point0 = new Vector3(path[j].x, path[j].y, 0);
                    point1 = new Vector3(path[j].x, path[j].y, size);
                    point2 = new Vector3(path[0].x, path[0].y, size); // first point
                    point3 = new Vector3(path[0].x, path[0].y, 0);    // first point
                    MakeMesh(point0, point1, point2, point3);
                }
            }
        }
    }

    // Generate a 2-triangle quad from the 4 points and add the resulting mesh to the collider
    public void MakeMesh(Vector3 point0, Vector3 point1, Vector3 point2, Vector3 point3)
    {
        // Add vertices
        vertices.Add(point0);
        vertices.Add(point1);
        vertices.Add(point2);
        vertices.Add(point3);

        // Triangle order
        triangles.Add(0 + loop * 4);
        triangles.Add(2 + loop * 4);
        triangles.Add(1 + loop * 4);
        triangles.Add(0 + loop * 4);
        triangles.Add(3 + loop * 4);
        triangles.Add(2 + loop * 4);
        loop = loop + 1;

        // Create the mesh
        meshFilterMesh.vertices = vertices.ToArray();
        meshFilterMesh.triangles = triangles.ToArray();

        // Add this mesh to the MeshCollider
        mesher.GetComponent<MeshCollider>().sharedMesh = meshFilterMesh;
    }

    // Save when F12 is pressed
    public void SaveAsset()
    {
        var mf = mesher.GetComponent<MeshFilter>();
        if (mf)
        {
            var savePath = "Assets/" + saveName + ".asset";
            Debug.Log("Saved Mesh to: " + savePath);
            AssetDatabase.CreateAsset(mf.mesh, savePath);
        }
    }
}

One approach is to generate the mesh directly, on your own terms. The upside is that you get very fine control over exactly what you want the pixel boundaries to look like, and you have better information to do your own triangulation of the mesh. The downside is that you have to do all of this yourself.
One way of implementing this is to use the Marching Squares algorithm to generate isobands from the pixel data (you can use the blue, green, or alpha channel as the isovalue, depending on whether the background is white or transparent), and then generate a piece of the mesh from each 2x2 pixel group that contains part of the isoband.
To get the pixel data from the image you can use Texture2D.GetPixels. Then you run the Marching Squares algorithm on that data to determine how to represent every 2x2 cluster of pixels in the mesh, and use that information to find the vertices of each triangle that represents that quad of pixels.
Once you convert each quad of pixels into triangles, arrange the vertices of those triangles into an array (make sure you order the vertices of each triangle clockwise as seen from the visible side) and use Mesh.SetVertices, together with Mesh.SetTriangles for the indices, to create a mesh from them.
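To make the idea a bit more concrete, here is a minimal sketch of just the sampling and classification step, assuming the isovalue comes from the alpha channel and using a hypothetical threshold of 0.5 (the class and method names are only illustrative, and the 16-entry per-case triangulation table that turns each case index into triangles is omitted):

using UnityEngine;

public class MarchingSquaresSketch : MonoBehaviour
{
    public Texture2D tex;             // source texture (drawing on a transparent background)
    public float isoThreshold = 0.5f; // assumed cut-off between "inside" and "outside"

    // Classify every 2x2 pixel cell into one of the 16 Marching Squares cases.
    public int[,] BuildCaseIndices()
    {
        Color[] pixels = tex.GetPixels(); // row by row, starting at the bottom-left pixel
        int w = tex.width;
        int h = tex.height;
        var cases = new int[w - 1, h - 1];

        for (int y = 0; y < h - 1; y++)
        {
            for (int x = 0; x < w - 1; x++)
            {
                // Sample the four corners of the cell; here the alpha channel is the isovalue.
                bool bottomLeft  = pixels[y * w + x].a           > isoThreshold;
                bool bottomRight = pixels[y * w + x + 1].a       > isoThreshold;
                bool topRight    = pixels[(y + 1) * w + x + 1].a > isoThreshold;
                bool topLeft     = pixels[(y + 1) * w + x].a     > isoThreshold;

                // Standard Marching Squares case index: one bit per corner, 0..15.
                cases[x, y] = (bottomLeft ? 1 : 0) | (bottomRight ? 2 : 0)
                            | (topRight ? 4 : 0) | (topLeft ? 8 : 0);
            }
        }

        return cases;
    }
}

Each case index then maps to a fixed set of triangles via the usual lookup table (nothing for case 0, a full quad for case 15, partial pieces for the rest), and those triangles are what you feed into Mesh.SetVertices and Mesh.SetTriangles.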

Another approach is to set the alpha of any non-red pixel to zero, and let Unity's sprite packer generate the mesh for you.
Here is one way to do that:
If it is an asset and you want to modify it, set the texture asset to have Read/Write enabled checked. If the texture is created at runtime (and is therefore not an asset) this step can be skipped.
Get the pixel data with Texture2D.GetPixels. This will get you an array of pixels in the form of Color[] pixels:
public Texture2D tex;
...
Color[] pixels = tex.GetPixels();
Iterate over the array and replace every pixel that is not pure red (which includes the white background pixels) with a clear pixel:
for (int i = 0; i < pixels.Length; i++)
{
    if (pixels[i].r != 1f
        || pixels[i].g != 0f
        || pixels[i].b != 0f)
    {
        pixels[i] = Color.clear;
    }
}
Set the texture pixel data with the modified pixel array:
tex.SetPixels(pixels);
tex.Apply();
The downside to this approach is that I do not know whether the Unity sprite packer can pack textures created at run time onto the sprite atlas. If it cannot, then a different tool would be needed for this approach to generate meshes from sprites at run time.
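If the runtime-created sprite does end up with a tight mesh (this depends on the Unity version; a Sprite.Create call with SpriteMeshType.Tight may still fall back to a plain quad for textures built at run time, so treat this as an assumption to verify), you can read the generated geometry back through Sprite.vertices and Sprite.triangles and copy it into a regular Mesh, roughly like this:

using UnityEngine;

public static class SpriteMeshCopy
{
    // Minimal sketch: copy a sprite's generated 2D geometry into a Mesh.
    // Only useful if the sprite actually received a tight mesh (more than 4 vertices).
    public static Mesh ToMesh(Sprite sprite)
    {
        // Sprite.vertices is a Vector2[]; a Mesh needs Vector3s.
        Vector2[] spriteVerts = sprite.vertices;
        var verts = new Vector3[spriteVerts.Length];
        for (int i = 0; i < spriteVerts.Length; i++)
        {
            verts[i] = spriteVerts[i];
        }

        // Sprite.triangles is a ushort[]; Mesh.triangles wants ints.
        ushort[] spriteTris = sprite.triangles;
        var tris = new int[spriteTris.Length];
        for (int i = 0; i < spriteTris.Length; i++)
        {
            tris[i] = spriteTris[i];
        }

        var mesh = new Mesh();
        mesh.vertices = verts;
        mesh.triangles = tris;
        mesh.RecalculateNormals();
        mesh.RecalculateBounds();
        return mesh;
    }
}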

OK, I've made something:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class scri : MonoBehaviour
{
    public Texture2D tex;
    public Texture2D newText;
    public Sprite sprite;
    public List<Color> colorList;
    private Sprite mySprite;
    private SpriteRenderer sr;

    // Start is called before the first frame update
    void Start()
    {
        sr = gameObject.AddComponent<SpriteRenderer>();
        newText = new Texture2D(tex.width, tex.height, TextureFormat.ARGB32, false);

        Color[] pixels = sprite.texture.GetPixels();
        for (int i = 0; i < pixels.Length; i++)
        {
            Debug.Log(pixels[i]);
            if (pixels[i].r == 1)
            {
                pixels[i] = Color.clear;
            }
        }

        newText.SetPixels(pixels);
        newText.Apply();

        mySprite = Sprite.Create(newText, new Rect(0.0f, 0.0f, newText.width, newText.height), new Vector2(0.5f, 0.5f), 100.0f);
        sr.sprite = mySprite;
    }

    // Update is called once per frame
    // void Update()
    // {
    //     Debug.Log(sprite.triangles.Length);
    //     Debug.Log(sprite.vertices.Length);
    // }
}
Useful links:
https://forum.unity.com/threads/setting-pixel-to-transparent-turns-out-black.172375/
https://docs.unity3d.com/ScriptReference/Sprite.Create.html
https://forum.unity.com/threads/is-it-possible-to-convert-a-texture2d-from-one-format-to-another-in-standalone-run-time.327141/
https://forum.unity.com/threads/texture-setpixels.431177/
But I don't know why: if the PNG has a white background to begin with, it doesn't work well...:
With an SVG it's OK from the start, even without my code.
But in the Sprite Editor I could generate a custom physics shape:
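If the custom physics shape from the Sprite Editor is the route you take, it can also be read back at runtime with Sprite.GetPhysicsShapeCount and Sprite.GetPhysicsShape and fed into a PolygonCollider2D. A small sketch, assuming a sprite that already has a custom physics shape defined (the class name is just illustrative):

using System.Collections.Generic;
using UnityEngine;

public class PhysicsShapeToCollider : MonoBehaviour
{
    public Sprite sprite; // sprite with a custom physics shape set up in the Sprite Editor

    void Start()
    {
        var polygonCollider = gameObject.AddComponent<PolygonCollider2D>();

        int shapeCount = sprite.GetPhysicsShapeCount();
        polygonCollider.pathCount = shapeCount;

        var points = new List<Vector2>();
        for (int i = 0; i < shapeCount; i++)
        {
            // Fills 'points' with the outline of the i-th physics shape (in local sprite space).
            sprite.GetPhysicsShape(i, points);
            polygonCollider.SetPath(i, points.ToArray());
        }
    }
}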

Related

How to draw line or rectangle to Plane in code?

I created a Plane 3D object in my scene and attached the C# script below to it. But the plane's color does not change; it still shows white. Why? BTW, the plane uses a Mesh Renderer component.
private Texture2D drawTexture;
private Color[] buffer;

// Start is called before the first frame update
void Start()
{
    Texture2D mainTexture = (Texture2D)GetComponent<Renderer>().material.mainTexture;
    Color[] pixels = mainTexture.GetPixels();

    buffer = new Color[pixels.Length];
    pixels.CopyTo(buffer, 0);

    // Change pixel color of drawing area
    for (int i = 0; i < pixels.Length; ++i)
    {
        buffer.SetValue(Color.red, i);
    }

    // Update pixels of texture with changed pixels
    drawTexture = new Texture2D(mainTexture.width, mainTexture.height, TextureFormat.RGBA32, false);
    drawTexture.filterMode = FilterMode.Point;
    drawTexture.SetPixels(buffer);
    drawTexture.Apply();

    GetComponent<Renderer>().material.mainTexture = drawTexture;
}

MeshRenderer has wrong bounds when rotated

When I try to get the bounds of my models (created in Blender) and show them in the Inspector:
As you can see, the bounds are correct when the objects are not rotated. But when they are (left-most object), the bounds become completely wrong.
Here is a script that shows / gets the bounds:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class GetBounds : MonoBehaviour
{
    public MeshRenderer mesh_renderer = null;
    public bool show_bounds = false;

    private void OnDrawGizmos()
    {
        if (!show_bounds) return;

        Gizmos.DrawWireCube(mesh_renderer.bounds.center, mesh_renderer.bounds.size);
        Gizmos.DrawWireSphere(mesh_renderer.bounds.center, 0.3f);
    }
}
How can I fix this?
In this thread I came across an image which explains it pretty well:
Unity does not recalculate Mesh.bounds all the time, only when you add a mesh for the first time or "manually" invoke Mesh.RecalculateBounds.
It then uses this local-space Mesh.bounds to calculate the translated, scaled and rotated Renderer.bounds in world space. That way it only ever has to transform the fixed 8 corner vertices of the bounding box.
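A rough sketch of that calculation (not Unity's actual implementation, just the idea): take the 8 corners of the local Mesh.bounds, transform each into world space, and wrap a new axis-aligned box around them. Because only the corners of the local box are transformed, a rotated object gets a box around its rotated box, which is why the rotated object's Renderer.bounds is too large.

using UnityEngine;

public static class RendererBoundsSketch
{
    // Approximation of Renderer.bounds: transform only the 8 corners of the
    // local-space Mesh.bounds and take the axis-aligned min/max of the results.
    public static Bounds ApproximateWorldBounds(Transform transform, Bounds localBounds)
    {
        Vector3 min = localBounds.min;
        Vector3 max = localBounds.max;

        var worldBounds = new Bounds(transform.TransformPoint(localBounds.center), Vector3.zero);
        for (int i = 0; i < 8; i++)
        {
            // Pick min or max per axis from the bit pattern of i to get each corner.
            var corner = new Vector3(
                (i & 1) == 0 ? min.x : max.x,
                (i & 2) == 0 ? min.y : max.y,
                (i & 4) == 0 ? min.z : max.z);

            worldBounds.Encapsulate(transform.TransformPoint(corner));
        }

        return worldBounds;
    }
}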
There was also a solution provided for getting the exact bounds calculated directly from the vertices. I adopted it and cleaned it up a bit:
public class GetBounds : MonoBehaviour
{
    public MeshRenderer mesh_renderer;
    public bool show_bounds;
    public MeshFilter meshFilter;
    public Mesh mesh;

    private void OnDrawGizmos()
    {
        if (!mesh_renderer) return;
        if (!show_bounds) return;

        if (!meshFilter) meshFilter = mesh_renderer.GetComponent<MeshFilter>();
        if (!meshFilter) return;

        if (!mesh) mesh = meshFilter.mesh;
        if (!mesh) return;

        var vertices = mesh.vertices;
        if (vertices.Length <= 0) return;

        // TransformPoint converts a local mesh vertex into a world position,
        // taking the transform's position, scale and orientation into account
        var min = transform.TransformPoint(vertices[0]);
        var max = min;

        // Iterate through all vertices except the first one
        for (var i = 1; i < vertices.Length; i++)
        {
            var v = transform.TransformPoint(vertices[i]);

            // Vector3.Max/Min already compare X, Y and Z component-wise
            max = Vector3.Max(v, max);
            min = Vector3.Min(v, min);
        }

        var bounds = new Bounds();
        bounds.SetMinMax(min, max);

        // Just to compare it to the original bounds
        Gizmos.DrawWireCube(mesh_renderer.bounds.center, mesh_renderer.bounds.size);
        Gizmos.DrawWireSphere(mesh_renderer.bounds.center, 0.3f);

        Gizmos.color = Color.green;
        Gizmos.DrawWireCube(bounds.center, bounds.size);
        Gizmos.DrawWireSphere(bounds.center, 0.3f);
    }
}
Result:
In WHITE: The MeshRenderer.bounds
In GREEN: The "correct" calculated vertex bounds

Unity - How to set the color of an individual face when clicking a mesh?

Yesterday others on Stack Overflow helped me figure out how to recolor a mesh triangle red by clicking on it. It works great; the only problem is that the 3 vertices that get recolored are shared with neighbouring triangles, which results in coloration that looks rather smeared. I'm really hoping there's a way to color only a single face (or normal, if you will).
I've attached the following script to my mesh; it uses a raycast to determine the surface coordinate and translates a green cube there. The GIF below illustrates the problem.
Once again, any help or insight into this would be greatly appreciated. Thanks!
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class MyRayDraw : MonoBehaviour
{
    public GameObject cube;
    private MeshRenderer meshRenderer;
    Mesh mesh;
    Vector3[] vertices;
    Color[] colorArray;

    private void Start()
    {
        mesh = transform.GetComponent<MeshFilter>().mesh;
        vertices = mesh.vertices;

        colorArray = new Color[vertices.Length];
        for (int k = 0; k < vertices.Length; k++)
        {
            colorArray[k] = Color.white;
        }
        mesh.colors = colorArray;
    }

    void Update()
    {
        if (Input.GetMouseButtonDown(0))
        {
            Ray ray = Camera.main.ScreenPointToRay(Input.mousePosition);
            if (Physics.Raycast(ray, out RaycastHit hit))
            {
                Snap(hit.point); // Moves the green cube

                int[] triangles = mesh.triangles;
                var vertIndex1 = triangles[hit.triangleIndex * 3 + 0];
                var vertIndex2 = triangles[hit.triangleIndex * 3 + 1];
                var vertIndex3 = triangles[hit.triangleIndex * 3 + 2];

                colorArray[vertIndex1] = Color.red;
                colorArray[vertIndex2] = Color.red;
                colorArray[vertIndex3] = Color.red;
                mesh.colors = colorArray;
            }
            else
            {
                Debug.Log("no hit");
            }
        }
    }
}
As you say, the issue is that the vertices are shared between triangles, but coloring is always vertex-based.
The idea for a solution is:
for each vertex of the hit triangle, check whether it is used by other triangles
if so, copy its position to create a new, separate vertex
update the triangle to use the newly created vertex indices
(optionally) use RecalculateNormals to make the triangles face outwards without having to care about the order of the provided vertices
using System.Linq;
using UnityEngine;
public class MyRayDraw : MonoBehaviour
{
    public GameObject cube;

    // Better to reference these already in the Inspector
    [SerializeField] private MeshFilter meshFilter;
    [SerializeField] private MeshRenderer meshRenderer;
    [SerializeField] private MeshCollider meshCollider;

    private Mesh _mesh;

    private void Awake()
    {
        if (!meshFilter) meshFilter = GetComponent<MeshFilter>();
        if (!meshRenderer) meshRenderer = GetComponent<MeshRenderer>();
        if (!meshCollider) meshCollider = GetComponent<MeshCollider>();

        _mesh = meshFilter.mesh;

        // Create a new colors array where the colors will be stored
        var colors = new Color[_mesh.vertices.Length];
        for (var k = 0; k < colors.Length; k++)
        {
            colors[k] = Color.white;
        }
        _mesh.colors = colors;
    }

    private void Update()
    {
        if (!Input.GetMouseButtonDown(0)) return;

        var ray = Camera.main.ScreenPointToRay(Input.mousePosition);
        if (Physics.Raycast(ray, out var hit))
        {
            Debug.Log(hit.triangleIndex);
            //cube.transform.position = hit.point;

            // Get the current vertices, triangles and colors
            var vertices = _mesh.vertices;
            var triangles = _mesh.triangles;
            var colors = _mesh.colors;

            // Get the vertex indices for this triangle
            var vert1Index = triangles[hit.triangleIndex * 3 + 0];
            var vert2Index = triangles[hit.triangleIndex * 3 + 1];
            var vert3Index = triangles[hit.triangleIndex * 3 + 2];

            // Get the positions of the vertices
            var vert1Pos = vertices[vert1Index];
            var vert2Pos = vertices[vert2Index];
            var vert3Pos = vertices[vert3Index];

            // Now, for all three vertices, we first check whether any other triangle is using them
            // by simply counting how often the indices appear in the triangles list
            var vert1Occurrences = 0;
            var vert2Occurrences = 0;
            var vert3Occurrences = 0;
            foreach (var index in triangles)
            {
                if (index == vert1Index) vert1Occurrences++;
                else if (index == vert2Index) vert2Occurrences++;
                else if (index == vert3Index) vert3Occurrences++;
            }

            // Create copied lists so we can dynamically add entries
            var newVertices = vertices.ToList();
            var newColors = colors.ToList();

            // If a vertex is shared, we need to add a new individual vertex
            // and a matching entry in the color array, and update the vertex index;
            // otherwise we simply reuse the vertex we already have
            if (vert1Occurrences > 1)
            {
                newVertices.Add(vert1Pos);
                newColors.Add(new Color());
                vert1Index = newVertices.Count - 1;
            }

            if (vert2Occurrences > 1)
            {
                newVertices.Add(vert2Pos);
                newColors.Add(new Color());
                vert2Index = newVertices.Count - 1;
            }

            if (vert3Occurrences > 1)
            {
                newVertices.Add(vert3Pos);
                newColors.Add(new Color());
                vert3Index = newVertices.Count - 1;
            }

            // Update the indices of the hit triangle to use the (possibly) new vertices instead
            triangles[hit.triangleIndex * 3 + 0] = vert1Index;
            triangles[hit.triangleIndex * 3 + 1] = vert2Index;
            triangles[hit.triangleIndex * 3 + 2] = vert3Index;

            // Color these vertices
            newColors[vert1Index] = Color.red;
            newColors[vert2Index] = Color.red;
            newColors[vert3Index] = Color.red;

            // Write everything back
            _mesh.vertices = newVertices.ToArray();
            _mesh.triangles = triangles;
            _mesh.colors = newColors.ToArray();
            _mesh.RecalculateNormals();
        }
        else
        {
            Debug.Log("no hit");
        }
    }
}
Note, however, that this works for simple vertex coloring but might not for complex textures with UV mapping: if the mesh uses UV-mapped textures, you would also have to update mesh.uv whenever a vertex is duplicated.
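A minimal sketch of that extra step (an illustrative helper, not part of the answer's code above): whenever a shared vertex is duplicated, every per-vertex channel the mesh uses needs a matching new entry, so the UV is copied along with the position and color.

using System.Collections.Generic;
using UnityEngine;

public static class VertexSplitHelper
{
    // Duplicate the vertex at 'vertIndex' in every per-vertex channel
    // and return the index of the new vertex.
    public static int DuplicateVertex(
        int vertIndex,
        List<Vector3> vertices,
        List<Color> colors,
        List<Vector2> uvs)
    {
        vertices.Add(vertices[vertIndex]);
        colors.Add(colors[vertIndex]);
        uvs.Add(uvs[vertIndex]); // keep the same texture coordinate as the original vertex

        return vertices.Count - 1;
    }
}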

Unity game - generate circuit with line drawing

I'm not a complete beginner in Unity, but not far from it ;).
Today I want to make a game which is a mix of pool and pinball.
The game physics is fine for me, but my big challenge is this:
What I want to do
Allow the player to draw their own circuit and then place the different items: bumpers, sticky walls...
I'm making it in 3D, but with the camera position it will look like 2D.
First of all, I don't know how to make curvy GameObjects. I know about Bezier curve generators, but not how to generate a shape with one.
Ideally, it would be great if I could draw a line (with the mouse or finger position), like a Line Renderer component, and then have Unity extrude it in one direction to form the circuit walls. And that's it, I have my circuit.
The second challenge will be to place (with mouse or finger) the different components: bumpers, players... with some rules: a sticky wall has to sit on a plain wall of the circuit, not in the middle; you always have to keep some space for the balls; etc.
Once the game circuit is created, we can play :):)
I work with the latest Unity version.
Obviously I don't expect a full solution :rolleyes:, but in your opinion, is it possible? Which approach and which techniques should I learn to do it? What could be the big issues? Any remark or advice is welcome.
I already began to look at https://unity3d.com/fr/learn/tutori...ation-tutorial/creating-meshes?playlist=17153, but I don't know if it's complete overkill or not.
Many thanks for your help,
Axel
Here is my solution: the same full script that is posted in my question at the top of this page.

Wrong result when using setPixel()

I've been dealing with a problem for a few days now with SetPixel() on a Texture2D.
What I'm doing is getting the mouse position or touch position (on Android) and then using that in SetPixel() with a transparent color. But the change ends up somewhere else instead of exactly where the mouse is...
using UnityEngine;

public class EarshPic : MonoBehaviour
{
    public SpriteRenderer sr;
    public SpriteRenderer srO;
    public Camera c;

    // Use this for initialization
    void Start()
    {
        CreateCover(); // This method is working fine
    }

    private void CreateCover()
    {
        Color color = new Color(0.5f, 0.5f, 0.5f, 1.0f);
        int x = srO.sprite.texture.width;
        int y = srO.sprite.texture.height;

        Texture2D tmpTexture = new Texture2D(srO.sprite.texture.width, srO.sprite.texture.height);
        for (int i = 0; i < tmpTexture.width; i++)
        {
            for (int j = 0; j < tmpTexture.height; j++)
            {
                tmpTexture.SetPixel(i, j, color);
            }
        }
        tmpTexture.Apply(true);

        sr.sprite = Sprite.Create(tmpTexture, srO.sprite.rect, new Vector2(0.5f, 0.5f), srO.sprite.pixelsPerUnit);
    }

    // I have a problem in this method
    // Vector2 v = mouse position or touch position
    void Eraser(Vector2 v)
    {
        Color color = new Color(0.5f, 0.5f, 0.5f, 0.0f);
        // SetPixel expects int pixel coordinates, so the position is cast here
        sr.sprite.texture.SetPixel((int)v.x, (int)v.y, color);
        sr.sprite.texture.Apply(true);
    }

    // Update is called once per frame
    void Update()
    {
        if (Input.mousePosition != null)
        {
            Eraser(Input.mousePosition);
        }

        if (Input.touchCount == 1)
        {
            Touch touch = Input.GetTouch(0);
            switch (touch.phase)
            {
                case TouchPhase.Moved:
                    Eraser(touch.position);
                    break;
            }
        }
    }
}
Problem
You are mixing up different coordinate systems. This matters whenever the texture is not exactly screen-sized: your click is in screen coordinates, but you are using it directly as texture coordinates when setting the transparency.
Solution
This requires 3D models with colliders and textures on them. For a 2D scenario you can use a box and set its texture to your 2D sprite. I don't know an easier method, but hopefully there is one.
You first have to convert the screen position into a world-space ray. This can be done with Camera.ScreenPointToRay.
Then you need to Physics.Raycast that ray to check which point on the 3D model's collider it intersects.
The intersection point can be converted to texture coordinates with RaycastHit.textureCoord. In the previous link you can find a complete code example of the whole process.
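A minimal sketch of that chain applied to the eraser above, assuming the drawing sits on a MeshCollider (RaycastHit.textureCoord only works with a MeshCollider) and the texture is readable; the field names are just placeholders:

using UnityEngine;

public class TextureEraser : MonoBehaviour
{
    public Camera cam;        // camera used for the screen-to-world conversion
    public Texture2D texture; // readable texture assigned to this object's material

    void Update()
    {
        if (!Input.GetMouseButton(0)) return;

        // 1. Screen position -> world-space ray.
        Ray ray = cam.ScreenPointToRay(Input.mousePosition);

        // 2. Find where the ray hits this object's collider.
        if (Physics.Raycast(ray, out RaycastHit hit) && hit.collider is MeshCollider)
        {
            // 3. Normalized UV of the hit point -> pixel coordinates.
            Vector2 uv = hit.textureCoord;
            int x = (int)(uv.x * texture.width);
            int y = (int)(uv.y * texture.height);

            texture.SetPixel(x, y, Color.clear);
            texture.Apply();
        }
    }
}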