I'm trying to make a nice-looking forest for a new game I'm working on.
But there's a problem with the rendering of my shaders. I currently have it set up so that nearby trees are rendered with a 2D sprite renderer, while trees that aren't close enough are rendered using the shader instead. The problem is with the trees that use the shader: when I look at them and move my camera up, they just stop rendering. Why?
Here's a gif of what i mean.
https://gyazo.com/64acbf5cadd9a89b0ba2cd5f123605ce
And here's my shader script.
Shader "Custom/Tree_Billboard"
{
Properties
{
[PerRendererData] _MainTex("Sprite Texture", 2D) = "white" {}
_Color("Tint", Color) = (1,1,1,1)
//_Time ("Time", Float) = 0
[MaterialToggle] PixelSnap("Pixel snap", Float) = 0
}
SubShader
{
Tags
{
"Queue" = "Transparent"
"DisableBatching" = "True"
"SortingLayer" = "Resources_Sprites"
"IgnoreProjector" = "True"
"RenderType" = "Transparent"
"PreviewType" = "Plane"
"CanUseSpriteAtlas" = "True"
}
Cull Off
Lighting Off
ZWrite Off
Blend One OneMinusSrcAlpha
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma target 2.0
#pragma multi_compile _ PIXELSNAP_ON
#pragma multi_compile _ ETC1_EXTERNAL_ALPHA
#include "UnityCG.cginc"
// uniform Float _Time;
struct appdata_t
{
float4 vertex : POSITION;
float4 color : COLOR;
float2 texcoord : TEXCOORD0;
UNITY_VERTEX_INPUT_INSTANCE_ID
};
struct v2f
{
float4 vertex : SV_POSITION;
fixed4 color : COLOR;
float2 texcoord : TEXCOORD0;
UNITY_VERTEX_OUTPUT_STEREO
};
fixed4 _Color;
v2f vert(appdata_t IN)
{
v2f OUT;
UNITY_SETUP_INSTANCE_ID(IN);
UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(OUT);
// OUT.vertex = UnityObjectToClipPos(IN.vertex);
OUT.texcoord = IN.texcoord;
OUT.color = IN.color * _Color;
// #ifdef PIXELSNAP_ON
OUT.vertex = mul(UNITY_MATRIX_P,
mul(UNITY_MATRIX_MV, float4(0.0, 4.1, 0.0, 1.0))
- float4(IN.vertex.x, -IN.vertex.y, 0.0, 0.0)
* float4(6.0, 8.0, 1.0, 1.0));
// OUT.vertex = UnityPixelSnap (OUT.vertex);
// #endif
return OUT;
}
sampler2D _MainTex;
sampler2D _AlphaTex;
fixed4 SampleSpriteTexture(float2 uv)
{
fixed4 color = tex2D(_MainTex, uv);
#if ETC1_EXTERNAL_ALPHA
// get the color from an external texture (usecase: Alpha support for ETC1 on android)
color.a = tex2D(_AlphaTex, uv).r;
#endif //ETC1_EXTERNAL_ALPHA
return color;
}
fixed4 frag(v2f IN) : SV_Target
{
fixed4 c = SampleSpriteTexture(IN.texcoord) * IN.color;
c.rgb *= c.a;
return c;
}
ENDCG
}
}
}
Here's what "jvo3dc" said on the Unity Forum's
"The objects get frustum culled. The culling is done on the bounding box of the object.
You're adjusting the vertices in the shader so they fall outside the original bounding box. This means the culling process can sometimes assume they are invisible, while they actually are visible.
The solution is to match the bounding box with the actual maximum displacement in the vertex shader."
So I searched a bit on Google and found a solution. I created a new quad GameObject, added a new script to it, and put this in its Start() method, which seems to have fixed it:
transform.GetChild(0).GetComponent<MeshFilter>().sharedMesh.bounds = new Bounds(new Vector3(0, 0, 0), new Vector3(1, 1,1) * 10.0f);
(Here's the Unity forum post)
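For completeness, here is roughly what that fix looks like as a whole component; a minimal sketch, assuming the billboard quad is the first child of the object this script sits on and that 10 units is enough to cover the largest offset the vertex shader applies (the class and field names are my own):

using UnityEngine;

// Hypothetical helper: enlarges the render bounds so the billboard quad is no longer
// frustum-culled when the vertex shader moves its vertices outside the original box.
public class ExpandBillboardBounds : MonoBehaviour
{
    // Must be at least as large as the maximum displacement done in the vertex shader.
    public float boundsSize = 10.0f;

    void Start()
    {
        var meshFilter = transform.GetChild(0).GetComponent<MeshFilter>();

        // Note: this modifies the shared mesh, so every renderer using that mesh
        // gets the enlarged bounds.
        meshFilter.sharedMesh.bounds = new Bounds(Vector3.zero, Vector3.one * boundsSize);
    }
}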
This is clearly not a shader problem but a problem with your clip plane. Check your camera settings first.
I'm trying to write a sprite shader that can turn normal sprites into pixel art.
It works relatively well inside the texture, but the whole thing always keeps the shape of the original sprite instead of being made of whole squares.
I suspect that's because there are originally no vertices there, so the shader never runs over those spots, which is why the squares are cut off.
Can I somehow make this possible anyway?
Original Image
Pixelated
Shader "Own/Pixel"
{
Properties
{
[PerRendererData]_MainTex("Texture", 2D) = "white" {}
_PosCorrection("Pos Correction", Range(0,1000)) = 1
_ColorCorrection("Color Correction", Float) = 1
}
SubShader
{
Tags
{
"Queue" = "AlphaTest"
"IgnoreProjector" = "True"
"RenderType" = "Transparent"
"PreviewType" = "Plane"
"CanUseSpriteAtlas" = "True"
}
ZWrite off
Cull Off
Lighting Off
Blend One OneMinusSrcAlpha
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile _ PIXELSNAP_ON
#include "UnityCG.cginc"
struct input
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct output
{
float2 uv : TEXCOORD0;
fixed4 color : COLOR;
float4 vertex : SV_POSITION;
};
float _ColorCorrection;
float _PosCorrection;
output vert(input v)
{
output o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.vertex = UnityPixelSnap(o.vertex);
o.uv = v.uv;
return o;
}
sampler2D _MainTex;
fixed4 frag(output i) : COLOR
{
float4 col = tex2D(_MainTex, round(i.uv*_PosCorrection) / _PosCorrection);
//// just invert the colors
//col.r = round(col.r * _ColorCorrection);
//col.g = round(col.g * _ColorCorrection);
//col.b = round(col.b * _ColorCorrection);
col.rgb *= col.a;
//return float4(col.rgb / _ColorCorrection,col.a);
return col;
}
ENDCG
}
}
}
This will be my second question regarding my ongoing VR project.
I am making a photosphere Cardboard app.
I have added a sphere and placed the main camera at position 0,0,0 inside of it.
Inside the sphere I have placed a 3D cube. If the user gazes at it for a specific number of seconds, the equirectangular texture on the sphere changes via script.
Now I am using the GvrReticlePointer in this project, and I can't seem to find the white dot anywhere except when it is on the 3D cube.
I would really like to know what is causing the reticle to disappear in the rest of the scene and how to fix it.
Thanks in advance.
Answering this question myself!
Just use this shader. Works like a charm!
Shader "Custom/Equirectangular" {
Properties {
_Color ("Main Color", Color) = (1,1,1,1)
_MainTex ("Diffuse (RGB) Alpha (A)", 2D) = "gray" {}
}
SubShader{
Pass {
Cull Front
Tags {"LightMode" = "Always"}
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma fragmentoption ARB_precision_hint_fastest
#pragma glsl
#pragma target 3.0
#include "UnityCG.cginc"
struct appdata {
float4 vertex : POSITION;
float3 normal : NORMAL;
};
struct v2f
{
float4 pos : SV_POSITION;
float3 normal : TEXCOORD0;
};
v2f vert (appdata v)
{
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.normal = v.normal;
return o;
}
sampler2D _MainTex;
#define PI 3.141592653589793
inline float2 RadialCoords(float3 a_coords)
{
float3 a_coords_n = normalize(a_coords);
float lon = atan2(a_coords_n.z, a_coords_n.x);
float lat = acos(a_coords_n.y);
float2 sphereCoords = float2(lon, lat) * (1.0 / PI);
//return float2(sphereCoords.x * 0.5 + 0.5, 1 - sphereCoords.y);
return float2(1 - (sphereCoords.x * 0.5 + 0.5), 1 - sphereCoords.y);
}
float4 frag(v2f IN) : COLOR
{
float2 equiUV = RadialCoords(IN.normal);
return tex2D(_MainTex, equiUV);
}
ENDCG
}
}
FallBack "VertexLit"
}
I am making a 360 viewer in Unity. To view a 360 photo I used to have a cubemap attached to a skybox, and that worked great, but the weight of the cubemaps forced me to switch to textures.
All of the 360 viewer tutorials say to just put a sphere with a shader on it and put the camera inside. When I do this it doesn't work very well, because when I look at the top or bottom I see the image warped like so (the chairs are supposed to look normal):
It did not happen when I used a skybox.
Does anyone know why this is happening?
Thank you very much!
The shader you chose does not handle the equirectangular mapping very well. At the poles (top and bottom) of the sphere a lot of image information has to be mapped onto very little space, which leads to the artifacts you are seeing.
You can write a specialized shader to improve the coordinate mapping from your equirectangular image onto the sphere. Such a shader has been posted on the Unity forums:
Shader "Custom/Equirectangular" {
Properties {
_Color ("Main Color", Color) = (1,1,1,1)
_MainTex ("Diffuse (RGB) Alpha (A)", 2D) = "gray" {}
}
SubShader{
Pass {
Tags {"LightMode" = "Always"}
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma fragmentoption ARB_precision_hint_fastest
#pragma glsl
#pragma target 3.0
#include "UnityCG.cginc"
struct appdata {
float4 vertex : POSITION;
float3 normal : NORMAL;
};
struct v2f
{
float4 pos : SV_POSITION;
float3 normal : TEXCOORD0;
};
v2f vert (appdata v)
{
v2f o;
o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
o.normal = v.normal;
return o;
}
sampler2D _MainTex;
#define PI 3.141592653589793
inline float2 RadialCoords(float3 a_coords)
{
float3 a_coords_n = normalize(a_coords);
float lon = atan2(a_coords_n.z, a_coords_n.x);
float lat = acos(a_coords_n.y);
float2 sphereCoords = float2(lon, lat) * (1.0 / PI);
return float2(sphereCoords.x * 0.5 + 0.5, 1 - sphereCoords.y);
}
float4 frag(v2f IN) : COLOR
{
float2 equiUV = RadialCoords(IN.normal);
return tex2D(_MainTex, equiUV);
}
ENDCG
}
}
FallBack "VertexLit"
}
Again, it's not my own code, but I tested it on Android devices and as a standalone PC build, and it results in very smooth poles.
Please note: this shader does not flip the normals of your sphere, so if you want your camera to sit inside the sphere you have to invert its normals with a 3D program or in the shader. Try adding Cull Front after line 9 above and the shader will apply its texture to the "wrong" side of the model.
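If it helps, this is roughly how the material ends up being used; a minimal sketch under my own assumptions (the component name, the inspector fields, and assigning the texture at runtime are not part of the original answer):

using UnityEngine;

// Hypothetical setup helper: puts a material using the equirectangular shader on the
// sphere and keeps the camera at the sphere's centre so every view direction maps
// onto the 360 photo.
public class PhotoSphereSetup : MonoBehaviour
{
    public Texture2D equirectangularPhoto; // the 360 photo, assigned in the inspector
    public MeshRenderer sphereRenderer;    // the sphere the camera sits inside

    void Start()
    {
        var material = new Material(Shader.Find("Custom/Equirectangular"));
        material.mainTexture = equirectangularPhoto; // drives _MainTex in the shader
        sphereRenderer.sharedMaterial = material;

        // Keep the camera at the centre of the sphere.
        Camera.main.transform.position = sphereRenderer.transform.position;
    }
}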
I'm a beginner and I had to do a lot just to understand this thread. This is what worked for me: I just combined the answers and put it all in one script. I'm pretty sure I will forget this in a few weeks' time, so I'm putting it here for posterity.
Shader "Custom/Equirectangular" {
Properties {
_Color ("Main Color", Color) = (1,1,1,1)
_MainTex ("Diffuse (RGB) Alpha (A)", 2D) = "gray" {}
}
SubShader{
Pass {
Tags {"LightMode" = "Always"}
Cull Front
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma fragmentoption ARB_precision_hint_fastest
#pragma glsl
#pragma target 3.0
#include "UnityCG.cginc"
struct appdata {
float4 vertex : POSITION;
float3 normal : NORMAL;
};
struct v2f
{
float4 pos : SV_POSITION;
float3 normal : TEXCOORD0;
};
v2f vert (appdata v)
{
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.normal = v.normal;
return o;
}
sampler2D _MainTex;
#define PI 3.141592653589793
inline float2 RadialCoords(float3 a_coords)
{
float3 a_coords_n = normalize(a_coords);
float lon = atan2(a_coords_n.z, a_coords_n.x);
float lat = acos(a_coords_n.y);
float2 sphereCoords = float2(lon, lat) * (1.0 / PI);
return float2(1 - (sphereCoords.x * 0.5 + 0.5), 1 - sphereCoords.y);
}
float4 frag(v2f IN) : COLOR
{
float2 equiUV = RadialCoords(IN.normal);
return tex2D(_MainTex, equiUV);
}
ENDCG
}
}
FallBack "VertexLit"
}
And here is another shader, which flips the sphere's normals instead:
'Shader "Flip Normals" {
Properties {
_MainTex ("Base (RGB)", 2D) = "white" {}
}
SubShader {
Tags { "RenderType" = "Opaque" }
Cull Front
CGPROGRAM
#pragma surface surf Lambert vertex:vert
sampler2D _MainTex;
struct Input {
float2 uv_MainTex;
float4 color : COLOR;
};
void vert(inout appdata_full v)
{
v.normal.xyz = v.normal * -1;
}
void surf (Input IN, inout SurfaceOutput o) {
fixed3 result = tex2D(_MainTex, IN.uv_MainTex);
o.Albedo = result.rgb;
o.Alpha = 1;
}
ENDCG
}
Fallback "Diffuse"
}
First of all, I don't have any idea about shader programming, but I need it for my game, so I found a clip-area shader on the internet and then tried to modify it and add some features. I added the shader to my sprite and checked it on my PC and some Android devices, but when I checked it on an HTC One (my friend's phone) a weird problem appeared.
As you can see, the left side is not being clipped visually.
The first shader I found only had a width and length for clipping the sprite; I added a width from the left, a width from the right, and a color tint to it.
Here is the code:
Shader "Sprites/ClipAreaWithAlpha"
{
Properties
{
_Color ("Color Tint", Color) = (1,1,1,1)
_MainTex ("Base (RGB), Alpha (A)", 2D) = "white" {}
_Length ("Length", Range(0.0, 1.0)) = 1.0
_WidthR("Width from right", Range(0.0, 1.0)) = 1.0
_WidthL ("Width from left", Range(0.0, 1.0)) = 0.0
}
SubShader
{
LOD 200
Tags
{
"Queue" = "Transparent"
"IgnoreProjector" = "True"
"RenderType" = "Transparent"
}
Pass
{
Cull Off
Lighting Off
ZWrite Off
Offset -1, -1
Fog { Mode Off }
ColorMask RGB
Blend SrcAlpha OneMinusSrcAlpha
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
sampler2D _MainTex;
float4 _MainTex_ST;
float _Length;
float _WidthR;
float _WidthL;
half4 _Color;
struct appdata_t
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
};
struct v2f
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
half4 color : COLOR;
};
v2f vert (appdata_t v)
{
v2f o;
o.vertex = mul(UNITY_MATRIX_MVP, v.vertex);
o.texcoord = v.texcoord;
o.color = _Color;
return o;
}
half4 frag (v2f IN) : COLOR
{
if ((IN.texcoord.x<_WidthL) || (IN.texcoord.x>_WidthR) || (IN.texcoord.y<0) || (IN.texcoord.y>_Length))
{
half4 colorTransparent = half4(0,0,0,0) ;
return colorTransparent;
}
else
{
half4 tex = tex2D(_MainTex, IN.texcoord);
tex.a = IN.color.a;
return tex;
}
}
ENDCG
}
}
}
As I said before, this shader works fine on the other Android devices I checked, but on the HTC One it doesn't work correctly. I don't know where the problem is; I'd be thankful for a solution.
The shader also produces this warning: "MaterialPropertyBlock is used to modify these values".
I have had the same problem. Try setting the value of Generate Mip Maps to off, then the shader works correctly.
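If you'd rather do that from code than through the texture import settings in the inspector, something along these lines should work (an editor-only sketch; the menu path and class name are my own):

using UnityEditor;
using UnityEngine;

// Editor-only sketch: turns "Generate Mip Maps" off for the currently selected texture.
public static class DisableMipMaps
{
    [MenuItem("Tools/Disable Mip Maps On Selected Texture")]
    static void Disable()
    {
        var texture = Selection.activeObject as Texture2D;
        if (texture == null) return;

        string path = AssetDatabase.GetAssetPath(texture);
        var importer = (TextureImporter)AssetImporter.GetAtPath(path);
        importer.mipmapEnabled = false;   // same as unticking "Generate Mip Maps"
        importer.SaveAndReimport();
    }
}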
I am trying to create a shader that can be used to clip 2D sprites in a game. I found this shader in another question:
Shader "Sprites/ClipArea"
{
Properties
{
_MainTex ("Base (RGB), Alpha (A)", 2D) = "white" {}
_Length ("Length", Range(0.0, 1.0)) = 1.0
_Width ("Width", Range(0.0, 1.0)) = 0.5
}
SubShader
{
LOD 200
Tags
{
"Queue" = "Transparent"
"IgnoreProjector" = "True"
"RenderType" = "Transparent"
}
Pass
{
Cull Off
Lighting Off
ZWrite Off
Offset -1, -1
Fog { Mode Off }
ColorMask RGB
Blend SrcAlpha OneMinusSrcAlpha
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
sampler2D _MainTex;
float4 _MainTex_ST;
float _Length;
float _Width;
struct appdata_t
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
};
struct v2f
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
};
v2f vert (appdata_t v)
{
v2f o;
o.vertex = mul(UNITY_MATRIX_MVP, v.vertex);
o.texcoord = v.texcoord;
return o;
}
half4 frag (v2f IN) : COLOR
{
if ((IN.texcoord.x<0) || (IN.texcoord.x>_Width) || (IN.texcoord.y<0) || (IN.texcoord.y>_Length))
{
half4 colorTransparent = half4(0,0,0,0) ;
return colorTransparent ;
}
return tex2D(_MainTex, IN.texcoord);
}
ENDCG
}
}
}
which works perfectly on single sprites, but I am using sprite sheets divided with the Unity Sprite Editor.
The _Width variable in the shader covers the whole sprite sheet, not just the sprite I am working on. I searched for a way to get the current sprite's rect inside the shader but couldn't find anything.
Well, after pulling my hair out for three days, I managed to find a workaround.
After reading more about how shaders work and their role in the pipeline, I realized that the sprite rect info is probably not available in the shader for one simple reason: almost all shaders (except mine) don't need it. The job of a shader is to take a vertex, modify its position (if needed) in the vertex function, and then decide a pixel's color in the fragment function. It doesn't care about the whole sprite; it only needs to look up the color for a given fragment from the texture using its texture coordinates.
I'm sure this is trivial for people who work with shaders, but it took me a while to realize it (this was my first ever shader).
So as a workaround I used shader properties to pass in the MinX and MaxX of the current sprite within the sprite sheet, and the shader now looks like this:
Shader "Sprites/ClipArea"
{
Properties
{
_MainTex ("Base (RGB), Alpha (A)", 2D) = "white" {}
_Fill ("Fill", Range(0.0, 1.0)) = 1.0
_MinX ("MinX", Float) = 0
_MaxX ("MaxX", Float) = 1
}
SubShader
{
LOD 200
Tags
{
"Queue" = "Transparent"
"IgnoreProjector" = "True"
"RenderType" = "Transparent"
}
Pass
{
Cull Off
Lighting Off
ZWrite Off
Offset -1, -1
Fog { Mode Off }
ColorMask RGB
Blend SrcAlpha OneMinusSrcAlpha
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
sampler2D _MainTex;
float4 _MainTex_ST;
float _MinX;
float _MaxX;
float _Fill;
struct appdata_t
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
};
struct v2f
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
};
v2f vert (appdata_t v)
{
v2f o;
o.vertex = mul(UNITY_MATRIX_MVP, v.vertex);
o.texcoord = v.texcoord;
return o;
}
half4 frag (v2f IN) : COLOR
{
if ((IN.texcoord.x<_MinX)|| (IN.texcoord.x>(_MinX+_Fill*(_MaxX-_MinX))))
{
half4 colorTransparent = half4(0,0,0,0) ;
return colorTransparent ;
}
return tex2D(_MainTex, IN.texcoord);
}
ENDCG
}
}
}
To use this shader you need to create a material that uses it and assign that material to the SpriteRenderer. You can change the Fill amount, MinX, and MaxX from the inspector, or call spriteRenderer.material.SetFloat(property, value) from code.
I then faced another issue with animated sprites: I had to keep updating MinX and MaxX every frame, but when I did it in the Update function the animation started flickering, because Update was being called after the sprite had already been rendered. So I had to use the main camera's OnPreRender event to update the material properties.
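Putting those two points together, the C# side could look roughly like this; a minimal sketch under my own assumptions (the component and field names, and that the sprites use rectangle packing so Sprite.textureRect is available), not the exact script from the original answer:

using UnityEngine;

// Hypothetical updater: computes the current sprite's UV range inside the sprite sheet
// and pushes it to the material just before cameras render, so animated sprites
// don't flicker the way an Update()-based version did.
[RequireComponent(typeof(SpriteRenderer))]
public class ClipAreaUpdater : MonoBehaviour
{
    [Range(0f, 1f)] public float fill = 1f;

    SpriteRenderer spriteRenderer;

    void Awake()     { spriteRenderer = GetComponent<SpriteRenderer>(); }
    void OnEnable()  { Camera.onPreRender += UpdateClipArea; }
    void OnDisable() { Camera.onPreRender -= UpdateClipArea; }

    void UpdateClipArea(Camera cam)
    {
        Sprite sprite = spriteRenderer.sprite;
        if (sprite == null) return;

        // textureRect is the sprite's rectangle in pixels inside the sheet;
        // dividing by the sheet width gives the UV range the shader expects.
        Rect rect = sprite.textureRect;
        float minX = rect.xMin / sprite.texture.width;
        float maxX = rect.xMax / sprite.texture.width;

        spriteRenderer.material.SetFloat("_MinX", minX);
        spriteRenderer.material.SetFloat("_MaxX", maxX);
        spriteRenderer.material.SetFloat("_Fill", fill);
    }
}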
Maybe there is a better way to achieve all this but this is the best I could come up with, and I hope this will benefit someone trying to achieve the same effect.
I have improved Khaled-AbuA-lkheir's shader by adding the base sprite color:
Shader "Sprites/ClipArea"
{
Properties
{
_MainTex ("Base (RGB), Alpha (A)", 2D) = "white" {}
_Fill ("Fill", Range(0.0, 1.0)) = 1.0
_MinX ("MinX", Float) = 0
_MaxX ("MaxX", Float) = 1
}
SubShader
{
LOD 200
Tags
{
"Queue" = "Transparent"
"IgnoreProjector" = "True"
"RenderType" = "Transparent"
}
Pass
{
Cull Off
Lighting Off
ZWrite Off
Offset -1, -1
Fog { Mode Off }
ColorMask RGB
Blend SrcAlpha OneMinusSrcAlpha
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
sampler2D _MainTex;
float4 _MainTex_ST;
float _MinX;
float _MaxX;
float _Fill;
struct appdata_t
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
fixed4 color : COLOR;
};
struct v2f
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
fixed4 color : COLOR;
};
v2f vert (appdata_t v)
{
v2f o;
o.vertex = mul(UNITY_MATRIX_MVP, v.vertex);
o.texcoord = v.texcoord;
o.color = v.color;
return o;
}
half4 frag (v2f IN) : COLOR
{
if ((IN.texcoord.x<_MinX)|| (IN.texcoord.x>(_MinX+_Fill*(_MaxX-_MinX))))
{
half4 colorTransparent = half4(0,0,0,0) ;
return colorTransparent ;
}
else
{
return tex2D(_MainTex, IN.texcoord)*IN.color;
}
}
ENDCG
}
}
}
I'm necroing this because I think it can be useful:
I added the possibility to set _MinX greater than _MaxX.
That way you get "right to left" clipping instead of "left to right".
Shader "Sprites/ClipArea"
{
Properties
{
_MainTex("Base (RGB), Alpha (A)", 2D) = "white" {}
_Fill("Fill", Range(0.0, 1.0)) = 1.0
_MinX("MinX", Float) = 0
_MaxX("MaxX", Float) = 1
}
SubShader
{
LOD 200
Tags
{
"Queue" = "Transparent"
"IgnoreProjector" = "True"
"RenderType" = "Transparent"
}
Pass
{
Cull Off
Lighting Off
ZWrite Off
Offset -1, -1
Fog { Mode Off }
ColorMask RGB
Blend SrcAlpha OneMinusSrcAlpha
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
sampler2D _MainTex;
float4 _MainTex_ST;
float _MinX;
float _MaxX;
float _Fill;
struct appdata_t
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
fixed4 color : COLOR;
};
struct v2f
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
fixed4 color : COLOR;
};
v2f vert(appdata_t v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.texcoord = v.texcoord;
o.color = v.color;
return o;
}
half4 frag(v2f IN) : COLOR
{
if (_MinX < _MaxX && ((IN.texcoord.x < _MinX) || (IN.texcoord.x > (_MinX + _Fill * (_MaxX - _MinX)))) ||
_MinX > _MaxX && ((IN.texcoord.x > _MinX) || (IN.texcoord.x < (_MinX + _Fill * (_MaxX - _MinX)))))
{
return half4(0, 0, 0, 0);
}
else
{
return tex2D(_MainTex, IN.texcoord) * IN.color;
}
}
ENDCG
}
}
}