I ported a plasma ball shader from Shadertoy to Unity as an image effect attached to the camera. It works fine in the Editor and in a Windows standalone build, but it does not work on Android devices: it just flashes blue and black images.
Here is what it looks like in Unity Editor and Windows Build:
Here is what it looks like on Android:
The ported Shader code:
Shader "Hidden/Plasma Space Ball Image Effect"
{
Properties
{
iChannel0("iChannel0", 2D) = "white" {}
//[MaterialToggle] _isToggled("isToggle", Float) = 0
}
SubShader
{
// No culling or depth
Cull Off ZWrite Off ZTest Always
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
v2f vert(appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.uv;
return o;
}
sampler2D iChannel0;
//Ported from https://www.shadertoy.com/view/MstXzf
float3 hb(float2 pos, float t, float time, float2 rot, float size, sampler2D tex0)
{
float2 newUv = 0.2*(pos / (1.2 - t) + 0.5*time*rot);
//float texSample = texture(tex0, newUv).b;
float texSample = tex2D(tex0, newUv).b;
float uOff = 0.2*(texSample + 0.3*time); //lsf3RH
float2 starUV = newUv + float2(uOff, 0.0);
//return float3(0.3, 0.3, 1.0) + 1.3*texture(tex0, starUV).b;
return float3(0.3, 0.3, 1.0) + 1.3*tex2D(tex0, starUV).b;
}
float4 blob(float2 uv, float size, float time, sampler2D tex0)
{
float2 center = float2(0., 0.);
float2 pos = center - uv;
float t = length(pos);
float st = size - t;
float2 rot = 0.005*float2(sin(time / 16.), sin(time / 12.)); //MslGWN
float alpha = smoothstep(0.0, 0.2*size, st);
float3 col = hb(pos, t, time, rot, size, tex0);
float a1 = smoothstep(-1.4, -1.0, -col.b);
col = lerp(col, hb(pos, t, -time, -rot, size, tex0), a1);
col += 0.8*exp(-12.*abs(t - 0.8*size) / size);
float a2 = smoothstep(-1.4, -1.0, -col.b);
alpha -= a2;
//float crosshair = float((abs(pos.x) < 0.005 && abs(pos.y) < 0.15) || (abs(pos.y) < 0.005&&abs(pos.x) < 0.15));
//return float4(col, alpha) + crosshair;
return float4(col, alpha);
}
float4 main_(float2 uv, float size)
{
return blob(uv, size, _Time.y, iChannel0);
}
fixed4 frag(v2f i) : SV_Target
{
float4 fragColor = 0;
float2 fragCoord = i.vertex.xy;
///---------------------------------------------------
float2 uv = fragCoord.xy / _ScreenParams.xy;
float2 cr = uv*2. - 1.;
cr.x *= _ScreenParams.x / _ScreenParams.y;
//late addition to elaborate background motion, could be reused later on
float2 rot = 0.5*float2(sin(_Time.y / 16.), sin(_Time.y / 12.));
float4 ball = clamp(main_(cr, sin(_Time.y)*0.05 + 0.5 + 0.5), 0., 1.);
//float3 bg = float3(0.7, 0.7, 1.0)*texture(iChannel0, uv + rot + 0.1*ball.rb).b;
float3 bg = float3(0.7, 0.7, 1.0)*tex2D(iChannel0, uv + rot + 0.1*ball.rb).b;
//simulated gl blend
fragColor = float4(lerp(bg, ball.rgb, ball.a), 1.0);
//fragColor = lerp(fragColor,tex2D(iChannel0, i.uv).rgba,.5);
return fragColor;
}
ENDCG
}
}
}
You can find the image that is used for the iChannel0 input slot of the shader above here.
Things I've tried:
Adding the shader to the Graphics Settings so that Unity includes it in the build.
Disabling Auto Graphics API and trying OpenGLES2 and OpenGLES3.
Checking the log with Android Studio: no errors or warnings at all.
None of these solved the problem and I ran out of things to try.
Software and Device Info if that helps:
Unity 5.6.0f3
Android 4.4.2
This is used for learning and educational purposes, as I am studying the GLSL, HLSL, and Cg/ShaderLab shading languages. I just want to know why the ported shader is not working as expected on Android devices.
Why is it flashing blue and black images on Android?
You need to use the VPOS semantic to get screen-space positions in the fragment shader on OpenGLES2.
From Unity docs:
A fragment shader can receive the position of the pixel being rendered as a special VPOS semantic. This feature only exists starting with shader model 3.0, so the shader needs to have the #pragma target 3.0 compilation directive.
So to get screen space positions:
// note: no SV_POSITION in this struct
struct v2f {
float2 uv : TEXCOORD0;
};
v2f vert (
float4 vertex : POSITION, // vertex position input
float2 uv : TEXCOORD0, // texture coordinate input
out float4 outpos : SV_POSITION // clip space position output
)
{
v2f o;
o.uv = uv;
outpos = UnityObjectToClipPos(vertex);
return o;
}
fixed4 frag (v2f i, UNITY_VPOS_TYPE screenPos : VPOS) : SV_Target
{
// screenPos.xy will contain pixel integer coordinates.
float4 fragColor = 0;
float2 fragCoord = screenPos;
But you already pass in UVs, so maybe you can use those?
float2 uv = i.uv;
It turns out I was wrong. You don't get clip-space positions in the fragment shader on OpenGLES2; you get... 0. (Maybe someone can explain this?)
I made a small test shader:
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
float4 vert (float4 vertex : POSITION) : SV_Position
{
return UnityObjectToClipPos(vertex);
}
fixed4 frag (float4 screenPos : SV_Position) : SV_Target
{
float uvx = screenPos.x/_ScreenParams.x;
return float4(uvx, 0., 0., 1.);
}
ENDCG
and the line
float uvx = screenPos.x/_ScreenParams.x;
gets compiled as:
tmpvar_2.x = (0.0 / _ScreenParams.x); // OpenGLES2
u_xlat0 = gl_FragCoord.x / _ScreenParams.x; // OpenGLES3
But if you use the VPOS semantic,
fixed4 frag (float4 screenPos : VPOS) : SV_Target
the same line gets compiled as:
tmpvar_2.x = (gl_FragCoord.x / _ScreenParams.x); // OpenGLES2
u_xlat0 = gl_FragCoord.x / _ScreenParams.x; // OpenGLES3
So for OpenGLES2 it looks like you need to use the VPOS semantic to get positions in screen space in the fragment shader.
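Applied to the plasma shader above, the fragment entry point would then look something like this sketch (it assumes the SV_POSITION-free v2f and vert pair from the docs example; the rest of the body is unchanged from the original):
#pragma target 3.0 // VPOS requires shader model 3.0
fixed4 frag(v2f i, UNITY_VPOS_TYPE screenPos : VPOS) : SV_Target
{
    // screenPos.xy holds pixel coordinates on both OpenGLES2 and OpenGLES3
    float2 fragCoord = screenPos.xy;
    float2 uv = fragCoord / _ScreenParams.xy;
    float2 cr = uv * 2. - 1.;
    cr.x *= _ScreenParams.x / _ScreenParams.y;
    float2 rot = 0.5 * float2(sin(_Time.y / 16.), sin(_Time.y / 12.));
    float4 ball = clamp(main_(cr, sin(_Time.y) * 0.05 + 0.5 + 0.5), 0., 1.);
    float3 bg = float3(0.7, 0.7, 1.0) * tex2D(iChannel0, uv + rot + 0.1 * ball.rb).b;
    // simulated gl blend, as in the original
    return float4(lerp(bg, ball.rgb, ball.a), 1.0);
}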
I'm on 2019.4.30f1, using the old (built-in) render pipeline (i.e. neither URP nor HDRP), and using the blood decals from this pack: https://assetstore.unity.com/packages/vfx/particles/volumetric-blood-fluids-173863
The blood decals are created at runtime: when a player dies, some blood spurts out and the decal is created.
Here is how the decal looks at FOV 20, and this is good, how I expect it to look:
Next, if I merely change the camera FOV to 15, something goes really weird with the decal. There are parts that look like they are in the wrong place, almost like some parts have been shifted up or left, and some parts have been clipped off. But then other parts seem like they are still in the correct place:
If I switch back to FOV 20 it looks normal again, so it doesn't seem like anything is changing with the decal itself when I change FOV; it's just that viewed from a different FOV it looks completely different.
Here is a screenshot from scene view, showing the decal as well as some inspector info about it:
I've been tearing my hair out this entire week over this and can't seem to figure it out :/
Any advice would be much appreciated. Of course I can provide any other info required if there isn't enough to go on here.
edit: here is the decal shader
Shader "KriptoFX/BFX/BFX_Decal"
{
Properties
{
[HDR] _TintColor("Tint Color", Color) = (1,1,1,1)
_MainTex("NormalAlpha", 2D) = "white" {}
_LookupFade("Lookup Fade Texture", 2D) = "white" {}
_Cutout("Cutout", Range(0, 1)) = 1
_CutoutTex("CutoutDepth(XZ)", 2D) = "white" {}
[Space]
_SunPos("Sun Pos", Vector) = (1, 0.5, 1, 0)
}
SubShader
{
Tags{ "Queue" = "AlphaTest"}
Blend DstColor SrcColor
//Blend SrcAlpha OneMinusSrcAlpha
Cull Front
ZTest Always
ZWrite Off
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_fog
#pragma multi_compile_instancing
#pragma multi_compile _ USE_CUSTOM_DECAL_LAYERS
#include "UnityCG.cginc"
sampler2D _MainTex;
sampler2D _Flowmap;
sampler2D _LookupFade;
sampler2D _CutoutTex;
float4 _MainTex_ST;
float4 _MainTex_NextFrame;
float4 _CutoutTex_ST;
UNITY_INSTANCING_BUFFER_START(Props)
UNITY_DEFINE_INSTANCED_PROP(half4, _TintColor)
UNITY_DEFINE_INSTANCED_PROP(half, _Cutout)
UNITY_DEFINE_INSTANCED_PROP(float, _LightIntencity)
UNITY_INSTANCING_BUFFER_END(Props)
half4 _CutoutColor;
half4 _FresnelColor;
half4 _DistortionSpeedScale;
sampler2D _CameraDepthTexture;
sampler2D _LayerDecalDepthTexture;
half InterpolationValue;
half _AlphaPow;
half _DistortSpeed;
half _DistortScale;
float4 _SunPos;
half _DepthMul;
struct appdata_t {
float4 vertex : POSITION;
float4 normal : NORMAL;
half4 color : COLOR;
UNITY_VERTEX_INPUT_INSTANCE_ID
};
struct v2f {
float4 vertex : SV_POSITION;
half4 color : COLOR;
float4 screenUV : TEXCOORD0;
float3 ray : TEXCOORD1;
float3 viewDir : TEXCOORD2;
float4 screenPos : TEXCOORD3;
UNITY_FOG_COORDS(4)
UNITY_VERTEX_INPUT_INSTANCE_ID
UNITY_VERTEX_OUTPUT_STEREO
};
v2f vert(appdata_t v)
{
v2f o;
UNITY_SETUP_INSTANCE_ID(v);
UNITY_TRANSFER_INSTANCE_ID(v, o);
UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(o);
o.vertex = UnityObjectToClipPos(v.vertex);
o.color = v.color;
o.ray = UnityObjectToViewPos(v.vertex) * float3(-1, -1, 1);
o.screenUV = ComputeScreenPos(o.vertex);
o.viewDir = normalize(ObjSpaceViewDir(v.vertex));
o.screenPos = ComputeGrabScreenPos(o.vertex);
UNITY_TRANSFER_FOG(o,o.vertex);
return o;
}
half4 frag(v2f i) : SV_Target
{
UNITY_SETUP_INSTANCE_ID(i);
i.ray *= (_ProjectionParams.z / i.ray.z);
#if USE_CUSTOM_DECAL_LAYERS
float depth = Linear01Depth(tex2Dproj(_LayerDecalDepthTexture, i.screenUV));
float depthMask = Linear01Depth(tex2Dproj(_CameraDepthTexture, i.screenUV));
float fade = 1- saturate(100000 * (depth - depthMask));
#else
float depth = Linear01Depth(tex2Dproj(_CameraDepthTexture, i.screenUV));
#endif
float3 wpos = mul(unity_CameraToWorld, float4(i.ray * depth, 1)).xyz;
float3 opos = mul(unity_WorldToObject, float4(wpos, 1)).xyz;
float3 stepVal = saturate((0.5 - abs(opos.xyz)) * 10000);
half lookupHeight = tex2D(_LookupFade, float2(opos.y + 0.5, 0));
float projClipFade = stepVal.x * stepVal.y * stepVal.z * lookupHeight;
#if USE_CUSTOM_DECAL_LAYERS
projClipFade *= fade;
#endif
float2 uv = opos.xz + 0.5;
float2 uvMain = uv * _MainTex_ST.xy + _MainTex_ST.zw;
float2 uvCutout = (opos.xz + 0.5) * _CutoutTex_ST.xy + _CutoutTex_ST.zw;
half4 normAlpha = tex2D(_MainTex, uvMain);
half4 res = 0;
res.a = saturate(normAlpha.w * 2);
if (res.a < 0.1) discard;
normAlpha.xy = normAlpha.xy * 2 - 1;
float3 normal = normalize(float3(normAlpha.x, 1, normAlpha.y));
half3 mask = tex2D(_CutoutTex, uvCutout).xyz;
half cutout = 0.5 + UNITY_ACCESS_INSTANCED_PROP(Props, _Cutout) * i.color.a * 0.5;
half alphaMask = saturate((mask.r - (cutout * 2 - 1)) * 20) * res.a;
half colorMask = saturate((mask.r - (cutout * 2 - 1)) * 5) * res.a;
res.a = alphaMask;
res.a = saturate(res.a * projClipFade);
float intencity = UNITY_ACCESS_INSTANCED_PROP(Props, _LightIntencity);
float light = max(0.001, dot(normal, normalize(_SunPos.xyz)));
light = pow(light, 150) * 3 * intencity;
light *= (1 - mask.z * colorMask);
float4 tintColor = UNITY_ACCESS_INSTANCED_PROP(Props, _TintColor);
#if !UNITY_COLORSPACE_GAMMA
tintColor = tintColor * 1.35;
#endif
res.rgb = lerp(tintColor.rgb, tintColor.rgb * 0.25, mask.z * colorMask) + light;
half fresnel = (1 - dot(normal, normalize(i.viewDir)));
fresnel = pow(fresnel + 0.1, 5);
UNITY_APPLY_FOG_COLOR(i.fogCoord, res, half4(1, 1, 1, 1));
return lerp(0.5, res, res.a);
return res;
}
ENDCG
}
}
}
Comparing the two pictures, I see that BFXShaderAnimation does some precomputation at the beginning of the animation, and the results depend on camera settings and geometry. You should look for the code that does this initialization and call it again each time your camera settings or geometry change.
Idea: check if the decal plugin creates an additional hidden camera to do some computations (look into the hierarchy and search by "Camera" type).
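As a sketch of the re-initialization suggestion, a small script could watch for FOV changes and re-run the decal setup; ReinitializeDecal() below is a hypothetical placeholder for whatever initialization routine the pack actually exposes:
using UnityEngine;

// Sketch: re-run the decal initialization whenever the camera FOV changes.
public class DecalFovWatcher : MonoBehaviour
{
    public Camera targetCamera;
    float lastFov;

    void Start()
    {
        lastFov = targetCamera.fieldOfView;
    }

    void LateUpdate()
    {
        if (!Mathf.Approximately(targetCamera.fieldOfView, lastFov))
        {
            lastFov = targetCamera.fieldOfView;
            ReinitializeDecal();
        }
    }

    void ReinitializeDecal()
    {
        // Hypothetical placeholder: call the pack's initialization code here.
    }
}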
I'm currently stuck on a shader that I'm writing. I'm trying to create a rain shader.
I have set up 3 particle systems which simulate rain, and a camera to look at this simulation. The camera view is what I use as a texture. In my shader I am now trying to make a normal map from that texture, but I don't know how to do it.
Shader "Unlit/Rain"
{
Properties
{
_MainTex("Albedo (RGB)", 2D) = "white" {}
_Color("Color", Color) = (1,1,1,1)
_NormalIntensity("NormalIntensity",Float) = 1
}
SubShader
{
Tags { "RenderType" = "Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
#include "Lighting.cginc"
#include "AutoLight.cginc"
struct VertexInput {
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
float4 normal : NORMAL;
float3 tangent : TANGENT;
};
struct VertexOutput {
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
float2 uv1 : TEXCOORD1;
float4 normals : NORMAL;
float3 tangentSpaceLight: TANGENT;
};
sampler2D _MainTex;
float4 _MainTex_ST;
half4 _Color;
float _NormalIntensity;
VertexOutput vert(VertexInput v) {
VertexOutput o;
o.normals = v.normal;
o.uv1 = v.uv;
o.vertex = UnityObjectToClipPos( v.vertex );
// o.uv = TRANSFORM_TEX( v.uv, _MainTex ); // used for texture
return o;
}
float4 frag(VertexOutput i) : COLOR{
float4 col2 = tex2D(_MainTex, i.uv1);
return col2 * i.normals * 5;
}
ENDCG
}
}
}
This is what the camera sees. I set the TargetTexture for this camera to be a texture I created.
In my shader I then put that texture in the albedo property.
So what I want to do now is find the normals for that texture to create a bump map.
It looks like your "TargetTexture" is giving you back a height map. Here is a post I found about how to turn a height map into a normal map. I've mashed the code you had originally together with the core of that forum post and output the normals as color so you can test and see how this works:
Shader "Unlit/HeightToNormal"
{
Properties
{
_MainTex("Albedo (RGB)", 2D) = "white" {}
_Color("Color", Color) = (1,1,1,1)
_NormalIntensity("NormalIntensity",Float) = 1
_HeightMapSizeX("HeightMapSizeX",Float) = 1024
_HeightMapSizeY("HeightMapSizeY",Float) = 1024
}
SubShader
{
Tags { "RenderType" = "Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
#include "Lighting.cginc"
#include "AutoLight.cginc"
struct VertexInput {
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
float4 normal : NORMAL;
float3 tangent : TANGENT;
};
struct VertexOutput {
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
float2 uv1 : TEXCOORD1;
float4 normals : NORMAL;
//float3 tangentSpaceLight: TANGENT;
};
sampler2D _MainTex;
float4 _MainTex_ST;
half4 _Color;
float _NormalIntensity;
float _HeightMapSizeX;
float _HeightMapSizeY;
VertexOutput vert(VertexInput v) {
VertexOutput o;
o.uv = TRANSFORM_TEX( v.uv, _MainTex ); // used for texture
o.uv1 = v.uv;
o.normals = v.normal;
o.vertex = UnityObjectToClipPos(v.vertex);
return o;
}
float4 frag(VertexOutput i) : COLOR
{
float me = tex2D(_MainTex,i.uv1).x;
float n = tex2D(_MainTex,float2(i.uv1.x, i.uv1.y + 1.0 / _HeightMapSizeY)).x;
float s = tex2D(_MainTex,float2(i.uv1.x, i.uv1.y - 1.0 / _HeightMapSizeY)).x;
float e = tex2D(_MainTex,float2(i.uv1.x + 1.0 / _HeightMapSizeX,i.uv1.y)).x;
float w = tex2D(_MainTex,float2(i.uv1.x - 1.0 / _HeightMapSizeX,i.uv1.y)).x;
// defining starting normal as color has some interesting effects, generally makes this more flexible
float3 norm = _Color.rgb;
float3 temp = norm; //a temporary vector that is not parallel to norm
if (norm.x == 1)
temp.y += 0.5;
else
temp.x += 0.5;
//form a basis with norm being one of the axes:
float3 perp1 = normalize(cross(i.normals,temp));
float3 perp2 = normalize(cross(i.normals,perp1));
//use the basis to move the normal in its own space by the offset
float3 normalOffset = -_NormalIntensity * (((n - me) - (s - me)) * perp1 + ((e - me) - (w - me)) * perp2);
norm += normalOffset;
norm = normalize(norm);
// it's also interesting to output temp, perp1, and perp2, or combinations of the float samples.
return float4(norm, 1);
}
ENDCG
}
}
}
To generate a normal map from a height map, you use the oriented rate of change in your height map to come up with a normal vector, which can be represented using 3 float values (or color channels, if it's an image). You sample the point of the image you are on, and then sample small steps away from that point in the four cardinal directions. Using the cross product to guarantee orthogonality, you can define a basis. Scaling the two basis vectors by your oriented steps and adding them together gives a "normal offset", which is the 3D representation approximating the oriented change in value on your height map. Basically, it's your normal.
You can see the effects of me playing with normal intensity here, and the "normal color" here. When this looks right for your use case, you can try using normals as normals instead of colored output.
Some tweaking of values will probably still be required. Good luck!
I have a shader that allows me to create and rotate a 2 or 3 color gradient. My problem was that it was very heavy on the GPU, so I moved this part of the code from the fragment shader to the vertex shader:
fixed4 frag (v2f i) : SV_Target
{
//STARTS HERE
float2 uv = - (i.screenPos.xy / i.screenPos.w - 0.5)*2;
fixed3 c;
#if _BG_COLOR_GRADIENT2
c = lerp(_BgColor1,_BgColor3,clampValue(rotateUV(uv.xy,_BgColorRotation*PI).y,_BgColorPosition));
#elif _BG_COLOR_GRADIENT3
c = lerp3(_BgColor1,_BgColor2,_BgColor3,clampValue(rotateUV(uv.xy,_BgColorRotation*PI).y,_BgColorPosition),_BgColorPosition3);
#endif
//ENDS HERE
return fixed4(c, i.color.a);
}
Now my shader looks like this:
Shader "Custom/Gradient"
{
Properties
{
[KeywordEnum(Gradient2, Gradient3)] _BG_COLOR ("Color Type", Float) = 1
_Color("Color", Color) = (1, 1, 1, 1)
_BgColor1 ("Start Color",Color) = (0.667,0.851,0.937,1)
_BgColor2 ("Middle Color",Color) = (0.29, 0.8, 0.2,1)
_BgColor3 ("End Color",Color) = (0.29, 0.8, 0.2,1)
[GradientPositionSliderDrawer]
_BgColorPosition ("Gradient Position",Vector) = (0,1,0)
_BgColorRotation ("Gradient Rotation",Range(0,2)) = 0
_BgColorPosition3 ("Middle Size",Range(0,1)) = 0
}
SubShader
{
Tags{ "Queue" = "Background" "IgnoreProjectors"="True" }
Blend SrcAlpha OneMinusSrcAlpha
AlphaTest Greater .01
ColorMask RGB
Cull Off Lighting Off ZWrite Off
BindChannels {
Bind "Color", color
Bind "Vertex", vertex
Bind "TexCoord", texcoord
}
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma shader_feature _BG_COLOR_GRADIENT2 _BG_COLOR_GRADIENT3
#include "UnityCG.cginc"
#include "GradientHelper.cginc"
struct appdata
{
float4 vertex : POSITION;
fixed4 color : COLOR;
};
struct v2f
{
float4 pos : SV_POSITION;
float4 screenPos : TEXCOORD4;
fixed4 color : COLOR;
};
fixed4 _BgColor1;
fixed4 _BgColor2;
fixed4 _BgColor3;
float _BgColorRotation;
float2 _BgColorPosition;
float _BgColorPosition3;
float4 _Color;
v2f vert (appdata v)
{
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.screenPos = ComputeScreenPos(o.pos);
float2 uv = - (o.screenPos.xy / o.screenPos.w - 0.5)*2;
#if _BG_COLOR_GRADIENT2
o.color = lerp(_BgColor1,_BgColor3,clampValue(rotateUV(uv.xy,_BgColorRotation*PI).y,_BgColorPosition)) * v.color;
#elif _BG_COLOR_GRADIENT3
o.color = lerp3(_BgColor1,_BgColor2,_BgColor3,clampValue(rotateUV(uv.xy,_BgColorRotation*PI).y,_BgColorPosition),_BgColorPosition3) * v.color;
#endif
return o;
}
fixed4 frag (v2f i) : COLOR {
return i.color;
}
ENDCG
}
}
CustomEditor "Background.Editor.BackgroundGradientEditor"
}
(Here is my shader helper):
#ifndef GRADIENT_HELPER_INCLUDED
#define GRADIENT_HELPER_INCLUDED
#ifndef PI
#define PI 3.141592653589793
#endif
#ifndef HALF_PI
#define HALF_PI 1.5707963267948966
#endif
// Helper Functions
inline float clampValue(float input, float2 limit)
{
float minValue = 1-limit.y;
float maxValue = 1-limit.x;
if(input<=minValue){
return 0;
} else if(input>=maxValue){
return 1;
} else {
return (input - minValue )/(maxValue-minValue);
}
}
inline float2 rotateUV(fixed2 uv, float rotation)
{
float sinX = sin (rotation);
float cosX = cos (rotation);
float2x2 rotationMatrix = float2x2(cosX, -sinX, sinX, cosX);
return mul ( uv, rotationMatrix )/2 + 0.5;
}
inline fixed4 lerp3(fixed4 a, fixed4 b, fixed4 c, float pos, float size){
float ratio2 = 0.5+size*0.5;
float ratio1 = 1-ratio2;
if(pos<ratio1)
return lerp(a,b,pos/ratio1);
else if(pos>ratio2)
return lerp(b,c,(pos-ratio2)/ratio1);
else
return b;
}
#endif
The performance is great now, but the rotation is totally messed up (most noticeable on the 3 color gradient) and I can't figure out why.
I never understood why people want to make their gradients inside the shader; it is quite limited and not necessarily more performant unless you are changing the values every frame. My best solution for this would be to generate the gradient as a texture on the CPU, with a size of 128x1. Use the Gradient class provided by Unity, and loop:
Texture2D texture = new Texture2D(128, 1);
Color[] pixels = new Color[128];
for (int i = 0; i < 128; i++) {
pixels[i] = gradient.Evaluate(i/127f);
}
texture.SetPixels(pixels);
texture.Apply();
Send it to the shader using:
material.SetTexture("_Gradient", texture);
Then you can rotate and scroll along this texture all you want, using a 2x2 matrix like you did. Just make sure to set the texture's wrap mode to Clamp and not Repeat. Remember that you can implement OnValidate() on your behaviour to apply value updates in the editor; if you need to update it in a build, though, you will need to listen for changes some other way.
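A minimal sketch of that editor-update part, assuming the texture is rebuilt with the loop above (the class name and field names here are illustrative, not from the original post):
using UnityEngine;

public class GradientBackground : MonoBehaviour
{
    public Gradient gradient;
    public Material material;
    Texture2D texture;

    // Rebuild and upload the gradient texture whenever values change in the Inspector.
    void OnValidate()
    {
        if (material == null || gradient == null) return;
        if (texture == null)
        {
            texture = new Texture2D(128, 1);
            texture.wrapMode = TextureWrapMode.Clamp; // clamp, not repeat
        }
        Color[] pixels = new Color[128];
        for (int i = 0; i < 128; i++)
        {
            pixels[i] = gradient.Evaluate(i / 127f);
        }
        texture.SetPixels(pixels);
        texture.Apply();
        material.SetTexture("_Gradient", texture);
    }
}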
Using vertex colors would indeed be useful for gradients, since these are interpolated in the hardware... but from my understanding, this is a screen-space effect, and as such you would need the vertices to line up with the actual gradient bands.
This will be my second question regarding my ongoing VR project.
I am making a photo sphere Cardboard app.
I have added a sphere, and inside of it I have placed the main camera at position (0,0,0).
Inside of the sphere I have placed a 3D cube. If the user gazes at it for some specific number of seconds, the equirectangular textures on the sphere change via a script.
Now I am using the GvrReticlePointer in this project, and I can't seem to find the white dot anywhere except when it is on the 3D cube.
I would really like to know what is causing the reticle to disappear in the scene and how to fix it.
Thanks in advance.
Answering this question myself!
Just use this shader. Works like a charm!
Shader "Custom/Equirectangular" {
Properties {
_Color ("Main Color", Color) = (1,1,1,1)
_MainTex ("Diffuse (RGB) Alpha (A)", 2D) = "gray" {}
}
SubShader{
Pass {
Cull Front
Tags {"LightMode" = "Always"}
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma fragmentoption ARB_precision_hint_fastest
#pragma glsl
#pragma target 3.0
#include "UnityCG.cginc"
struct appdata {
float4 vertex : POSITION;
float3 normal : NORMAL;
};
struct v2f
{
float4 pos : SV_POSITION;
float3 normal : TEXCOORD0;
};
v2f vert (appdata v)
{
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.normal = v.normal;
return o;
}
sampler2D _MainTex;
#define PI 3.141592653589793
inline float2 RadialCoords(float3 a_coords)
{
float3 a_coords_n = normalize(a_coords);
float lon = atan2(a_coords_n.z, a_coords_n.x);
float lat = acos(a_coords_n.y);
float2 sphereCoords = float2(lon, lat) * (1.0 / PI);
//return float2(sphereCoords.x * 0.5 + 0.5, 1 - sphereCoords.y);
return float2(1 - (sphereCoords.x * 0.5 + 0.5), 1 - sphereCoords.y);
}
float4 frag(v2f IN) : COLOR
{
float2 equiUV = RadialCoords(IN.normal);
return tex2D(_MainTex, equiUV);
}
ENDCG
}
}
FallBack "VertexLit"
}
I am working on my water shader but I have run into some problems. It looks like my normals aren't getting the same offset as my vertices. You can see a white plane below my water onto which the shadow of the water is cast, except there is another white plane which is not an object but most likely the normals of my water mesh that didn't move, blocking part of the shadow. PLEASE, I really need some help with this; I can't find anyone who knows what this is.
This is my code:
Shader "Custom/NoobShader_04" {
Properties {
_Color ("Color", Color) = (0,0.55,0.83,1)
_Diffuse ("Diffuse Map", 2D) = "white" {}
_Displacement ("Displacement Map", 2D) = "white" {}
_Scale ("Wave Scale", float) = 0.7
_Frequency ("Frequency", float) = 0.6
_Speed ("Speed", float) = 0.5
}
SubShader {
Pass{
Tags { "LightMode" = "ForwardBase"}
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
float4 _Color;
sampler2D _Displacement;
sampler2D _Diffuse;
float _Scale;
float _Frequency;
float _Speed;
float4 _LightColor0;
struct VertexOutput
{
float4 pos : SV_POSITION;
float3 nor : NORMAL;
float4 col : COLOR;
float4 tex : TEXCOORD0;
};
struct VertexInput
{
float4 vertex : POSITION;
float3 normal : NORMAL;
float4 texcoord : TEXCOORD0;
};
struct FragmentOutput
{
float4 color : COLOR;
};
VertexOutput vert (VertexInput i)
{
VertexOutput VOUT;
float4 disp = tex2Dlod(_Displacement, float4(i.texcoord.x * _Frequency + (_Time.x * _Speed), i.texcoord.y * _Frequency + (_Time.x * _Speed),0.0,0.0));
float4 newPos = i.vertex;
float3 newNor = i.normal;
newPos.y += _Scale * disp.y;
newNor.y += _Scale * disp.y;
VOUT.nor = newNor;
VOUT.pos = mul(UNITY_MATRIX_MVP,newPos);
VOUT.tex = i.texcoord;
float3 normalDirection = normalize( mul(float4(newNor,0.0),_World2Object).xyz);
float3 lightDirection = normalize(_WorldSpaceLightPos0.xyz);
float atten = 1.0;
float3 diffuseRefflection = atten * _LightColor0.xyz * _Color.rgb * max( 0.0, dot(normalDirection, lightDirection));
VOUT.col = float4(diffuseRefflection, 1.0);
return VOUT;
}
FragmentOutput frag(VertexOutput v)
{
FragmentOutput FOUT;
float4 tex = tex2D(_Diffuse,float4(v.tex.x * _Frequency + (_Time.x * _Speed), v.tex.y * _Frequency + (_Time.x * _Speed),0.0,0.0));
FOUT.color = tex * v.col + UNITY_LIGHTMODEL_AMBIENT.xyzw;
return FOUT;
}
ENDCG
}
}
FallBack "Diffuse"
}
You can see a white plane below my water onto which the shadow of the water is cast, except there is another white plane which is not an object but most likely the normals of my water mesh that didn't move, blocking part of the shadow.
I tried your shader and I can't see any plane, except the one I added to the scene to receive the cast shadow. In any case, the water's normals don't have anything to do with the plane.
It looks like my normals aren't getting the same offset as my vertices
Again, not sure what you mean here. Offset has no effect on normals, because they express an orientation, not a position.
If you mean that the projected shadow on the plane doesn't account for the vertex offset, it's because the auto-generated shadow caster pass can't take the vertex offset into consideration. So you probably need to implement it explicitly.
Something like:
Pass
{
Name "ShadowCaster"
Tags { "LightMode" = "ShadowCaster" }
Fog {Mode Off}
ZWrite On ZTest Less Cull Off
Offset 1, 1
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma fragmentoption ARB_precision_hint_fastest
#pragma multi_compile_shadowcaster
#include "UnityCG.cginc"
v2f vert( appdata_full v )
{
v2f o;
//TRANSFER_SHADOW_CASTER(o) is how default shadows are cast
o.pos = ...; // put your calculation here, taking the vertex offset into account; basically repeat the same calculation you wrote for the forward pass for the vertex position
return o;
}
float4 frag( v2f i ) : COLOR
{
fixed4 texcol = tex2D( _MainTex, i.uv );
clip( texcol.a - _Cutoff );
SHADOW_CASTER_FRAGMENT(i)
}
ENDCG
}
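For the water shader in the question, the elided vertex calculation could look something like the sketch below: it repeats the forward pass displacement on v.vertex and then falls back on the standard shadow caster macros. This is only a sketch under those assumptions; the clip()/_Cutoff lines from the template are dropped because the water shader has no cutout, and the displacement properties must be redeclared inside this pass's CGPROGRAM:
sampler2D _Displacement;
float _Scale;
float _Frequency;
float _Speed;

struct v2f
{
    V2F_SHADOW_CASTER;
};

v2f vert(appdata_full v)
{
    // Same displacement as the forward pass, so the shadow follows the waves
    float4 disp = tex2Dlod(_Displacement,
        float4(v.texcoord.x * _Frequency + (_Time.x * _Speed),
               v.texcoord.y * _Frequency + (_Time.x * _Speed), 0.0, 0.0));
    v.vertex.y += _Scale * disp.y;

    v2f o;
    TRANSFER_SHADOW_CASTER(o)
    return o;
}

float4 frag(v2f i) : SV_Target
{
    SHADOW_CASTER_FRAGMENT(i)
}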