I'm on Unity 2019.4.30f1, using the built-in render pipeline (i.e. neither URP nor HDRP), and using the blood decals from this pack: https://assetstore.unity.com/packages/vfx/particles/volumetric-blood-fluids-173863
The blood decals are created at runtime: when a player dies, some blood spurts out and the decal is created.
Here is how the decal looks at FOV 20; this is correct, how I expect it to look:
Next, if I merely change the camera FOV to 15, something goes really weird with the decal. Some parts look like they are in the wrong place, as if they had been shifted up or left, and some parts have been clipped off, yet other parts seem to still be in the correct place:
If I switch back to FOV 20 it looks normal again, so nothing about the decal itself seems to change when I change the FOV; it just looks completely different depending on the FOV it's viewed from.
Here is a screenshot from scene view, showing the decal as well as some inspector info about it:
I've been tearing my hair out over this all week and can't seem to figure it out :/
Any advice would be much appreciated. Of course, I can provide any other info needed if there isn't enough to go on here.
Edit: here is the decal shader:
Shader "KriptoFX/BFX/BFX_Decal"
{
Properties
{
[HDR] _TintColor("Tint Color", Color) = (1,1,1,1)
_MainTex("NormalAlpha", 2D) = "white" {}
_LookupFade("Lookup Fade Texture", 2D) = "white" {}
_Cutout("Cutout", Range(0, 1)) = 1
_CutoutTex("CutoutDepth(XZ)", 2D) = "white" {}
[Space]
_SunPos("Sun Pos", Vector) = (1, 0.5, 1, 0)
}
SubShader
{
Tags{ "Queue" = "AlphaTest"}
Blend DstColor SrcColor
//Blend SrcAlpha OneMinusSrcAlpha
Cull Front
ZTest Always
ZWrite Off
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_fog
#pragma multi_compile_instancing
#pragma multi_compile _ USE_CUSTOM_DECAL_LAYERS
#include "UnityCG.cginc"
sampler2D _MainTex;
sampler2D _Flowmap;
sampler2D _LookupFade;
sampler2D _CutoutTex;
float4 _MainTex_ST;
float4 _MainTex_NextFrame;
float4 _CutoutTex_ST;
UNITY_INSTANCING_BUFFER_START(Props)
UNITY_DEFINE_INSTANCED_PROP(half4, _TintColor)
UNITY_DEFINE_INSTANCED_PROP(half, _Cutout)
UNITY_DEFINE_INSTANCED_PROP(float, _LightIntencity)
UNITY_INSTANCING_BUFFER_END(Props)
half4 _CutoutColor;
half4 _FresnelColor;
half4 _DistortionSpeedScale;
sampler2D _CameraDepthTexture;
sampler2D _LayerDecalDepthTexture;
half InterpolationValue;
half _AlphaPow;
half _DistortSpeed;
half _DistortScale;
float4 _SunPos;
half _DepthMul;
struct appdata_t {
float4 vertex : POSITION;
float4 normal : NORMAL;
half4 color : COLOR;
UNITY_VERTEX_INPUT_INSTANCE_ID
};
struct v2f {
float4 vertex : SV_POSITION;
half4 color : COLOR;
float4 screenUV : TEXCOORD0;
float3 ray : TEXCOORD1;
float3 viewDir : TEXCOORD2;
float4 screenPos : TEXCOORD3;
UNITY_FOG_COORDS(4)
UNITY_VERTEX_INPUT_INSTANCE_ID
UNITY_VERTEX_OUTPUT_STEREO
};
v2f vert(appdata_t v)
{
v2f o;
UNITY_SETUP_INSTANCE_ID(v);
UNITY_TRANSFER_INSTANCE_ID(v, o);
UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(o);
o.vertex = UnityObjectToClipPos(v.vertex);
o.color = v.color;
o.ray = UnityObjectToViewPos(v.vertex) * float3(-1, -1, 1);
o.screenUV = ComputeScreenPos(o.vertex);
o.viewDir = normalize(ObjSpaceViewDir(v.vertex));
o.screenPos = ComputeGrabScreenPos(o.vertex);
UNITY_TRANSFER_FOG(o,o.vertex);
return o;
}
half4 frag(v2f i) : SV_Target
{
UNITY_SETUP_INSTANCE_ID(i);
i.ray *= (_ProjectionParams.z / i.ray.z);
#if USE_CUSTOM_DECAL_LAYERS
float depth = Linear01Depth(tex2Dproj(_LayerDecalDepthTexture, i.screenUV));
float depthMask = Linear01Depth(tex2Dproj(_CameraDepthTexture, i.screenUV));
float fade = 1- saturate(100000 * (depth - depthMask));
#else
float depth = Linear01Depth(tex2Dproj(_CameraDepthTexture, i.screenUV));
#endif
float3 wpos = mul(unity_CameraToWorld, float4(i.ray * depth, 1)).xyz;
float3 opos = mul(unity_WorldToObject, float4(wpos, 1)).xyz;
float3 stepVal = saturate((0.5 - abs(opos.xyz)) * 10000);
half lookupHeight = tex2D(_LookupFade, float2(opos.y + 0.5, 0));
float projClipFade = stepVal.x * stepVal.y * stepVal.z * lookupHeight;
#if USE_CUSTOM_DECAL_LAYERS
projClipFade *= fade;
#endif
float2 uv = opos.xz + 0.5;
float2 uvMain = uv * _MainTex_ST.xy + _MainTex_ST.zw;
float2 uvCutout = (opos.xz + 0.5) * _CutoutTex_ST.xy + _CutoutTex_ST.zw;
half4 normAlpha = tex2D(_MainTex, uvMain);
half4 res = 0;
res.a = saturate(normAlpha.w * 2);
if (res.a < 0.1) discard;
normAlpha.xy = normAlpha.xy * 2 - 1;
float3 normal = normalize(float3(normAlpha.x, 1, normAlpha.y));
half3 mask = tex2D(_CutoutTex, uvCutout).xyz;
half cutout = 0.5 + UNITY_ACCESS_INSTANCED_PROP(Props, _Cutout) * i.color.a * 0.5;
half alphaMask = saturate((mask.r - (cutout * 2 - 1)) * 20) * res.a;
half colorMask = saturate((mask.r - (cutout * 2 - 1)) * 5) * res.a;
res.a = alphaMask;
res.a = saturate(res.a * projClipFade);
float intencity = UNITY_ACCESS_INSTANCED_PROP(Props, _LightIntencity);
float light = max(0.001, dot(normal, normalize(_SunPos.xyz)));
light = pow(light, 150) * 3 * intencity;
light *= (1 - mask.z * colorMask);
float4 tintColor = UNITY_ACCESS_INSTANCED_PROP(Props, _TintColor);
#if !UNITY_COLORSPACE_GAMMA
tintColor = tintColor * 1.35;
#endif
res.rgb = lerp(tintColor.rgb, tintColor.rgb * 0.25, mask.z * colorMask) + light;
half fresnel = (1 - dot(normal, normalize(i.viewDir)));
fresnel = pow(fresnel + 0.1, 5);
UNITY_APPLY_FOG_COLOR(i.fogCoord, res, half4(1, 1, 1, 1));
return lerp(0.5, res, res.a);
return res;
}
ENDCG
}
}
}
Comparing the two pictures, I see that BFXShaderAnamation does some precomputation at the beginning of the animation, and the results depend on the camera settings and geometry. You should look for the code that does this initialization and call it again each time your camera settings or geometry change.
Idea: check whether the decal plugin creates an additional hidden camera to do some computations (look into the hierarchy and search by the "Camera" type).
Related
I'd like to state right away that I don't work with shaders very often, and the basic one I'm playing with, a force field shader from a thread on Unity's forums, is a much older version. However, the original "almost" does what I want it to.
I would like to modify it to add scrolling effects in order to simulate speed; however, strangely, I'm getting "unrecognized identifier 'Input'", even though an Input parameter (as in surf's IN) is supposedly a normal thing in Unity's shaders. Here is my code thus far:
Shader "Custom/SubspaceLook" {
Properties {
_Color ("Main Color", Color) = (1,1,1,0.5)
_MainTex ("Texture", 2D) = "white" {}
_UVScale ("UV Scale", Range (0.05, 4)) = 1
_UVDistortion ("UV Distortion", Range (0.01, 1)) = 0.5
_Rate ("Oscillation Rate", Range (5, 200)) = 10
_Rate2 ("Oscillation Rate Difference", Range (1, 3)) = 1.43
_ZPhase ("Z Phase", Range (0, 3)) = 0.5
_Scale ("Scale", Range (0.02, 2)) = 0.5
_Distortion ("Distortion", Range (0, 20)) = 0.4
_ScrollSpeedX("Scroll X", Range(0, 10)) = 2
_ScrollSpeedY("Scroll Y", Range(0, 10)) = 3
}
SubShader {
ZWrite Off
Tags { "Queue" = "Transparent" }
Blend One One
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma fragmentoption ARB_fog_exp2
#include "UnityCG.cginc"
float4 _Color;
sampler2D _MainTex;
float _Rate;
float _Rate2;
float _Scale;
float _Distortion;
float _ZPhase;
float _UVScale;
float _UVDistortion;
float _ScrollSpeedX;
float _ScrollSpeedY;
struct v2f {
float4 pos : SV_POSITION;
float3 uvsin : TEXCOORD0;
float3 vertsin : TEXCOORD1;
float2 uv : TEXCOORD2;
};
v2f vert (appdata_base v)
{
v2f o;
o.pos = UnityObjectToClipPos (v.vertex);
float s = 1 / _Scale;
float t = (_Time[0]*_Rate*_Scale) / _Distortion;
float2 uv = float2(v.vertex.y * 0.3 + (v.vertex.y - v.vertex.z *
0.0545), v.vertex.x + (v.vertex.z - v.vertex.x * 0.03165));
o.vertsin = sin((v.vertex.xyz + t) * s);
o.uvsin = sin((float3(uv, t * _ZPhase) + (t* _Rate2)) * s) *
_Distortion;
o.uv = uv;
return o;
}
half4 frag (v2f i) : COLOR
{
float3 vert = i.vertsin;
float3 uv = i.uvsin;
float mix = 1 + sin((vert.x - uv.x) + (vert.y - uv.y) + (vert.z -
uv.z));
float mix2 = 1 + sin((vert.x + uv.x) - (vert.y + uv.y) - (vert.z +
uv.z));
return half4( tex2D( _MainTex, (i.uv + (float2(mix, mix2) *
_UVDistortion)) * _UVScale ) * 1.5 * _Color);
}
void surf(Input IN, inout SurfaceOutput o)
{
fixed2 scrolledUV = IN.uv_MainTex;
fixed xScrollValue = _ScrollSpeedX * _Time;
fixed yScrollValue = _ScrollSpeedY * _Time;
scrolledUV += fixed2(xScrollValue, yScrollValue);
half2 c = tex2D(_MainTex, scrolledUV);
o.Albedo = tex2D(_MainTex, IN.uv_MainTex).rbg;
o.Albedo += c.rbg;
half rim = 1.0 - saturate(dot(normalize(IN.viewDir), o.Normal));
o.Emission = _Rimcolor.rgb * pow(rim, _RimPower);
}
ENDCG
}
}
Fallback "Diffuse"
}
My primary change to the original shader has been to add the scrolling values and surf() to it. I have otherwise not touched the first version, which was written by forestjohnson in 2008.
You are mixing and matching surface shader code with regular shader code:
#pragma vertex vert
#pragma fragment frag
These are for explicit vertex and pixel shaders, while surf() belongs to Unity's surface shader system.
The error pointed out by the compiler is that your shader lacks the Input struct referenced by the surf() function, but that's not the real issue you need to look into:
struct Input {
    float2 uv_MainTex;
};
https://docs.unity3d.com/Manual/SL-SurfaceShaders.html
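If the scrolling effect is all you are after, you don't need the surface shader path at all; the scroll fits in the existing fragment shader. Here is a minimal sketch of that idea, reusing the _ScrollSpeedX/_ScrollSpeedY properties already declared in the shader above (delete the surf() block entirely):
half4 frag (v2f i) : COLOR
{
    float3 vert = i.vertsin;
    float3 uv = i.uvsin;
    float mix = 1 + sin((vert.x - uv.x) + (vert.y - uv.y) + (vert.z - uv.z));
    float mix2 = 1 + sin((vert.x + uv.x) - (vert.y + uv.y) - (vert.z + uv.z));
    // the only change: offset the lookup UVs over time to fake motion
    // (_Time.y is elapsed time in seconds)
    float2 scrolledUV = i.uv + float2(_ScrollSpeedX, _ScrollSpeedY) * _Time.y;
    return half4(tex2D(_MainTex, (scrolledUV + (float2(mix, mix2) * _UVDistortion)) * _UVScale) * 1.5 * _Color);
}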
I'm trying to make a shader which simulates a warp speed effect, and I think it's almost done. I only need to know what I should change in the code to make the tunnel effect completely opaque, so that nothing behind it is visible.
I have added an alpha slider to see if I can control the opacity, but the end result is still the same.
The code is here:
Shader "Warp Tunnel Distortion Opaque" {
Properties {
_TintColor ("Tint Color", Color) = (0.5,0.5,0.5,0.5)
_Speed("UV Speed", Float) = 1.0
_WiggleX("_WiggleX", Float) = 1.0
_WiggleY("_WiggleY", Float) = 1.0
_WiggleDist("Wiggle distance", Float) = 1.0
_Offset("Vertex offset", float) = 0
_TintColor("Tint", Color) = (1.0, 1.0, 1.0, 1.0)
_MainTex("Distortion map", 2D) = "" {}
_Dist("Distortion ammount", Float) = 10.0
}
Category {
Tags { "Queue"="Geometry" "RenderType"="Opaque" }
Cull Back Lighting Off ZWrite Off
Fog { Color (0,0,0,0) }
ZTest LEqual
SubShader {
GrabPass {
Name "BASE"
Tags { "LightMode" = "Always" }
}
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_particles
#include "UnityCG.cginc"
sampler2D _MainTex;
fixed4 _TintColor;
struct appdata_t {
float4 vertex : POSITION;
fixed4 color : COLOR;
float2 texcoord : TEXCOORD0;
float4 normal : NORMAL;
};
struct v2f {
float4 vertex : SV_POSITION;
fixed4 color : COLOR;
float2 texcoord : TEXCOORD0;
float4 posWorld : TEXCOORD1;
float4 uvgrab : TEXCOORD2;
float4 projPos : TEXCOORD3;
};
float4 _MainTex_ST;
float _Speed;
float _WiggleX, _WiggleY, _WiggleDist;
float _Offset;
float _Dist;
sampler2D _CameraDepthTexture;
float _InvFade;
sampler2D _GrabTexture;
float4 _GrabTexture_TexelSize;
v2f vert (appdata_t v)
{
v2f o;
o.posWorld = mul(unity_ObjectToWorld, v.vertex);
v.vertex.xyz += normalize(v.normal.xyz) * _Offset;
o.vertex = UnityObjectToClipPos(v.vertex);
o.vertex.x += sin(_Time.y * _WiggleX) * _WiggleDist;
o.vertex.y -= sin(_Time.y * _WiggleY) * _WiggleDist;
o.color = v.color;
o.texcoord = TRANSFORM_TEX(v.texcoord,_MainTex);
o.projPos = ComputeScreenPos (o.vertex);
COMPUTE_EYEDEPTH(o.projPos.z);
#if UNITY_UV_STARTS_AT_TOP
float scale = -1.0;
#else
float scale = 1.0;
#endif
o.uvgrab.xy = (float2(o.vertex.x, o.vertex.y*scale) + o.vertex.w) * 0.5;
o.uvgrab.zw = o.vertex.zw;
return o;
}
fixed4 frag (v2f i) : SV_Target
{
i.texcoord.y += _Time.x * _Speed;
float4 packedTex = tex2D(_MainTex, i.texcoord);
float local1 = packedTex.z * 2.4;
float2 local2 = packedTex.rg * 2.25;
packedTex.rg = local1 * local2;
half2 bump = UnpackNormal(packedTex).rg;
float2 offset = bump * _Dist * _GrabTexture_TexelSize.xy;
i.uvgrab.xy = offset * i.uvgrab.z + i.uvgrab.xy;
half4 col = tex2Dproj( _GrabTexture, UNITY_PROJ_COORD(i.uvgrab));
return col;
}
ENDCG
}
}
}
}
Thank you in advance.
Well if you want it completely opaque, that's easy.
Right before the return statement, just add:
col.a = 1;
If you want it to actually care about the alpha slider in the input color, then do this:
col.a = _TintColor.a;
In the current code, _TintColor isn't used at all: there are no references to it besides its declarations, of which there are two in the Properties block at the top (you should remove one of them) and one inside the CGPROGRAM block, which binds the property so it can be used within the shader code.
If you actually want to tint things, you'll have to multiply the _TintColor with the computed color coming from the texture and effect. There are multiple ways of doing it, for example:
col.r *= _TintColor.r;
col.g *= _TintColor.g;
col.b *= _TintColor.b;
How you want to handle it is up to you.
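Putting both parts together, the end of frag() might look like this (a sketch; it assumes you keep a single _TintColor property):
half4 col = tex2Dproj(_GrabTexture, UNITY_PROJ_COORD(i.uvgrab));
// tint the grabbed, distorted color
col.r *= _TintColor.r;
col.g *= _TintColor.g;
col.b *= _TintColor.b;
col.a = _TintColor.a; // or col.a = 1; for fully opaque
return col;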
I ported a plasma ball shader from Shadertoy to Unity as an image effect attached to the camera. It works fine in the Editor and in a Windows standalone build, but it does not work on Android devices: it flashes blue and black images.
Here is what it looks like in the Unity Editor and the Windows build:
Here is what it looks like on Android:
The ported shader code:
Shader "Hidden/Plasma Space Ball Image Effect"
{
Properties
{
iChannel0("iChannel0", 2D) = "white" {}
//[MaterialToggle] _isToggled("isToggle", Float) = 0
}
SubShader
{
// No culling or depth
Cull Off ZWrite Off ZTest Always
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
v2f vert(appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.uv;
return o;
}
sampler2D iChannel0;
//Ported from https://www.shadertoy.com/view/MstXzf
float3 hb(float2 pos, float t, float time, float2 rot, float size, sampler2D tex0)
{
float2 newUv = 0.2*(pos / (1.2 - t) + 0.5*time*rot);
//float texSample = texture(tex0, newUv).b;
float texSample = tex2D(tex0, newUv).b;
float uOff = 0.2*(texSample + 0.3*time); //lsf3RH
float2 starUV = newUv + float2(uOff, 0.0);
//return float3(0.3, 0.3, 1.0) + 1.3*texture(tex0, starUV).b;
return float3(0.3, 0.3, 1.0) + 1.3*tex2D(tex0, starUV).b;
}
float4 blob(float2 uv, float size, float time, sampler2D tex0)
{
float2 center = float2(0., 0.);
float2 pos = center - uv;
float t = length(pos);
float st = size - t;
float2 rot = 0.005*float2(sin(time / 16.), sin(time / 12.)); //MslGWN
float alpha = smoothstep(0.0, 0.2*size, st);
float3 col = hb(pos, t, time, rot, size, tex0);
float a1 = smoothstep(-1.4, -1.0, -col.b);
col = lerp(col, hb(pos, t, -time, -rot, size, tex0), a1);
col += 0.8*exp(-12.*abs(t - 0.8*size) / size);
float a2 = smoothstep(-1.4, -1.0, -col.b);
alpha -= a2;
//float crosshair = float((abs(pos.x) < 0.005 && abs(pos.y) < 0.15) || (abs(pos.y) < 0.005&&abs(pos.x) < 0.15));
//return float4(col, alpha) + crosshair;
return float4(col, alpha);
}
float4 main_(float2 uv, float size)
{
return blob(uv, size, _Time.y, iChannel0);
}
fixed4 frag(v2f i) : SV_Target
{
float4 fragColor = 0;
float2 fragCoord = i.vertex.xy;
///---------------------------------------------------
float2 uv = fragCoord.xy / _ScreenParams.xy;
float2 cr = uv*2. - 1.;
cr.x *= _ScreenParams.x / _ScreenParams.y;
//late addition to elaborate background motion, could be reused later on
float2 rot = 0.5*float2(sin(_Time.y / 16.), sin(_Time.y / 12.));
float4 ball = clamp(main_(cr, sin(_Time.y)*0.05 + 0.5 + 0.5), 0., 1.);
//float3 bg = float3(0.7, 0.7, 1.0)*texture(iChannel0, uv + rot + 0.1*ball.rb).b;
float3 bg = float3(0.7, 0.7, 1.0)*tex2D(iChannel0, uv + rot + 0.1*ball.rb).b;
//simulated gl blend
fragColor = float4(lerp(bg, ball.rgb, ball.a), 1.0);
//fragColor = lerp(fragColor,tex2D(iChannel0, i.uv).rgba,.5);
return fragColor;
}
ENDCG
}
}
}
You can find the image used for the iChannel0 input slot of the shader above here.
Things I've tried:
Adding the shader to the Graphics Settings so that Unity includes it in the build process.
Disabling Auto Graphics API and trying OpenGLES2 and OpenGLES3.
Checking the log with Android Studio. No errors or warnings at all.
None of these solved the problem, and I ran out of things to try.
Software and device info, if that helps:
Unity 5.6.0f3
Android 4.4.2
This is for learning and educational purposes, as I am studying the GLSL, HLSL, and Cg/ShaderLab shader languages. I just want to know why the ported shader is not working as expected on Android devices.
Why is it flashing blue and black images on Android?
You need to use the VPOS semantic for positions in the fragment shader for OpenGLES2.
From Unity docs:
A fragment shader can receive the position of the pixel being rendered as a special VPOS semantic. This feature only exists starting with shader model 3.0, so the shader needs to have the #pragma target 3.0 compilation directive.
So to get screen space positions:
// note: no SV_POSITION in this struct
struct v2f {
    float2 uv : TEXCOORD0;
};

v2f vert (
    float4 vertex : POSITION,       // vertex position input
    float2 uv : TEXCOORD0,          // texture coordinate input
    out float4 outpos : SV_POSITION // clip space position output
)
{
    v2f o;
    o.uv = uv;
    outpos = UnityObjectToClipPos(vertex);
    return o;
}

fixed4 frag (v2f i, UNITY_VPOS_TYPE screenPos : VPOS) : SV_Target
{
    // screenPos.xy will contain pixel integer coordinates.
    float4 fragColor = 0;
    float2 fragCoord = screenPos;
But you already pass in UVs, so maybe you can use those?
float2 uv = i.uv;
It turns out I was wrong. You don't get clip space positions in the fragment shader in OpenGLES2; you get... 0. (Maybe someone can explain this?)
I made a small test shader:
CGPROGRAM
#pragma vertex vert
#pragma fragment frag

float4 vert (float4 vertex : POSITION) : SV_Position
{
    return UnityObjectToClipPos(vertex);
}

fixed4 frag (float4 screenPos : SV_Position) : SV_Target
{
    float uvx = screenPos.x / _ScreenParams.x;
    return float4(uvx, 0., 0., 1.);
}
ENDCG
and the line float uvx = screenPos.x/_ScreenParams.x; gets compiled as:
tmpvar_2.x = (0.0 / _ScreenParams.x); // OpenGLES2
u_xlat0 = gl_FragCoord.x / _ScreenParams.x; // OpenGLES3
But if you use the VPOS semantic, fixed4 frag (float4 screenPos : VPOS) : SV_Target, the same line gets compiled as:
tmpvar_2.x = (gl_FragCoord.x / _ScreenParams.x); // OpenGLES2
u_xlat0 = gl_FragCoord.x / _ScreenParams.x; // OpenGLES3
So for OpenGLES2 it looks like you need to use the VPOS semantic to get positions in screen space in the fragment shader.
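Applied to the plasma shader above, the minimal change is a sketch like this: add #pragma target 3.0, drop SV_POSITION from the v2f struct, and read the pixel coordinates from VPOS in frag:
#pragma vertex vert
#pragma fragment frag
#pragma target 3.0 // VPOS requires shader model 3.0

struct v2f
{
    float2 uv : TEXCOORD0; // note: no SV_POSITION in this struct
};

v2f vert(appdata v, out float4 outpos : SV_POSITION)
{
    v2f o;
    o.uv = v.uv;
    outpos = UnityObjectToClipPos(v.vertex);
    return o;
}

fixed4 frag(v2f i, UNITY_VPOS_TYPE screenPos : VPOS) : SV_Target
{
    float4 fragColor = 0;
    float2 fragCoord = screenPos.xy;          // pixel coordinates on both GLES2 and GLES3
    float2 uv = fragCoord / _ScreenParams.xy; // instead of i.vertex.xy
    // ... rest of the original frag body unchanged
    return fragColor;
}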
I have a shader which generates an opacity mask and rotates it.
This is how it looks:
The generated mask looks like this:
I generate the mask in code, but I want to take the mask from a texture2D instead.
How can I do that?
How do I change the mask generation to use only a texture2D?
The code of my shader:
Shader "Custom/RadialOpacity" {
Properties {
[PerRendererData]_MainTex ("MainTex", 2D) = "white" {}
_Color ("Color", Color) = (1,1,1,1)
_OpacityRotator ("Opacity Rotator", Range(-360, 360)) = -360 // 2 full circles
[HideInInspector]_Cutoff ("Alpha cutoff", Range(0,1)) = 0.5
[MaterialToggle] PixelSnap ("Pixel snap", Float) = 0
}
SubShader {
Tags {
"IgnoreProjector"="True"
"Queue"="Transparent"
"RenderType"="Transparent"
"CanUseSpriteAtlas"="True"
"PreviewType"="Plane"
}
Pass {
Name "FORWARD"
Tags {
"LightMode"="ForwardBase"
}
Blend One OneMinusSrcAlpha
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile _ PIXELSNAP_ON
#include "UnityCG.cginc"
#pragma target 3.0
uniform sampler2D _MainTex;
uniform float4 _MainTex_ST;
uniform float4 _Color;
uniform float _OpacityRotator;
static const float TAU = float(6.283185); // это 2 * PI
struct VertexInput {
float4 vertex : POSITION;
float2 texcoord0 : TEXCOORD0;
};
struct VertexOutput {
float4 pos : SV_POSITION;
float2 uv0 : TEXCOORD0;
float3 normalDir : TEXCOORD2;
};
VertexOutput vert (VertexInput v) {
VertexOutput o = (VertexOutput)0;
o.uv0 = v.texcoord0;
o.pos = mul(UNITY_MATRIX_MVP, v.vertex );
#ifdef PIXELSNAP_ON
o.pos = UnityPixelSnap(o.pos);
#endif
return o;
}
float4 frag(VertexOutput i) : COLOR {
i.normalDir = normalize(i.normalDir);
float4 _MainTex_var = tex2D(_MainTex,TRANSFORM_TEX(i.uv0, _MainTex));
float2 oStart = (i.uv0 - 0.5);
float2 oVector = float2(-1, -1);
float oRotatorNormalized = _OpacityRotator / 360.0;
float oRotator_ang = oRotatorNormalized * -TAU;
float oRotator_cos = cos(oRotator_ang);
float oRotator_sin = sin(oRotator_ang);
float2x2 oRotationMatrix = float2x2(oRotator_cos, -oRotator_sin, oRotator_sin, oRotator_cos);
float2 oRotatorComponent = mul(oVector * oStart, oRotationMatrix);
/* generating opacity mask BEGIN_SECTION */
float2 oMaskHorizOrVert = atan2(oRotatorComponent.g, oRotatorComponent.r);
float oAtan2MaskNormalized = (oMaskHorizOrVert / TAU) + 0.5;
float oAtan2MaskRotatable = oRotatorNormalized - oAtan2MaskNormalized;
float oWhiteToBlackMask = ceil(oAtan2MaskRotatable);
/* generating opacity mask END_SECTION */
float oFinalMultiply = _MainTex_var.a * max(oAtan2MaskNormalized, ceil(oWhiteToBlackMask));
/*** (Emissive) ***/
float3 finalColor = _MainTex_var.rgb * _Color.rgb * oFinalMultiply;
return fixed4(finalColor, oFinalMultiply);
}
ENDCG
}
}
FallBack "Diffuse"
}
And I want to get something like this:
Properties {
...
_OpacityMask ("OpacityMask", 2D) = "white" {}
...
}
...
float oWhiteToBlackMask = ceil(OpacityMask);
float oFinalMultiply = _MainTex_var.a * max(oAtan2MaskNormalized, ceil(oWhiteToBlackMask));
...
https://forum.unity3d.com/threads/rotation-of-texture-uvs-directly-from-a-shader.150482/
OK, if I understand your question correctly, you want to add a texture2D parameter and have it rotate. You'll need to rotate the UV coordinates over time, which you can probably accomplish using the code in the link above.
I'm not sure how you get that exact fade at the end with a texture2D, but maybe with some clever use of time you can figure out the animation.
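For example, here is a minimal sketch of the mask-from-texture part, reusing the oRotationMatrix your frag() already builds. It assumes you add the _OpacityMask property from your snippet above plus a matching uniform sampler2D _OpacityMask; declaration:
// rotate the centered UVs with the existing matrix, then sample the mask texture
float2 oMaskUV = mul(i.uv0 - 0.5, oRotationMatrix) + 0.5;
float oWhiteToBlackMask = tex2D(_OpacityMask, oMaskUV).r;
float oFinalMultiply = _MainTex_var.a * max(oAtan2MaskNormalized, ceil(oWhiteToBlackMask));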
I am working on my water shader, but I have run into some problems. It looks like my normals aren't getting the same offset as my vertices. You can see a white plane below my water where the water's shadow is cast, except there is another white plane, which is not an object but most likely the normals of my water mesh that didn't move, blocking part of the shadow. Please, I really need some help with this; I can't find anyone who knows what this is.
This is my code:
Shader "Custom/NoobShader_04" {
Properties {
_Color ("Color", Color) = (0,0.55,0.83,1)
_Diffuse ("Diffuse Map", 2D) = "white" {}
_Displacement ("Displacement Map", 2D) = "white" {}
_Scale ("Wave Scale", float) = 0.7
_Frequency ("Frequency", float) = 0.6
_Speed ("Speed", float) = 0.5
}
SubShader {
Pass{
Tags { "LightMode" = "ForwardBase"}
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
float4 _Color;
sampler2D _Displacement;
sampler2D _Diffuse;
float _Scale;
float _Frequency;
float _Speed;
float4 _LightColor0;
struct VertexOutput
{
float4 pos : SV_POSITION;
float3 nor : NORMAL;
float4 col : COLOR;
float4 tex : TEXCOORD0;
};
struct VertexInput
{
float4 vertex : POSITION;
float3 normal : NORMAL;
float4 texcoord : TEXCOORD0;
};
struct FragmentOutput
{
float4 color : COLOR;
};
VertexOutput vert (VertexInput i)
{
VertexOutput VOUT;
float4 disp = tex2Dlod(_Displacement, float4(i.texcoord.x * _Frequency + (_Time.x * _Speed), i.texcoord.y * _Frequency + (_Time.x * _Speed),0.0,0.0));
float4 newPos = i.vertex;
float3 newNor = i.normal;
newPos.y += _Scale * disp.y;
newNor.y += _Scale * disp.y;
VOUT.nor = newNor;
VOUT.pos = mul(UNITY_MATRIX_MVP,newPos);
VOUT.tex = i.texcoord;
float3 normalDirection = normalize( mul(float4(newNor,0.0),_World2Object).xyz);
float3 lightDirection = normalize(_WorldSpaceLightPos0.xyz);
float atten = 1.0;
float3 diffuseRefflection = atten * _LightColor0.xyz * _Color.rgb * max( 0.0, dot(normalDirection, lightDirection));
VOUT.col = float4(diffuseRefflection, 1.0);
return VOUT;
}
FragmentOutput frag(VertexOutput v)
{
FragmentOutput FOUT;
float4 tex = tex2D(_Diffuse,float4(v.tex.x * _Frequency + (_Time.x * _Speed), v.tex.y * _Frequency + (_Time.x * _Speed),0.0,0.0));
FOUT.color = tex * v.col + UNITY_LIGHTMODEL_AMBIENT.xyzw;
return FOUT;
}
ENDCG
}
}
FallBack "Diffuse"
}
You can see a white plane below my water where the shadow of the water is cast, except another white plane, which is not an object but most likely the normals of my water mesh that didn't move, blocking part of the shadow.
I tried your shader and I can't see any plane, except the one I added to the scene to receive the cast shadow. In any case, the water's normals don't have anything to do with the plane.
It looks like my normals aren't getting the same offset as my vertices
Again, I'm not sure what you mean here. The offset has no effect on normals, because they express an orientation, not a position.
If you mean that the projected shadow on the plane doesn't account for the vertex offset, it's because the auto-generated shadow caster pass can't take the vertex offset into consideration. So you probably need to implement it explicitly.
Something like:
Pass
{
    Name "ShadowCaster"
    Tags { "LightMode" = "ShadowCaster" }
    Fog { Mode Off }
    ZWrite On ZTest Less Cull Off
    Offset 1, 1

    CGPROGRAM
    #pragma vertex vert
    #pragma fragment frag
    #pragma fragmentoption ARB_precision_hint_fastest
    #pragma multi_compile_shadowcaster
    #include "UnityCG.cginc"

    v2f vert( appdata_full v )
    {
        v2f o;
        //TRANSFER_SHADOW_CASTER(o) -- this is how default shadows are cast
        o.pos = ...; // put your calculation here, taking the vertex offset into account;
                     // basically repeat the same vertex position calculation you wrote
                     // for the forward pass
        return o;
    }

    float4 frag( v2f i ) : COLOR
    {
        fixed4 texcol = tex2D( _MainTex, i.uv );
        clip( texcol.a - _Cutoff );
        SHADOW_CASTER_FRAGMENT(i)
    }
    ENDCG
}
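For this water shader specifically, the shadow caster's vert() would repeat the same displacement as the forward pass. A sketch, assuming _Displacement, _Scale, _Frequency, and _Speed are also declared inside this pass (the water has no cutout, so the frag can skip the clip):
struct v2f {
    V2F_SHADOW_CASTER;
};

v2f vert( appdata_full v )
{
    v2f o;
    // repeat the forward-pass wave displacement so the shadow follows the waves
    float4 disp = tex2Dlod(_Displacement, float4(v.texcoord.x * _Frequency + (_Time.x * _Speed),
                                                 v.texcoord.y * _Frequency + (_Time.x * _Speed), 0.0, 0.0));
    v.vertex.y += _Scale * disp.y;
    TRANSFER_SHADOW_CASTER(o)
    return o;
}

float4 frag( v2f i ) : COLOR
{
    SHADOW_CASTER_FRAGMENT(i)
}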