I am trying to write a cutoff shader that uses a _MainTex in a ShadowCaster pass, where I have the pass:
Pass {
Name "ShadowCaster"
Tags {
"LightMode"="ShadowCaster"
}
Offset 1, 1
Cull Off
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// ...
#pragma multi_compile_shadowcaster
// ...
// structs and variables
// ...
ENDCG
}
The frag shader looks like:
float4 frag(VertexOutput i, float facing : VFACE) : COLOR {
// no shadow on the pixel if alpha less than a cutoff
float4 texcol = tex2D( _MainTex, i.uv1.xy );
// but it's always = 0, so everything is cutoff
clip(texcol.a * _Color.a - _Cutoff);
// it returns 0, which doesn't matter
SHADOW_CASTER_FRAGMENT(i)
}
texcol always has 0 in its alpha channel, which I can verify by testing clip(texcol.a - 0.0) and varying the number. And the value I pass to tex2D is a straight copy from the previous pass:
VertexOutput vert (VertexInput v) {
VertexOutput o = (VertexOutput)0;
o.uv1 = v.texcoord1;
// ...
TRANSFER_SHADOW_CASTER(o)
return o;
}
And the structs are:
struct VertexInput {
float4 vertex : POSITION;
float2 texcoord1 : TEXCOORD1;
float2 texcoord2 : TEXCOORD2;
};
struct VertexOutput {
V2F_SHADOW_CASTER;
float2 uv1 : TEXCOORD1;
float2 uv2 : TEXCOORD2;
float4 posWorld : TEXCOORD3;
};
What is wrong with the texture transformation I used in tex2D? Or is it something else? I am sure the texture itself provides correct alpha values.
Update: the full shader pass is on gist; here is the relevant part: https://gist.github.com/KHN190/0fd1d2bab899fbe2fcc809c6755b3d74
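For reference, the usual cutout pattern samples the first UV set and applies the _MainTex_ST tiling/offset via TRANSFORM_TEX; here is a minimal sketch of that pattern for comparison (assuming the alpha actually lives in UV set 0):

struct VertexInput {
    float4 vertex : POSITION;
    float2 texcoord : TEXCOORD0; // first UV set, not TEXCOORD1
};

sampler2D _MainTex;
float4 _MainTex_ST; // required for TRANSFORM_TEX to pick up tiling/offset

VertexOutput vert (VertexInput v) {
    VertexOutput o = (VertexOutput)0;
    o.uv1 = TRANSFORM_TEX(v.texcoord, _MainTex); // applies _MainTex_ST
    TRANSFER_SHADOW_CASTER(o)
    return o;
}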
I'm trying to achieve a cylindrical effect like this in Unity3D:
But every solution uses a material-based shader; sadly, I must have a Post Process effect or an Image Effect, for these reasons:
One map out of 30 needs to use this effect and there are many materials that are shared between them...
Every solution is vertex based. I've done some experiments, but I have models with different polygon counts, which means the effect would create visual artifacts (though this could eventually be fixed by editing the 3D models).
I'm at an advanced stage of development.
Do you think it's possible to create a simple effect (even a fake one) that moves the pixels downwards/upwards based on the distance to the camera? (I assume I need to use the depth map)
I've tried very hard but had no success; the effect either does nothing or just won't compile :(
This is the best I could come up with. The grayscale in the Frag method is only there to check that the shader is working, but once I define the Vert function the grayscale disappears and the shader does nothing.
Shader "Hidden/Custom/WorldCurvature"
{
HLSLINCLUDE
#include "Packages/com.unity.postprocessing/PostProcessing/Shaders/StdLib.hlsl"
TEXTURE2D_SAMPLER2D(_MainTex, sampler_MainTex);
float _Bend;
struct Attributes
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
};
struct Varyings
{
float4 vertex : SV_POSITION;
float2 texcoord : TEXCOORD0;
};
float4 Frag(Varyings i) : SV_Target
{
float4 color = SAMPLE_TEXTURE2D(_MainTex, sampler_MainTex, i.texcoord);
float luminance = dot(color.rgb, float3(0.2126729, 0.7151522, 0.0721750));
color.rgb = lerp(color.rgb, luminance.xxx, _Bend.xxx);
return color;
}
Varyings Vert(Attributes v)
{
Varyings o;
float4 vv = mul(unity_ObjectToWorld, v.vertex );
vv.xyz -= _WorldSpaceCameraPos.xyz;
vv = float4( 0.0f, (vv.x * vv.x) * - _Bend, 0.0f, 0.0f );
v.vertex += mul(unity_WorldToCamera, vv);
o.vertex = mul(unity_WorldToCamera, vv);
o.texcoord = v.texcoord;
return o;
}
ENDHLSL
SubShader
{
Cull Off ZWrite Off ZTest Always
Pass
{
HLSLPROGRAM
#pragma vertex Vert
#pragma fragment Frag
ENDHLSL
}
}
}
I've done another experiment, but I think it would only work in a 2D environment; here the image stops updating once I activate the image effect:
Shader "Hidden/Custom/CylinderImageEffect" {
Properties {
_MainTex ("Texture", 2D) = "white" {}
}
SubShader {
Cull Off ZWrite Off ZTest Always
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct v2f {
float4 vertex : SV_POSITION;
float2 uv : TEXCOORD0;
};
v2f vert( appdata_img v )
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.texcoord.xy;
return o;
}
sampler2D _MainTex;
fixed4 frag (v2f i) : SV_Target {
i.uv.x = 1 - acos(i.uv.x * 2 - 1) / 3.14159265;
return tex2D(_MainTex, i.uv);
}
ENDCG
}
}
}
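For what it's worth, the kind of screen-space fake I'm imagining would sample the camera's depth texture and push each pixel's source UV vertically by an amount that grows with depth. A minimal, untested sketch on top of the same StdLib setup, reusing _Bend as the strength (the quadratic falloff and the offset sign are guesses, and the camera must be rendering a depth texture):

TEXTURE2D_SAMPLER2D(_CameraDepthTexture, sampler_CameraDepthTexture);

float4 FragCurve(VaryingsDefault i) : SV_Target
{
    // linearized 0..1 depth at this pixel
    float depth = Linear01Depth(SAMPLE_DEPTH_TEXTURE(
        _CameraDepthTexture, sampler_CameraDepthTexture, i.texcoord));
    // sample from higher up for distant pixels so the image content
    // slides downward with distance; flip the sign if it bends the wrong way
    float2 uv = i.texcoord + float2(0.0, depth * depth * _Bend);
    return SAMPLE_TEXTURE2D(_MainTex, sampler_MainTex, uv);
}

The pass would then use #pragma vertex VertDefault and #pragma fragment FragCurve, with VertDefault coming straight from StdLib.hlsl so the full-screen triangle is set up correctly.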
I'm currently stuck on a shader that I'm writing. I'm trying to create a rain shader.
I have set up 3 particle systems which simulate rain, and a camera to look at this simulation. The camera view is what I use as a texture. In my shader I am now trying to make a normal map from that texture, but I don't know how to do it.
Shader "Unlit/Rain"
{
Properties
{
_MainTex("Albedo (RGB)", 2D) = "white" {}
_Color("Color", Color) = (1,1,1,1)
_NormalIntensity("NormalIntensity",Float) = 1
}
SubShader
{
Tags { "RenderType" = "Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
#include "Lighting.cginc"
#include "AutoLight.cginc"
struct VertexInput {
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
float4 normal : NORMAL;
float3 tangent : TANGENT;
};
struct VertexOutput {
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
float2 uv1 : TEXCOORD1;
float4 normals : NORMAL;
float3 tangentSpaceLight: TANGENT;
};
sampler2D _MainTex;
float4 _MainTex_ST;
half4 _Color;
float _NormalIntensity;
VertexOutput vert(VertexInput v) {
VertexOutput o = (VertexOutput)0; // zero-init: uv and tangentSpaceLight are otherwise never written
o.normals = v.normal;
o.uv1 = v.uv;
o.vertex = UnityObjectToClipPos( v.vertex );
// o.uv = TRANSFORM_TEX( v.uv, _MainTex ); // used for texture
return o;
}
float4 frag(VertexOutput i) : COLOR{
float4 col2 = tex2D(_MainTex, i.uv1);
return col2 * i.normals * 5;
}
ENDCG
}
}
}
This is what the camera sees. I set the TargetTexture for this camera to be a texture I created.
In my shader I then use that texture as the albedo property.
So what I want to do now is derive normals from that texture to create a bump map.
It looks like your "TargetTexture" is giving you back a height map. Here is a post I found about how to turn a height map into a normal map. I've mashed your original code together with the core of that forum post, and I output the normals as color so you can test and see how this works:
Shader "Unlit/HeightToNormal"
{
Properties
{
_MainTex("Albedo (RGB)", 2D) = "white" {}
_Color("Color", Color) = (1,1,1,1)
_NormalIntensity("NormalIntensity",Float) = 1
_HeightMapSizeX("HeightMapSizeX",Float) = 1024
_HeightMapSizeY("HeightMapSizeY",Float) = 1024
}
SubShader
{
Tags { "RenderType" = "Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
#include "Lighting.cginc"
#include "AutoLight.cginc"
struct VertexInput {
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
float4 normal : NORMAL;
float3 tangent : TANGENT;
};
struct VertexOutput {
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
float2 uv1 : TEXCOORD1;
float4 normals : NORMAL;
//float3 tangentSpaceLight: TANGENT;
};
sampler2D _MainTex;
float4 _MainTex_ST;
half4 _Color;
float _NormalIntensity;
float _HeightMapSizeX;
float _HeightMapSizeY;
VertexOutput vert(VertexInput v) {
VertexOutput o;
o.uv = TRANSFORM_TEX( v.uv, _MainTex ); // used for texture
o.uv1 = v.uv;
o.normals = v.normal;
o.vertex = UnityObjectToClipPos(v.vertex);
return o;
}
float4 frag(VertexOutput i) : COLOR
{
float me = tex2D(_MainTex,i.uv1).x;
float n = tex2D(_MainTex,float2(i.uv1.x, i.uv1.y + 1.0 / _HeightMapSizeY)).x;
float s = tex2D(_MainTex,float2(i.uv1.x, i.uv1.y - 1.0 / _HeightMapSizeY)).x;
float e = tex2D(_MainTex,float2(i.uv1.x + 1.0 / _HeightMapSizeX,i.uv1.y)).x;
float w = tex2D(_MainTex,float2(i.uv1.x - 1.0 / _HeightMapSizeX,i.uv1.y)).x;
// defining starting normal as color has some interesting effects, generally makes this more flexible
float3 norm = _Color.rgb;
float3 temp = norm; //a temporary vector that is not parallel to norm
if (norm.x == 1)
temp.y += 0.5;
else
temp.x += 0.5;
//form a basis with norm being one of the axes:
float3 perp1 = normalize(cross(i.normals,temp));
float3 perp2 = normalize(cross(i.normals,perp1));
//use the basis to move the normal in its own space by the offset
float3 normalOffset = -_NormalIntensity * (((n - me) - (s - me)) * perp1 + ((e - me) - (w - me)) * perp2);
norm += normalOffset;
norm = normalize(norm);
// it's also interesting to output temp, perp1, and perp2, or combinations of the float samples.
return float4(norm, 1);
}
ENDCG
}
}
}
To generate a normal map from a height map, you use the oriented rate of change in your height map to come up with a normal vector, which can be represented with 3 float values (or color channels, if it's an image). You sample the point of the image you are on, then take small steps away from that point in the four cardinal directions. Using the cross product to guarantee orthogonality, you can define a basis. Scaling the two basis vectors by your oriented steps on the image and adding them together gives a "normal offset", the 3D representation approximating the oriented change in value on your height map. Basically, it's your normal.
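As an aside, when the starting normal is simply "straight up" in tangent space, the whole basis construction above collapses to the familiar central-difference shortcut. A minimal sketch using the same n/s/e/w samples (HeightToNormal is just an illustrative helper name):

// Tangent-space shortcut: the height gradient becomes the normal's xy tilt.
// 'strength' plays the same role as _NormalIntensity above.
float3 HeightToNormal(float n, float s, float e, float w, float strength)
{
    return normalize(float3((w - e) * strength, (s - n) * strength, 1.0));
}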
You can see the effects of me playing with normal intensity here, and the "normal color" here. When this looks right for your use case, you can try using the normals as actual normals instead of colored output.
Some tweaking of values will probably still be required. Good luck!
I took a billboard shader from the internet, but I get this warning message: "Shader warning in 'Custom/Billboard': Use of UNITY_MATRIX_MV is detected. To transform a vertex into view space, consider using UnityObjectToViewPos for better performance."
I'm not sure how to update the code to use the new function. Here is the shader:
Shader "Custom/Billboard"
{
Properties{
_MainTex("Texture Image", 2D) = "white" {}
_ScaleX("Scale X", Float) = 1.0
_ScaleY("Scale Y", Float) = 1.0
}
SubShader{
Tags{"Queue" = "Transparent" "RenderType" = "Transparent" }
Pass{
CGPROGRAM
#include "UnityCG.cginc"
#pragma vertex vert
#pragma fragment frag
uniform sampler2D _MainTex;
uniform float _ScaleX;
uniform float _ScaleY;
struct vertexInput {
float4 vertex : POSITION;
float4 tex : TEXCOORD0;
};
struct vertexOutput {
float4 pos : POSITION;
float4 tex : TEXCOORD0;
};
vertexOutput vert(vertexInput input)
{
vertexOutput output;
output.pos = mul(UNITY_MATRIX_P,
mul(UNITY_MATRIX_MV, float4(0,0,0,1))
+ float4(input.vertex.xyz, 0));
output.tex = input.tex;
return output;
}
float4 frag(vertexOutput input) : COLOR
{
return tex2D(_MainTex, float2(input.tex.xy));
}
ENDCG
}
}
}
You don't really need to do anything; it's only a performance hint. If you do want to change it though, if nothing else just to make the warning go away, you can replace this:
mul(UNITY_MATRIX_MV, float4(0,0,0,1))
With this:
UnityObjectToViewPos((float3)0)
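The full vertex function then reads (same math; UnityObjectToViewPos of the zero vector is exactly the view-space position of the object's pivot):

vertexOutput vert(vertexInput input)
{
    vertexOutput output;
    // pivot in view space, plus the raw vertex offset, re-projected:
    // identical result to the UNITY_MATRIX_MV version above
    output.pos = mul(UNITY_MATRIX_P,
        float4(UnityObjectToViewPos((float3)0), 1)
        + float4(input.vertex.xyz, 0));
    output.tex = input.tex;
    return output;
}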
I would like to store my fragment's world coordinates in 3 textures which respectively represent the x, y, z coordinates of the fragment. Although the textures are in ARGB32 format, I can't seem to write a value bigger than 1.0. Here is my shader code. What is the proper way to do this?
Shader "Custom/FirstPass"
{
Properties { }
SubShader
{
Tags { "RenderType"="Opaque" }
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma target 5.0
#include "UnityCG.cginc"
struct vertexData
{
float4 vertex : POSITION;
float3 normal : NORMAL;
float4 color : COLOR;
float4 texcoord0 : TEXCOORD0;
//...
};
struct fragmentData
{
float4 positionC : SV_POSITION;
float4 positionW : TEXCOORD1;
//float4 color : COLOR;
//...
};
struct fragmentOutput
{
float Gx : SV_Target0;
float Gy : SV_Target1;
float Gz : SV_Target2;
float depth:SV_Target3;
};
fragmentData vert(vertexData v)
{
fragmentData o;
o.positionW=mul(unity_ObjectToWorld,v.vertex);
o.positionC=UnityObjectToClipPos(v.vertex);
return o;
}
fragmentOutput frag(fragmentData fragment)
{
fragmentOutput output;
output.Gx =fragment.positionW.x;
output.Gy= fragment.positionW.y;
output.Gz = fragment.positionW.z;
output.depth=fragment.positionC.z;
//... write to other renderbuffers
return output;
}
ENDCG
}
}
}
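For reference, ARGB32 is 8 bits per channel and stores values clamped to [0,1], so raw world coordinates will always be truncated; the usual fix is a floating-point render target (RenderTextureFormat.ARGBFloat or ARGBHalf, set on the C# side). If the format has to stay ARGB32, a shader-side workaround is to compress coordinates into [0,1] against a known scene bound and decode on read. A minimal sketch, where _WorldBound is an assumed property holding the half-extent of the scene:

float _WorldBound; // assumed: half-extent of the region the scene occupies

fragmentOutput frag(fragmentData fragment)
{
    fragmentOutput output;
    // map [-_WorldBound, +_WorldBound] into [0,1] so ARGB32 can hold it;
    // decode on read with: world = (encoded * 2 - 1) * _WorldBound
    output.Gx = fragment.positionW.x / _WorldBound * 0.5 + 0.5;
    output.Gy = fragment.positionW.y / _WorldBound * 0.5 + 0.5;
    output.Gz = fragment.positionW.z / _WorldBound * 0.5 + 0.5;
    output.depth = fragment.positionC.z;
    return output;
}

Bear in mind this leaves only 8 bits of precision per axis, which is rarely enough for reconstruction; the float render-target route is the robust one.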
I'm new to writing shaders, and I'm working on a practice geometry shader. The goal is for the "normal" pass to produce transparent pixels so that the object is invisible, while the "geometry" pass takes each triangle and redraws it in the same place as the original, but colored black. Thus, I expect the output to be the original object, but black. However, my geometry pass seems to produce no output I can see:
Here is the code I currently have for the shader.
Shader "Outlined/Silhouette2" {
Properties
{
_Color("Color", Color) = (0,0,0,1)
_MainColor("Main Color", Color) = (1,1,1,1)
_Thickness("Thickness", float) = 4
_MainTex("Main Texture", 2D) = "white" {}
}
SubShader
{
Tags{ "Queue" = "Geometry" "IgnoreProjector" = "True" "RenderType" = "Transparent" }
Blend SrcAlpha OneMinusSrcAlpha
Cull Back
ZTest always
Pass
{
Stencil{
Ref 1
Comp always
Pass replace
}
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_fog
#include "UnityCG.cginc"
struct v2g
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float3 viewT : TANGENT;
float3 normals : NORMAL;
};
struct g2f
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float3 viewT : TANGENT;
float3 normals : NORMAL;
};
float4 _LightColor0;
sampler2D _MainTex;
float4 _MainColor;
v2g vert(appdata_base v)
{
v2g OUT;
OUT.pos = mul(UNITY_MATRIX_MVP, v.vertex);
OUT.uv = v.texcoord;
OUT.normals = v.normal;
OUT.viewT = ObjSpaceViewDir(v.vertex);
return OUT;
}
half4 frag(g2f IN) : COLOR
{
//this renders nothing, if you want the base mesh and color
//fill this in with a standard fragment shader calculation
float4 texColor = tex2D(_MainTex, IN.uv);
float3 normal = mul(float4(IN.normals, 0.0), _Object2World).xyz;
float3 normalDirection = normalize(normal);
float3 lightDirection = normalize(_WorldSpaceLightPos0.xyz * -1);
float3 diffuse = _LightColor0.rgb * _MainColor.rgb * max(0.0, dot(normalDirection, lightDirection));
texColor = float4(diffuse,1) * texColor;
//
//return texColor;
return float4(0, 0, 0, 0);
}
ENDCG
}
Pass
{
Stencil{
Ref 0
Comp equal
}
CGPROGRAM
#include "UnityCG.cginc"
#pragma target 4.0
#pragma vertex vert
#pragma geometry geom
#pragma fragment frag
half4 _Color;
float _Thickness;
struct v2g
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float4 local_pos: TEXCOORD1;
float3 viewT : TANGENT;
float3 normals : NORMAL;
};
struct g2f
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float3 viewT : TANGENT;
float3 normals : NORMAL;
};
v2g vert(appdata_base v)
{
v2g OUT;
OUT.pos = mul(UNITY_MATRIX_MVP, v.vertex);
OUT.local_pos = v.vertex;
OUT.uv = v.texcoord;
OUT.normals = v.normal;
OUT.viewT = ObjSpaceViewDir(v.vertex);
return OUT;
}
[maxvertexcount(12)]
void geom(triangle v2g IN[3], inout TriangleStream<g2f> triStream)
{
g2f OUT;
OUT.pos = IN[0].pos;
OUT.uv = IN[0].uv;
OUT.viewT = IN[0].viewT;
OUT.normals = IN[0].normals;
triStream.Append(OUT);
OUT.pos = IN[1].pos;
OUT.uv = IN[1].uv;
OUT.viewT = IN[1].viewT;
OUT.normals = IN[1].normals;
triStream.Append(OUT);
OUT.pos = IN[2].pos;
OUT.uv = IN[2].uv;
OUT.viewT = IN[2].viewT;
OUT.normals = IN[2].normals;
triStream.Append(OUT);
}
half4 frag(g2f IN) : COLOR
{
_Color.a = 1;
return _Color;
}
ENDCG
}
}
FallBack "Diffuse"
}
Since all I'm doing is taking the same triangles I've been given and appending them to the triangle stream, I'm not sure what I could be doing wrong to cause nothing to appear. Does anyone know why this is happening?
I notice that you don't call triStream.RestartStrip() after feeding the 3 vertices of a triangle into your geometry shader.
This informs the stream that a particular triangle strip has ended and a new triangle strip will begin. If you don't do this, each (single) vertex passed to the stream adds on to the existing triangle strip, following the triangle strip pattern: https://en.wikipedia.org/wiki/Triangle_strip
I'm fairly new to geo-shaders myself, so I'm not sure if this is your issue or not. I don't THINK the RestartStrip function is called automatically at the end of each geometry shader, but I have not tested this; rather, I think it gets called automatically only when you reach the maxvertexcount. For a single triangle, I would set maxvertexcount to 3, not the 12 you have now, and call RestartStrip after the three Appends, something like the sketch below.
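Something like this (untested, just the shape of it):

[maxvertexcount(3)]
void geom(triangle v2g IN[3], inout TriangleStream<g2f> triStream)
{
    g2f OUT;
    for (int i = 0; i < 3; i++)
    {
        OUT.pos = IN[i].pos;
        OUT.uv = IN[i].uv;
        OUT.viewT = IN[i].viewT;
        OUT.normals = IN[i].normals;
        triStream.Append(OUT);
    }
    triStream.RestartStrip(); // explicitly close this triangle's strip
}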
(I also know it can be tough to get ANY shader answers, so I figured I'd try to help.)