I would like to store my fragment's world coordinates in 3 textures, which respectively hold the x, y, z components of the position. Although the textures are in ARGB32 format, I can't seem to write a value bigger than 1.0. Here is my shader code. What is the proper way to do this?
Shader "Custom/FirstPass"
{
Properties { }
SubShader
{
Tags { "RenderType"="Opaque" }
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma target 5.0
#include "UnityCG.cginc"
struct vertexData
{
float4 vertex : POSITION;
float3 normal : NORMAL;
float4 color : COLOR;
float4 texcoord0 : TEXCOORD0;
//...
};
struct fragmentData
{
float4 positionC : SV_POSITION;
float4 positionW : TEXCOORD1;
//float4 color : COLOR;
//...
};
struct fragmentOutput
{
float Gx : SV_Target0;
float Gy : SV_Target1;
float Gz : SV_Target2;
float depth:SV_Target3;
};
fragmentData vert(vertexData v)
{
fragmentData o;
o.positionW=mul(unity_ObjectToWorld,v.vertex);
o.positionC=UnityObjectToClipPos(v.vertex);
return o;
}
fragmentOutput frag(fragmentData fragment)
{
fragmentOutput output;
output.Gx =fragment.positionW.x;
output.Gy= fragment.positionW.y;
output.Gz = fragment.positionW.z;
output.depth=fragment.positionC.z;
//... write to other renderbuffers
return output;
}
ENDCG
}
}
}
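The clamping here comes from the render target, not the shader: ARGB32 stores 8 bits per channel, normalized to [0, 1], so anything outside that range is clamped on write. To store arbitrary world coordinates you need floating-point render targets. A minimal C# sketch of one way to bind them (the script and field names are mine, not from the post; RFloat gives one unclamped 32-bit float per pixel, matching the single-float SV_Target outputs above):

using UnityEngine;

public class WorldPositionTargets : MonoBehaviour
{
    public Camera gBufferCamera;   // camera that renders the FirstPass shader
    RenderTexture[] targets;

    void Start()
    {
        targets = new RenderTexture[4];
        for (int i = 0; i < targets.Length; i++)
        {
            // RFloat = one full 32-bit float per pixel, no [0,1] clamp.
            // Give the first target a depth buffer for the whole MRT set.
            targets[i] = new RenderTexture(Screen.width, Screen.height,
                                           i == 0 ? 24 : 0,
                                           RenderTextureFormat.RFloat);
            targets[i].Create();
        }

        var colorBuffers = new RenderBuffer[4];
        for (int i = 0; i < 4; i++)
            colorBuffers[i] = targets[i].colorBuffer;

        // The four color buffers map to SV_Target0..SV_Target3 in the shader.
        gBufferCamera.SetTargetBuffers(colorBuffers, targets[0].depthBuffer);
    }
}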
I am trying to write a cutoff shader that samples _MainTex in a ShadowCaster pass. Here is the pass:
Pass {
    Name "ShadowCaster"
    Tags {
        "LightMode" = "ShadowCaster"
    }
    Offset 1, 1
    Cull Off

    CGPROGRAM
    #pragma vertex vert
    #pragma fragment frag
    // ...
    #pragma multi_compile_shadowcaster
    // ...
    // structs and variables
    // ...
}
The fragment shader looks like this:
float4 frag(VertexOutput i, float facing : VFACE) : COLOR {
    // no shadow on the pixel if alpha is less than a cutoff
    float4 texcol = tex2D( _MainTex, i.uv1.xy );
    // but it's always = 0, so everything is cut off
    clip(texcol.a * _Color.a - _Cutoff);
    // it returns 0, which doesn't matter
    SHADOW_CASTER_FRAGMENT(i)
}
The texcol always has 0 in the alpha channel, as I can verify by testing clip(texcol.a - 0.0) and varying the number. And the UV value I pass to tex2D is copied straight from the previous pass:
VertexOutput vert (VertexInput v) {
    VertexOutput o = (VertexOutput)0;
    o.uv1 = v.texcoord1;
    // ...
    TRANSFER_SHADOW_CASTER(o)
    return o;
}
And the structs are:
struct VertexInput {
    float4 vertex : POSITION;
    float2 texcoord1 : TEXCOORD1;
    float2 texcoord2 : TEXCOORD2;
};
struct VertexOutput {
    V2F_SHADOW_CASTER;
    float2 uv1 : TEXCOORD1;
    float2 uv2 : TEXCOORD2;
    float4 posWorld : TEXCOORD3;
};
What is wrong with the texture coordinate I used in tex2D? Or is it something else? I am sure the texture itself provides the correct alpha value.
Update: the full shader pass is in a gist; here is the relevant part: https://gist.github.com/KHN190/0fd1d2bab899fbe2fcc809c6755b3d74
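One likely culprit, offered as an educated guess since this question has no answer above: _MainTex is conventionally mapped through the first UV set, and if the mesh carries no data in TEXCOORD1, o.uv1 arrives as zero and tex2D always returns the texel at (0, 0), whose alpha may well be 0. A sketch of the usual pattern, sampling with texcoord0 scaled by the material's tiling/offset:

struct VertexInput {
    float4 vertex : POSITION;
    float2 texcoord0 : TEXCOORD0;   // primary UV set, the one _MainTex uses
};

sampler2D _MainTex;
float4 _MainTex_ST;                 // tiling/offset, filled in by Unity

VertexOutput vert (VertexInput v) {
    VertexOutput o = (VertexOutput)0;
    // TRANSFORM_TEX applies _MainTex_ST so tiling/offset match the material
    o.uv1 = TRANSFORM_TEX(v.texcoord0, _MainTex);
    TRANSFER_SHADOW_CASTER(o)
    return o;
}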
I'm pretty new to shader coding, but I made this one by combining two unlit shaders.
The shader is meant to create an outline and let me interpolate between two textures, but when I try to compile it, Unity reports this error: invalid subscript 'vertex' at line 61.
I've tried some things but I can't get this shader to work properly. If someone knows what I have to do, I'd be so thankful.
Shader "Unlit/Combined"
{
Properties
{
_MainTex("Texture", 2D) = "white" {}
_SecondaryTex("Secondary Texture", 2D) = "white" {}
_LerpValue("Transition float", Range(0,1)) = 0.5
_OutlineColor("Outline color", color) = (0,0,0,1)
_OutlineWidth("Outline width", Range(1.0,5.0)) = 1.01
}
SubShader
{
Tags { "RenderType" = "Opaque" "Queue" = "Transparent"}
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// make fog work
#pragma multi_compile_fog
#include "UnityCG.cginc"
struct appdata
{
//float4 vertex : POSITION;
float2 uv : TEXCOORD0;
float3 normal : NORMAL;
};
struct v2f
{
float2 uv : TEXCOORD0;
UNITY_FOG_COORDS(1)
float4 outvertex : SV_POSITION;
float4 pos : POSITION;
float3 normal : NORMAL;
};
sampler2D _MainTex;
float4 _MainTex_ST;
sampler2D _SecondaryTex;
float4 _SecondaryTex_ST;
float _LerpValue;
float _OutlineWidth;
float4 _OutlineColor;
v2f vert(appdata v)
{
v2f o;
v.vertex.xyz *= _OutlineWidth;
o.outvertex = UnityObjectToClipPos(v.vertex);
o.pos = UnityObjectToClipPos(v.vertex);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
UNITY_TRANSFER_FOG(o,o.vertex);
return o;
}
fixed4 frag(v2f i) : SV_Target
{
// sample the texture
fixed4 col = lerp(tex2D(_MainTex, i.uv),tex2D(_SecondaryTex, i.uv), _LerpValue);
return col;
return _OutlineColor;
}
ENDCG
}
Pass//Normal render
{
ZWrite On
Material
{
Diffuse[_Color]
Ambient[_Color]
}
Lighting On
SetTexture[_MainTex]
{
ConstantColor[_Color]
}
SetTexture[_MainTex]
{
Combine previous * primary DOUBLE
}
}
}
}
The line that has a problem is line 61:
v.vertex.xyz *= _OutlineWidth;
If you look at the type that v is declared as, it's declared as appdata:
v2f vert(appdata v)
If you look at the definition of appdata:
struct appdata
{
    //float4 vertex : POSITION;
    float2 uv : TEXCOORD0;
    float3 normal : NORMAL;
};
You'll see that it does not have a vertex property, only uv and normal.
So the line that's an issue should be changed to one of those, probably normal, since you're trying to modify a three-component property (and uv only has xy).
v.normal.xyz *= _OutlineWidth;
Alternatively, you can modify appdata to have a vertex property by uncommenting the commented-out vertex:
struct appdata
{
    float4 vertex : POSITION;
    float2 uv : TEXCOORD0;
    float3 normal : NORMAL;
};
Or by removing line 61, depending on what you're trying to do.
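For completeness, here is a sketch of how the whole vert function might look with the vertex field restored. This is my reading of the intent, not part of the original answer; note that the original also passes o.vertex to UNITY_TRANSFER_FOG even though the v2f struct names that field outvertex, which would trip the same kind of error next:

v2f vert(appdata v)
{
    v2f o;
    v.vertex.xyz *= _OutlineWidth;                // inflate the outline shell
    o.outvertex = UnityObjectToClipPos(v.vertex);
    o.pos = o.outvertex;
    o.uv = TRANSFORM_TEX(v.uv, _MainTex);
    o.normal = v.normal;                          // keep the struct fully written
    UNITY_TRANSFER_FOG(o, o.outvertex);           // v2f has no 'vertex' member
    return o;
}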
I took a billboard shader from the internet, but I get this warning message: "Shader warning in 'Custom/Billboard': Use of UNITY_MATRIX_MV is detected. To transform a vertex into view space, consider using UnityObjectToViewPos for better performance."
I'm not sure how to update the code to use the new function. Here is the shader:
Shader "Custom/Billboard"
{
Properties{
_MainTex("Texture Image", 2D) = "white" {}
_ScaleX("Scale X", Float) = 1.0
_ScaleY("Scale Y", Float) = 1.0
}
SubShader{
Tags{"Queue" = "Transparent" "RenderType" = "Transparent" }
Pass{
CGPROGRAM
#include "UnityCG.cginc"
#pragma vertex vert
#pragma fragment frag
uniform sampler2D _MainTex;
uniform float _ScaleX;
uniform float _ScaleY;
struct vertexInput {
float4 vertex : POSITION;
float4 tex : TEXCOORD0;
};
struct vertexOutput {
float4 pos : POSITION;
float4 tex : TEXCOORD0;
};
vertexOutput vert(vertexInput input)
{
vertexOutput output;
output.pos = mul(UNITY_MATRIX_P,
mul(UNITY_MATRIX_MV, float4(0,0,0,1))
+ float4(input.vertex.xyz, 0));
output.tex = input.tex;
return output;
}
float4 frag(vertexOutput input) : COLOR
{
return tex2D(_MainTex, float2(input.tex.xy));
}
ENDCG
}
}
}
You don't really need to do anything; the warning is only a performance hint. If you do want to change it, though, if nothing else just to make the warning go away, you can replace this:
mul(UNITY_MATRIX_MV, float4(0,0,0,1))
With this:
UnityObjectToViewPos((float3)0)
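One caveat, which the one-line swap glosses over: UnityObjectToViewPos returns a float3, while the old mul returned a float4 point with w = 1, so the result should be promoted back to a float4 before adding the view-space offset; otherwise the float4 gets truncated in the addition and the projection goes wrong. A sketch of the full vertex function after the swap:

vertexOutput vert(vertexInput input)
{
    vertexOutput output;
    // UnityObjectToViewPos returns float3, so rebuild the float4 point
    // (w = 1) that mul(UNITY_MATRIX_MV, float4(0,0,0,1)) used to give.
    float3 viewOrigin = UnityObjectToViewPos(float3(0, 0, 0));
    output.pos = mul(UNITY_MATRIX_P,
                     float4(viewOrigin, 1.0) + float4(input.vertex.xyz, 0.0));
    output.tex = input.tex;
    return output;
}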
I'm new to writing shaders and I'm working on a practice geometry shader. The goal is for the "normal" pass to produce transparent pixels, making the object invisible, while the "geometry" pass takes each triangle and redraws it in the same place as the original, colored black. Thus, I expect the output to be the original object, but black. However, my geometry pass seems to produce no output I can see:
Here is the code I currently have for the shader.
Shader "Outlined/Silhouette2" {
Properties
{
_Color("Color", Color) = (0,0,0,1)
_MainColor("Main Color", Color) = (1,1,1,1)
_Thickness("Thickness", float) = 4
_MainTex("Main Texture", 2D) = "white" {}
}
SubShader
{
Tags{ "Queue" = "Geometry" "IgnoreProjector" = "True" "RenderType" = "Transparent" }
Blend SrcAlpha OneMinusSrcAlpha
Cull Back
ZTest always
Pass
{
Stencil{
Ref 1
Comp always
Pass replace
}
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_fog
#include "UnityCG.cginc"
struct v2g
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float3 viewT : TANGENT;
float3 normals : NORMAL;
};
struct g2f
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float3 viewT : TANGENT;
float3 normals : NORMAL;
};
float4 _LightColor0;
sampler2D _MainTex;
float4 _MainColor;
v2g vert(appdata_base v)
{
v2g OUT;
OUT.pos = mul(UNITY_MATRIX_MVP, v.vertex);
OUT.uv = v.texcoord;
OUT.normals = v.normal;
OUT.viewT = ObjSpaceViewDir(v.vertex);
return OUT;
}
half4 frag(g2f IN) : COLOR
{
//this renders nothing, if you want the base mesh and color
//fill this in with a standard fragment shader calculation
float4 texColor = tex2D(_MainTex, IN.uv);
float3 normal = mul(float4(IN.normals, 0.0), _Object2World).xyz;
float3 normalDirection = normalize(normal);
float3 lightDirection = normalize(_WorldSpaceLightPos0.xyz * -1);
float3 diffuse = _LightColor0.rgb * _MainColor.rgb * max(0.0, dot(normalDirection, lightDirection));
texColor = float4(diffuse,1) * texColor;
//
//return texColor;
return float4(0, 0, 0, 0);
}
ENDCG
}
Pass
{
Stencil{
Ref 0
Comp equal
}
CGPROGRAM
#include "UnityCG.cginc"
#pragma target 4.0
#pragma vertex vert
#pragma geometry geom
#pragma fragment frag
half4 _Color;
float _Thickness;
struct v2g
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float4 local_pos: TEXCOORD1;
float3 viewT : TANGENT;
float3 normals : NORMAL;
};
struct g2f
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float3 viewT : TANGENT;
float3 normals : NORMAL;
};
v2g vert(appdata_base v)
{
v2g OUT;
OUT.pos = mul(UNITY_MATRIX_MVP, v.vertex);
OUT.local_pos = v.vertex;
OUT.uv = v.texcoord;
OUT.normals = v.normal;
OUT.viewT = ObjSpaceViewDir(v.vertex);
return OUT;
}
[maxvertexcount(12)]
void geom(triangle v2g IN[3], inout TriangleStream<g2f> triStream)
{
g2f OUT;
OUT.pos = IN[0].pos;
OUT.uv = IN[0].uv;
OUT.viewT = IN[0].viewT;
OUT.normals = IN[0].normals;
triStream.Append(OUT);
OUT.pos = IN[1].pos;
OUT.uv = IN[1].uv;
OUT.viewT = IN[1].viewT;
OUT.normals = IN[1].normals;
triStream.Append(OUT);
OUT.pos = IN[2].pos;
OUT.uv = IN[2].uv;
OUT.viewT = IN[2].viewT;
OUT.normals = IN[2].normals;
triStream.Append(OUT);
}
half4 frag(g2f IN) : COLOR
{
_Color.a = 1;
return _Color;
}
ENDCG
}
}
FallBack "Diffuse"
}
Since all I'm doing is taking the same triangles I've been given and appending them to the triangle stream, I'm not sure what I could be doing wrong to cause nothing to appear. Does anyone know why this is happening?
I notice that you don't call triStream.RestartStrip(); after feeding the 3 vertices of a triangle into your geometry shader.
This informs the stream that a particular triangle strip has ended and a new triangle strip will begin. If you don't do this, each (single) vertex passed to the stream will add on to the existing triangle strip, following the triangle-strip pattern: https://en.wikipedia.org/wiki/Triangle_strip
I'm fairly new to geometry shaders myself, so I'm not sure if this is your issue or not. I don't THINK the RestartStrip function is called automatically at the end of each geometry shader, though I have not tested this; rather, I think it gets called automatically only when you reach the maxvertexcount. For a single triangle, I would set the maxvertexcount to 3, not the 12 you have now.
(I also know it can be tough to get ANY shader answers, so I figured I'd try to help.)
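Applied to the shader above, the suggestion would look something like this (a sketch; I've also folded the three repeated vertex copies into a loop):

[maxvertexcount(3)]
void geom(triangle v2g IN[3], inout TriangleStream<g2f> triStream)
{
    g2f OUT;
    for (int i = 0; i < 3; i++)
    {
        OUT.pos = IN[i].pos;
        OUT.uv = IN[i].uv;
        OUT.viewT = IN[i].viewT;
        OUT.normals = IN[i].normals;
        triStream.Append(OUT);
    }
    triStream.RestartStrip(); // explicitly end this triangle's strip
}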
Currently, I am trying to make a z-depth effect as an image effect, but the resulting image is not rendered correctly; something is wrong.
If I use the Standard shader (Unity 5), the result is rendered correctly (the z-depth image is fine), but not with an unlit shader.
What is happening? If you have any idea, please tell me why.
Shader:
Shader "Custom/RenderDepth"
{
Properties
{
_DepthLevel ("Depth Level", Range(1, 3)) = 2
}
SubShader
{
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
uniform sampler2D_float _CameraDepthTexture;
uniform fixed _DepthLevel;
uniform half4 _MainTex_TexelSize;
struct uinput
{
float4 pos : POSITION;
half2 uv : TEXCOORD0;
};
struct uoutput
{
float4 pos : SV_POSITION;
half2 uv : TEXCOORD0;
};
uoutput vert(uinput i)
{
uoutput o;
o.pos = mul(UNITY_MATRIX_MVP, i.pos);
o.uv = MultiplyUV(UNITY_MATRIX_TEXTURE0, i.uv);
return o;
}
fixed4 frag(uoutput o) : COLOR
{
float depth = UNITY_SAMPLE_DEPTH(tex2D(_CameraDepthTexture, o.uv));
depth = pow(Linear01Depth(depth), _DepthLevel);
return depth;
}
ENDCG
}
}
}
C#:
using UnityEngine;
using System.Collections;

[ExecuteInEditMode]
[RequireComponent(typeof(Camera))]
public class Test : MonoBehaviour
{
    public Camera _cam;
    public Material mat;
    public float DepthLevel = 1.0F;

    void Start()
    {
        _cam.depthTextureMode |= DepthTextureMode.Depth;
    }

    void Update()
    {
    }

    void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        mat.SetFloat("_DepthLevel", DepthLevel);
        Graphics.Blit(source, destination, mat);
    }
}
I've found a solution: I use the SHADOWCASTER pass from the legacy VertexLit shader prior to rendering my unlit object.
Your shader would then look like this:
Shader "Custom/RenderDepth"
{
Properties
{
_DepthLevel ("Depth Level", Range(1, 3)) = 2
}
SubShader
{
UsePass "Legacy Shaders/VertexLit/SHADOWCASTER"
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
uniform sampler2D_float _CameraDepthTexture;
uniform fixed _DepthLevel;
uniform half4 _MainTex_TexelSize;
struct uinput
{
float4 pos : POSITION;
half2 uv : TEXCOORD0;
};
struct uoutput
{
float4 pos : SV_POSITION;
half2 uv : TEXCOORD0;
};
uoutput vert(uinput i)
{
uoutput o;
o.pos = mul(UNITY_MATRIX_MVP, i.pos);
o.uv = MultiplyUV(UNITY_MATRIX_TEXTURE0, i.uv);
return o;
}
fixed4 frag(uoutput o) : COLOR
{
float depth = UNITY_SAMPLE_DEPTH(tex2D(_CameraDepthTexture, o.uv));
depth = pow(Linear01Depth(depth), _DepthLevel);
return depth;
}
ENDCG
}
}
}
You need to provide a fallback option in the unlit shader so it can use the other needed passes (depth/shadow/etc.) from the fallback shader. Adding the following line to your unlit shader should help:
Fallback "Diffuse"