If I have a Unity shader acting as a basic screen-wide image filter, is there any way (outside of making a material glowing/unshaded) to "recognize" specific pixels of a material? Say I want a heat-vision filter and want specific objects to be considered "hot". How could the pixel shader check their color, or anything else, and understand "this is hot"? (If I made the material glowing/unshaded, I could encode specific properties into subtle RGB changes, e.g. maybe if all of R, G, and B end in *.***5 it would mean hot, but that wouldn't survive shading.) Thanks!
First you should convert your image to grayscale, then apply a color spectrum to it. In a typical heat-map spectrum, the closer to white, the warmer; the closer to black, the colder.
You can then apply this effect using a replacement shader.
Shader"Hidden/HeatMap"{
Properties{
_MainTex("_MainTex", 2D) = "white"{}
_Amount("Amount",Float) = 1
[Toggle]_Enable("Enable",Float) = 1
}
SubShader{
Pass{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma fragmentoption ARB_precision_hint_fastest
#include "UnityCG.cginc"
struct appdata{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
sampler2D _MainTex;
float _Amount,_Enable;
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
v2f vert(appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.uv;
return o;
}
fixed greyScale(fixed3 rgb) {
return dot(rgb, fixed3(0.29, 0.60, 0.11));
}
fixed3 heatMap(fixed greyValue) {
fixed3 heat = fixed3(0,0,0);
heat.r = smoothstep(0.4, 0.8, greyValue);
half OutColorGreen = smoothstep(0.0, 0.7, greyValue);
half InColorGreen = smoothstep(1.0, 0.7, greyValue);
heat.g = min(InColorGreen,OutColorGreen);
float OutColorBlue = smoothstep(1.0, 0.0, greyValue);
float InColorBlue = smoothstep(0.0, 0.25, greyValue);
heat.b = min(OutColorBlue,InColorBlue);
return heat;
}
fixed4 frag(v2f i) : COLOR{
fixed2 uv = i.uv;
fixed3 mainTex = tex2D(_MainTex, uv).rgb;
fixed grayValueA = greyScale(mainTex);
fixed3 rgbOut;
rgbOut = heatMap(uv.y);
rgbOut = heatMap(grayValueA * _Amount);
return fixed4(lerp(mainTex,rgbOut,_Enable),1);
}ENDCG
}
}
}
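To run this as a full-screen filter you blit the camera image through a material that uses the shader. A minimal driver sketch, assuming a material created from the Hidden/HeatMap shader above (this is the same OnRenderImage/Blit pattern as the depth-based script further down):
using UnityEngine;

[ExecuteInEditMode]
public class HeatMapEffect : MonoBehaviour
{
    public Material mat; // a material using the Hidden/HeatMap shader

    void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        // Run every rendered frame through the heat-map material.
        Graphics.Blit(source, destination, mat);
    }
}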
Let's check it out once more:
First you should convert your image to grayscale,
then recolorize it by some method, for example:
Using the Z-Buffer
In computer graphics, z-buffering, also known as depth buffering, is the management of image depth coordinates in 3D graphics, usually done in hardware, sometimes in software.
https://en.wikipedia.org/wiki/Z-buffering
using UnityEngine;

[ExecuteInEditMode]
public class CameraScript : MonoBehaviour
{
    public Material mat;

    void Start()
    {
        // Ask the camera to render a depth texture we can sample in the shader.
        GetComponent<Camera>().depthTextureMode = DepthTextureMode.Depth;
    }

    void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        Graphics.Blit(source, destination, mat);
    }
}
Shader "Custom/HeatVision"
{
SubShader
{
Tags { "RenderType"="Opaque" }
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct v2f
{
float4 pos : SV_POSITION;
float4 screenuv : TEXCOORD1;
};
v2f vert (appdata_base v)
{
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.screenuv = ComputeScreenPos(o.pos);
return o;
}
sampler2D _CameraDepthTexture;
fixed greyScale(fixed3 rgb) {
return dot(rgb, fixed3(0.29, 0.60, 0.11));
}
fixed3 heatMap(fixed greyValue) {
fixed3 heat = fixed3(0,0,0);
heat.r = smoothstep(0.4, 0.8, greyValue);
half OutColorGreen = smoothstep(0.0, 0.7, greyValue);
half InColorGreen = smoothstep(1.0, 0.7, greyValue);
heat.g = min(InColorGreen,OutColorGreen);
float OutColorBlue = smoothstep(1.0, 0.0, greyValue);
float InColorBlue = smoothstep(0.0, 0.25, greyValue);
heat.b = min(OutColorBlue,InColorBlue);
return heat;
}
fixed4 frag (v2f i) : SV_Target
{
float2 uv = i.screenuv.xy / i.screenuv.w;
fixed3 mainTex = tex2D(_CameraDepthTexture, uv).rgb;
fixed grayValueA = greyScale(mainTex);
fixed3 rgbOut;
float Intensity = 15;
float depth = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, uv)*Intensity;
rgbOut = heatMap(uv.y);
rgbOut = heatMap(depth);
return float4(rgbOut.rgb,1);
}
ENDCG
}
}
}
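To get back to the original question of tagging specific objects as "hot": the replacement-shader route mentioned at the start could be driven like this. A sketch only — the HotVisionCamera name, the hotReplacement shader, and a custom "Hot" RenderType tag are my assumptions, not part of the answer above:
using UnityEngine;

[RequireComponent(typeof(Camera))]
public class HotVisionCamera : MonoBehaviour
{
    // A shader whose SubShaders carry RenderType tags, e.g. one SubShader
    // with Tags { "RenderType" = "Hot" } that renders objects as bright.
    public Shader hotReplacement;

    void OnEnable()
    {
        // Every material whose shader declares a matching RenderType tag is
        // drawn with the corresponding SubShader of hotReplacement instead.
        GetComponent<Camera>().SetReplacementShader(hotReplacement, "RenderType");
    }

    void OnDisable()
    {
        GetComponent<Camera>().ResetReplacementShader();
    }
}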
Related
I'm currently stuck on a shader that I'm writing.
I'm trying to create a rain shader.
I have set up 3 particle systems which simulate rain, and a camera to look at this simulation. The camera view is what I use as a texture. In my shader I am now trying to make a normal map from that texture, but I don't know how to do it.
Shader "Unlit/Rain"
{
Properties
{
_MainTex("Albedo (RGB)", 2D) = "white" {}
_Color("Color", Color) = (1,1,1,1)
_NormalIntensity("NormalIntensity",Float) = 1
}
SubShader
{
Tags { "RenderType" = "Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
#include "Lighting.cginc"
#include "AutoLight.cginc"
struct VertexInput {
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
float4 normal : NORMAL;
float3 tangent : TANGENT;
};
struct VertexOutput {
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
float2 uv1 : TEXCOORD1;
float4 normals : NORMAL;
float3 tangentSpaceLight: TANGENT;
};
sampler2D _MainTex;
float4 _MainTex_ST;
half4 _Color;
float _NormalIntensity;
VertexOutput vert(VertexInput v) {
VertexOutput o;
o.normals = v.normal;
o.uv1 = v.uv;
o.vertex = UnityObjectToClipPos( v.vertex );
// o.uv = TRANSFORM_TEX( v.uv, _MainTex ); // used for texture
return o;
}
float4 frag(VertexOutput i) : COLOR{
float4 col2 = tex2D(_MainTex, i.uv1);
return col2 * i.normals * 5;
}
ENDCG
}
}
}
This is what the camera sees. I set the TargetTexture of this camera to a texture I created.
In my shader I then use that texture as the albedo property.
What I want to do now is compute the normals for that texture to create a bump map.
It looks like your "TargetTexture" is giving you back a height map. Here is a post I found about how to turn a height map into a normal map. I've mashed the code you had originally together with the core of that forum post and output the normals as color so you can test and see how this works:
Shader "Unlit/HeightToNormal"
{
Properties
{
_MainTex("Albedo (RGB)", 2D) = "white" {}
_Color("Color", Color) = (1,1,1,1)
_NormalIntensity("NormalIntensity",Float) = 1
_HeightMapSizeX("HeightMapSizeX",Float) = 1024
_HeightMapSizeY("HeightMapSizeY",Float) = 1024
}
SubShader
{
Tags { "RenderType" = "Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
#include "Lighting.cginc"
#include "AutoLight.cginc"
struct VertexInput {
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
float4 normal : NORMAL;
float3 tangent : TANGENT;
};
struct VertexOutput {
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
float2 uv1 : TEXCOORD1;
float4 normals : NORMAL;
//float3 tangentSpaceLight: TANGENT;
};
sampler2D _MainTex;
float4 _MainTex_ST;
half4 _Color;
float _NormalIntensity;
float _HeightMapSizeX;
float _HeightMapSizeY;
VertexOutput vert(VertexInput v) {
VertexOutput o;
o.uv = TRANSFORM_TEX( v.uv, _MainTex ); // used for texture
o.uv1 = v.uv;
o.normals = v.normal;
o.vertex = UnityObjectToClipPos(v.vertex);
return o;
}
float4 frag(VertexOutput i) : COLOR
{
float me = tex2D(_MainTex,i.uv1).x;
float n = tex2D(_MainTex,float2(i.uv1.x, i.uv1.y + 1.0 / _HeightMapSizeY)).x;
float s = tex2D(_MainTex,float2(i.uv1.x, i.uv1.y - 1.0 / _HeightMapSizeY)).x;
float e = tex2D(_MainTex,float2(i.uv1.x + 1.0 / _HeightMapSizeX,i.uv1.y)).x;
float w = tex2D(_MainTex,float2(i.uv1.x - 1.0 / _HeightMapSizeX,i.uv1.y)).x;
// defining starting normal as color has some interesting effects, generally makes this more flexible
float3 norm = _Color;
float3 temp = norm; //a temporary vector that is not parallel to norm
if (norm.x == 1)
temp.y += 0.5;
else
temp.x += 0.5;
//form a basis with norm being one of the axes:
float3 perp1 = normalize(cross(i.normals,temp));
float3 perp2 = normalize(cross(i.normals,perp1));
//use the basis to move the normal i its own space by the offset
float3 normalOffset = -_NormalIntensity * (((n - me) - (s - me)) * perp1 + ((e - me) - (w - me)) * perp2);
norm += normalOffset;
norm = normalize(norm);
// it's also interesting to output temp, perp1, and perp1, or combinations of the float samples.
return float4(norm, 1);
}
ENDCG
}
}
}
To generate a normal map from a height map, you use the oriented rate of change in the height map to come up with a normal vector, which can be represented with 3 float values (or color channels, if it's an image). You sample the point of the image you are on, then take small steps away from that point in the four cardinal directions. Using the cross product to guarantee orthogonality, you can define a basis. Scaling the two basis vectors by the oriented differences and adding them together gives a "normal offset": the 3D representation approximating the oriented change in value on your height map. Basically, that's your normal.
You can see the effects of me playing with normal intensity here, and the "normal color" here. When this looks right for your use case, you can try using normals as normals instead of colored output.
Some tweaking of values will probably still be required. Good luck!
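If it helps to see the same finite-difference idea outside shader code, here is a rough CPU-side sketch you could use to sanity-check the output. It assumes a readable (non-compressed, Read/Write-enabled) Texture2D; the HeightToNormal name and its parameters are hypothetical:
using UnityEngine;

public static class HeightToNormal
{
    public static Texture2D Convert(Texture2D height, float intensity)
    {
        int w = height.width, h = height.height;
        var result = new Texture2D(w, h);
        for (int y = 0; y < h; y++)
        {
            for (int x = 0; x < w; x++)
            {
                // Sample the height one texel away in each cardinal direction.
                float n = height.GetPixel(x, Mathf.Min(y + 1, h - 1)).grayscale;
                float s = height.GetPixel(x, Mathf.Max(y - 1, 0)).grayscale;
                float e = height.GetPixel(Mathf.Min(x + 1, w - 1), y).grayscale;
                float wst = height.GetPixel(Mathf.Max(x - 1, 0), y).grayscale;
                // The horizontal/vertical rate of change tilts a normal that
                // otherwise points straight out of the surface (+z).
                Vector3 normal = new Vector3((wst - e) * intensity, (s - n) * intensity, 1f).normalized;
                // Pack from [-1,1] into [0,1] color range, as normal maps do.
                result.SetPixel(x, y, new Color(normal.x * 0.5f + 0.5f, normal.y * 0.5f + 0.5f, normal.z * 0.5f + 0.5f));
            }
        }
        result.Apply();
        return result;
    }
}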
I have a shader that allows me to create and rotate a 2 or 3 color gradient. My problem was that it was very heavy on the GPU, so I moved this part of the code from the fragment shader to the vertex shader:
fixed4 frag (v2f i) : SV_Target
{
    // STARTS HERE
    float2 uv = -(i.screenPos.xy / i.screenPos.w - 0.5) * 2;
    fixed3 c;
    #if _BG_COLOR_GRADIENT2
        c = lerp(_BgColor1, _BgColor3, clampValue(rotateUV(uv.xy, _BgColorRotation * PI).y, _BgColorPosition));
    #elif _BG_COLOR_GRADIENT3
        c = lerp3(_BgColor1, _BgColor2, _BgColor3, clampValue(rotateUV(uv.xy, _BgColorRotation * PI).y, _BgColorPosition), _BgColorPosition3);
    #endif
    // ENDS HERE
    return fixed4(c, i.color.a);
}
Now my shader looks like this:
Shader "Custom/Gradient"
{
Properties
{
[KeywordEnum(Gradient2, Gradient3)] _BG_COLOR ("Color Type", Float) = 1
_Color("Color", Color) = (1, 1, 1, 1)
_BgColor1 ("Start Color",Color) = (0.667,0.851,0.937,1)
_BgColor2 ("Middle Color",Color) = (0.29, 0.8, 0.2,1)
_BgColor3 ("End Color",Color) = (0.29, 0.8, 0.2,1)
[GradientPositionSliderDrawer]
_BgColorPosition ("Gradient Position",Vector) = (0,1,0)
_BgColorRotation ("Gradient Rotation",Range(0,2)) = 0
_BgColorPosition3 ("Middle Size",Range(0,1)) = 0
}
SubShader
{
Tags{ "Queue" = "Background" "IgnoreProjectors"="True" }
Blend SrcAlpha OneMinusSrcAlpha
AlphaTest Greater .01
ColorMask RGB
Cull Off Lighting Off ZWrite Off
BindChannels {
Bind "Color", color
Bind "Vertex", vertex
Bind "TexCoord", texcoord
}
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma shader_feature _BG_COLOR_GRADIENT2 _BG_COLOR_GRADIENT3
#include "UnityCG.cginc"
#include "GradientHelper.cginc"
struct appdata
{
float4 vertex : POSITION;
fixed4 color : COLOR;
};
struct v2f
{
float4 pos : SV_POSITION;
float4 screenPos : TEXCOORD4;
fixed4 color : COLOR;
};
fixed4 _BgColor1;
fixed4 _BgColor2;
fixed4 _BgColor3;
float _BgColorRotation;
float2 _BgColorPosition;
float _BgColorPosition3;
float4 _Color;
v2f vert (appdata v)
{
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.screenPos = ComputeScreenPos(o.pos);
float2 uv = - (o.screenPos.xy / o.screenPos.w - 0.5)*2;
#if _BG_COLOR_GRADIENT2
o.color = lerp(_BgColor1,_BgColor3,clampValue(rotateUV(uv.xy,_BgColorRotation*PI).y,_BgColorPosition)) * v.color;
#elif _BG_COLOR_GRADIENT3
o.color = lerp3(_BgColor1,_BgColor2,_BgColor3,clampValue(rotateUV(uv.xy,_BgColorRotation*PI).y,_BgColorPosition),_BgColorPosition3) * v.color;
#endif
return o;
}
fixed4 frag (v2f i) : COLOR {
return i.color;
}
ENDCG
}
}
CustomEditor "Background.Editor.BackgroundGradientEditor"
}
(Here is my shader helper):
// GradientHelper.cginc
#ifndef GRADIENT_HELPER_INCLUDED
#define GRADIENT_HELPER_INCLUDED

#ifndef PI
#define PI 3.141592653589793
#endif

#ifndef HALF_PI
#define HALF_PI 1.5707963267948966
#endif

// Helper Functions

// Remap input into 0..1 between the limits given in "limit".
inline float clampValue(float input, float2 limit)
{
    float minValue = 1 - limit.y;
    float maxValue = 1 - limit.x;
    if (input <= minValue) {
        return 0;
    } else if (input >= maxValue) {
        return 1;
    } else {
        return (input - minValue) / (maxValue - minValue);
    }
}

inline float2 rotateUV(fixed2 uv, float rotation)
{
    float sinX = sin(rotation);
    float cosX = cos(rotation);
    float2x2 rotationMatrix = float2x2(cosX, -sinX, sinX, cosX);
    return mul(uv, rotationMatrix) / 2 + 0.5;
}

// Three-color lerp: a -> b over the first band, b in the middle, b -> c at the end.
inline fixed4 lerp3(fixed4 a, fixed4 b, fixed4 c, float pos, float size)
{
    float ratio2 = 0.5 + size * 0.5;
    float ratio1 = 1 - ratio2;
    if (pos < ratio1)
        return lerp(a, b, pos / ratio1);
    else if (pos > ratio2)
        return lerp(b, c, (pos - ratio2) / ratio1);
    else
        return b;
}

#endif // GRADIENT_HELPER_INCLUDED
The performance is great now, but the rotation is totally messed up (most noticeable on the 3-color gradient) and I can't figure out why.
I never understand why people want to generate their gradients inside the shader; it is quite limited, and not necessarily more performant unless you are changing the values every frame. My suggestion would be to generate the gradient as a texture on the CPU, with the size 1x128. Use the Gradient class provided by Unity, and loop:
// Assumes a UnityEngine.Gradient field named "gradient".
Texture2D texture = new Texture2D(128, 1);
Color[] pixels = new Color[128];
for (int i = 0; i < 128; i++) {
    pixels[i] = gradient.Evaluate(i / 127f);
}
texture.SetPixels(pixels);
texture.Apply();
Send it to the shader using:
material.SetTexture("_Gradient", texture);
Then you can rotate and scroll along this texture all you want using a 2x2 matrix, as you did. Just make sure to set the texture's wrap mode to clamp, not repeat. Remember that you can implement OnValidate() in your behaviour to apply value updates in the editor; if you need to update it in a build, though, you will need to listen for changes some other way. A baking sketch follows.
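For example, a minimal baking behaviour could look like this (a sketch only; the GradientBaker name and the _Gradient property are assumptions):
using UnityEngine;

[ExecuteInEditMode]
public class GradientBaker : MonoBehaviour
{
    public Gradient gradient;
    public Material material;
    Texture2D texture;

    // Re-bake whenever a value changes in the inspector.
    void OnValidate() { Bake(); }

    void Bake()
    {
        if (material == null || gradient == null) return;
        if (texture == null)
        {
            texture = new Texture2D(128, 1);
            texture.wrapMode = TextureWrapMode.Clamp; // clamp, not repeat
        }
        var pixels = new Color[128];
        for (int i = 0; i < 128; i++)
            pixels[i] = gradient.Evaluate(i / 127f);
        texture.SetPixels(pixels);
        texture.Apply();
        material.SetTexture("_Gradient", texture);
    }
}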
Using vertex colors would indeed be useful for gradients, since these are interpolated in hardware... but from my understanding, this is a screen-space effect, and as such you would need the vertices to line up with the actual gradient bands.
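On the shader side, sampling the baked ramp with a rotated screen UV could look roughly like this (a sketch reusing the rotation idea from the question; it assumes the v2f/screenPos setup and PI define from the shader above, and the _Gradient property name):
sampler2D _Gradient;
float _BgColorRotation;

fixed4 frag (v2f i) : SV_Target
{
    float2 uv = -(i.screenPos.xy / i.screenPos.w - 0.5) * 2;
    float s = sin(_BgColorRotation * PI);
    float c = cos(_BgColorRotation * PI);
    // Rotate with a 2x2 matrix, then remap the y coordinate to 0..1
    // and use it to sample the baked 128x1 ramp.
    float t = mul(uv, float2x2(c, -s, s, c)).y * 0.5 + 0.5;
    return tex2D(_Gradient, float2(t, 0.5));
}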
I'm trying to make a shader which simulates a warp-speed effect, and I think it is almost done. I only need to know what I should change in the code to make the tunnel effect completely opaque, so that nothing behind it shows through.
I have added an alpha slider to see if I can control the opacity, but the end result is still the same.
The code is here:
Shader "Warp Tunnel Distortion Opaque" {
Properties {
_TintColor ("Tint Color", Color) = (0.5,0.5,0.5,0.5)
_Speed("UV Speed", Float) = 1.0
_WiggleX("_WiggleX", Float) = 1.0
_WiggleY("_WiggleY", Float) = 1.0
_WiggleDist("Wiggle distance", Float) = 1.0
_Offset("Vertex offset", float) = 0
_TintColor("Tint", Color) = (1.0, 1.0, 1.0, 1.0)
_MainTex("Distortion map", 2D) = "" {}
_Dist("Distortion ammount", Float) = 10.0
}
Category {
Tags { "Queue"="Geometry" "RenderType"="Opaque" }
Cull Back Lighting Off ZWrite Off
Fog { Color (0,0,0,0) }
ZTest LEqual
SubShader {
GrabPass {
Name "BASE"
Tags { "LightMode" = "Always" }
}
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_particles
#include "UnityCG.cginc"
sampler2D _MainTex;
fixed4 _TintColor;
struct appdata_t {
float4 vertex : POSITION;
fixed4 color : COLOR;
float2 texcoord : TEXCOORD0;
float4 normal : NORMAL;
};
struct v2f {
float4 vertex : SV_POSITION;
fixed4 color : COLOR;
float2 texcoord : TEXCOORD0;
float4 posWorld : TEXCOORD1;
float4 uvgrab : TEXCOORD2;
float4 projPos : TEXCOORD3;
};
float4 _MainTex_ST;
float _Speed;
float _WiggleX, _WiggleY, _WiggleDist;
float _Offset;
float _Dist;
sampler2D _CameraDepthTexture;
float _InvFade;
sampler2D _GrabTexture;
float4 _GrabTexture_TexelSize;
v2f vert (appdata_t v)
{
v2f o;
o.posWorld = mul(unity_ObjectToWorld, v.vertex);
v.vertex.xyz += normalize(v.normal.xyz) * _Offset;
o.vertex = UnityObjectToClipPos(v.vertex);
o.vertex.x += sin(_Time.y * _WiggleX) * _WiggleDist;
o.vertex.y -= sin(_Time.y * _WiggleY) * _WiggleDist;
o.color = v.color;
o.texcoord = TRANSFORM_TEX(v.texcoord,_MainTex);
o.projPos = ComputeScreenPos (o.vertex);
COMPUTE_EYEDEPTH(o.projPos.z);
#if UNITY_UV_STARTS_AT_TOP
float scale = -1.0;
#else
float scale = 1.0;
#endif
o.uvgrab.xy = (float2(o.vertex.x, o.vertex.y*scale) + o.vertex.w) * 0.5;
o.uvgrab.zw = o.vertex.zw;
return o;
}
fixed4 frag (v2f i) : SV_Target
{
i.texcoord.y += _Time.x * _Speed;
float4 packedTex = tex2D(_MainTex, i.texcoord);
float local1 = packedTex.z * 2.4;
float2 local2 = packedTex.rg * 2.25;
packedTex.rg = local1 * local2;
half2 bump = UnpackNormal(packedTex).rg;
float2 offset = bump * _Dist * _GrabTexture_TexelSize.xy;
i.uvgrab.xy = offset * i.uvgrab.z + i.uvgrab.xy;
half4 col = tex2Dproj( _GrabTexture, UNITY_PROJ_COORD(i.uvgrab));
return col;
}
ENDCG
}
}
}
}
Thank you in advance.
Well if you want it completely opaque, that's easy.
Right before the return statement, just add:
col.a = 1;
If you want it to actually care about the alpha slider in the input color, then do this:
col.a = _TintColor.a;
In the first case, _TintColor isn't used at all. There are no other references to it besides its declarations: two at the top of the Properties block (you should remove one of them) and one inside the CGPROGRAM block, which binds the property so it can be used within the program.
If you actually want to tint things, you'll have to multiply the _TintColor with the computed color coming from the texture and effect. There are multiple ways of doing it, for example:
col.r *= _TintColor.r;
col.g *= _TintColor.g;
col.b *= _TintColor.b;
How you want to handle it is up to you.
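Putting the pieces together, the end of frag could read like this (a sketch combining the lines above, not the only way to do it):
half4 col = tex2Dproj(_GrabTexture, UNITY_PROJ_COORD(i.uvgrab));
col.rgb *= _TintColor.rgb; // apply the tint
col.a = 1;                 // force full opacity
return col;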
I am making a 360 viewer in Unity. To view a 360 photo I used to have a cubemap attached to a skybox, and it worked great, but the size of the cubemaps forced me to switch to textures.
All of the 360-viewer tutorials say to just put a sphere with a shader on it and put the camera inside. When I do this, it doesn't work very well: when I look toward the top or bottom, the image is warped (the chairs are supposed to look normal).
It did not happen when I used a skybox.
Does anyone know why this is happening?
Thank you very much!
The shader you chose does not handle equirectangular distortion very well. At the poles (top and bottom) of the sphere, a lot of image information has to be mapped onto very little space, which leads to the artifacts you are seeing.
You can write a specialized shader to improve the coordinate mapping from your equirectangular image onto the sphere. One such shader has been posted on the Unity forums:
Shader "Custom/Equirectangular" {
Properties {
_Color ("Main Color", Color) = (1,1,1,1)
_MainTex ("Diffuse (RGB) Alpha (A)", 2D) = "gray" {}
}
SubShader{
Pass {
Tags {"LightMode" = "Always"}
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma fragmentoption ARB_precision_hint_fastest
#pragma glsl
#pragma target 3.0
#include "UnityCG.cginc"
struct appdata {
float4 vertex : POSITION;
float3 normal : NORMAL;
};
struct v2f
{
float4 pos : SV_POSITION;
float3 normal : TEXCOORD0;
};
v2f vert (appdata v)
{
v2f o;
o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
o.normal = v.normal;
return o;
}
sampler2D _MainTex;
#define PI 3.141592653589793
inline float2 RadialCoords(float3 a_coords)
{
float3 a_coords_n = normalize(a_coords);
float lon = atan2(a_coords_n.z, a_coords_n.x);
float lat = acos(a_coords_n.y);
float2 sphereCoords = float2(lon, lat) * (1.0 / PI);
return float2(sphereCoords.x * 0.5 + 0.5, 1 - sphereCoords.y);
}
float4 frag(v2f IN) : COLOR
{
float2 equiUV = RadialCoords(IN.normal);
return tex2D(_MainTex, equiUV);
}
ENDCG
}
}
FallBack "VertexLit"
}
Again, it's not my own code, but I tested it on Android devices and as a standalone PC version, and it results in very smooth poles.
Please note: this shader does not flip the normals of your sphere. So if you want your camera to sit inside the sphere, you have to invert its normals with a 3D program, or with the shader itself: add Cull Front right after the Tags line in the Pass above, and the shader will apply its texture to the "wrong" (inner) side of the model.
I'm a beginner and I had to do a lot just to understand this thread. This is what worked for me: I just combined the answers and put them in one script. I'm pretty sure I will forget this in a few weeks' time, so I'm putting it here for posterity.
Shader "Custom/Equirectangular" {
Properties {
_Color ("Main Color", Color) = (1,1,1,1)
_MainTex ("Diffuse (RGB) Alpha (A)", 2D) = "gray" {}
}
SubShader{
Pass {
Tags {"LightMode" = "Always"}
Cull Front
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma fragmentoption ARB_precision_hint_fastest
#pragma glsl
#pragma target 3.0
#include "UnityCG.cginc"
struct appdata {
float4 vertex : POSITION;
float3 normal : NORMAL;
};
struct v2f
{
float4 pos : SV_POSITION;
float3 normal : TEXCOORD0;
};
v2f vert (appdata v)
{
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.normal = v.normal;
return o;
}
sampler2D _MainTex;
#define PI 3.141592653589793
inline float2 RadialCoords(float3 a_coords)
{
float3 a_coords_n = normalize(a_coords);
float lon = atan2(a_coords_n.z, a_coords_n.x);
float lat = acos(a_coords_n.y);
float2 sphereCoords = float2(lon, lat) * (1.0 / PI);
return float2(1 - (sphereCoords.x * 0.5 + 0.5), 1 - sphereCoords.y);
}
float4 frag(v2f IN) : COLOR
{
float2 equiUV = RadialCoords(IN.normal);
return tex2D(_MainTex, equiUV);
}
ENDCG
}
}
FallBack "VertexLit"
}
Here is another shader, which flips the sphere's normals:
Shader "Flip Normals" {
    Properties {
        _MainTex ("Base (RGB)", 2D) = "white" {}
    }
    SubShader {
        Tags { "RenderType" = "Opaque" }
        Cull Front
        CGPROGRAM
        #pragma surface surf Lambert vertex:vert
        sampler2D _MainTex;

        struct Input {
            float2 uv_MainTex;
            float4 color : COLOR;
        };

        // Invert the normals so the texture shows on the inside of the sphere.
        void vert(inout appdata_full v)
        {
            v.normal.xyz = v.normal * -1;
        }

        void surf (Input IN, inout SurfaceOutput o) {
            fixed3 result = tex2D(_MainTex, IN.uv_MainTex).rgb;
            o.Albedo = result.rgb;
            o.Alpha = 1;
        }
        ENDCG
    }
    Fallback "Diffuse"
}
I'm new to writing shaders and I'm working on a practice geometry shader. The goal is for the "normal" pass to produce transparent pixels so the object is invisible, while the "geometry" pass takes each triangle, redraws it in the same place as the original, but colored black. I therefore expect the output to be the original object, but black. However, my geometry pass seems to produce no output I can see:
Here is the code I currently have for the shader.
Shader "Outlined/Silhouette2" {
Properties
{
_Color("Color", Color) = (0,0,0,1)
_MainColor("Main Color", Color) = (1,1,1,1)
_Thickness("Thickness", float) = 4
_MainTex("Main Texture", 2D) = "white" {}
}
SubShader
{
Tags{ "Queue" = "Geometry" "IgnoreProjector" = "True" "RenderType" = "Transparent" }
Blend SrcAlpha OneMinusSrcAlpha
Cull Back
ZTest always
Pass
{
Stencil{
Ref 1
Comp always
Pass replace
}
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_fog
#include "UnityCG.cginc"
struct v2g
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float3 viewT : TANGENT;
float3 normals : NORMAL;
};
struct g2f
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float3 viewT : TANGENT;
float3 normals : NORMAL;
};
float4 _LightColor0;
sampler2D _MainTex;
float4 _MainColor;
v2g vert(appdata_base v)
{
v2g OUT;
OUT.pos = mul(UNITY_MATRIX_MVP, v.vertex);
OUT.uv = v.texcoord;
OUT.normals = v.normal;
OUT.viewT = ObjSpaceViewDir(v.vertex);
return OUT;
}
half4 frag(g2f IN) : COLOR
{
//this renders nothing, if you want the base mesh and color
//fill this in with a standard fragment shader calculation
float4 texColor = tex2D(_MainTex, IN.uv);
float3 normal = mul(float4(IN.normals, 0.0), _Object2World).xyz;
float3 normalDirection = normalize(normal);
float3 lightDirection = normalize(_WorldSpaceLightPos0.xyz * -1);
float3 diffuse = _LightColor0.rgb * _MainColor.rgb * max(0.0, dot(normalDirection, lightDirection));
texColor = float4(diffuse,1) * texColor;
//
//return texColor;
return float4(0, 0, 0, 0);
}
ENDCG
}
Pass
{
Stencil{
Ref 0
Comp equal
}
CGPROGRAM
#include "UnityCG.cginc"
#pragma target 4.0
#pragma vertex vert
#pragma geometry geom
#pragma fragment frag
half4 _Color;
float _Thickness;
struct v2g
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float4 local_pos: TEXCOORD1;
float3 viewT : TANGENT;
float3 normals : NORMAL;
};
struct g2f
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float3 viewT : TANGENT;
float3 normals : NORMAL;
};
v2g vert(appdata_base v)
{
v2g OUT;
OUT.pos = mul(UNITY_MATRIX_MVP, v.vertex);
OUT.local_pos = v.vertex;
OUT.uv = v.texcoord;
OUT.normals = v.normal;
OUT.viewT = ObjSpaceViewDir(v.vertex);
return OUT;
}
[maxvertexcount(12)]
void geom(triangle v2g IN[3], inout TriangleStream<g2f> triStream)
{
g2f OUT;
OUT.pos = IN[0].pos;
OUT.uv = IN[0].uv;
OUT.viewT = IN[0].viewT;
OUT.normals = IN[0].normals;
triStream.Append(OUT);
OUT.pos = IN[1].pos;
OUT.uv = IN[1].uv;
OUT.viewT = IN[1].viewT;
OUT.normals = IN[1].normals;
triStream.Append(OUT);
OUT.pos = IN[2].pos;
OUT.uv = IN[2].uv;
OUT.viewT = IN[2].viewT;
OUT.normals = IN[2].normals;
triStream.Append(OUT);
}
half4 frag(g2f IN) : COLOR
{
_Color.a = 1;
return _Color;
}
ENDCG
}
}
FallBack "Diffuse"
}
Since all I'm doing is taking the same triangles I've been given and appending them to the triangle stream, I'm not sure what I could be doing wrong to cause nothing to appear. Does anyone know why this is happening?
I notice that you don't call triStream.RestartStrip() after feeding the 3 vertices of a triangle into your geometry shader.
This informs the stream that a particular triangle strip has ended and a new triangle strip will begin. If you don't do this, each additional vertex passed to the stream adds on to the existing triangle strip, following the triangle-strip pattern: https://en.wikipedia.org/wiki/Triangle_strip
I'm fairly new to geometry shaders myself, so I'm not sure whether this is your issue. I don't THINK the RestartStrip function is called automatically at the end of each geometry-shader invocation, but I have not tested this; rather, I think it gets called automatically only when you reach maxvertexcount. For a single triangle, I would set maxvertexcount to 3, not the 12 you have now.
(I also know it can be tough to get ANY shader answers, so I figured I'd try to help.)
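For reference, the geometry function with both suggestions applied would look something like this (a sketch of the fix described above, not tested):
// maxvertexcount lowered to 3: one output triangle per input triangle.
[maxvertexcount(3)]
void geom(triangle v2g IN[3], inout TriangleStream<g2f> triStream)
{
    g2f OUT;
    for (int v = 0; v < 3; v++)
    {
        OUT.pos = IN[v].pos;
        OUT.uv = IN[v].uv;
        OUT.viewT = IN[v].viewT;
        OUT.normals = IN[v].normals;
        triStream.Append(OUT);
    }
    // Close the strip so the next primitive starts a fresh triangle.
    triStream.RestartStrip();
}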