I am trying to use a shader to render only the point cloud data that lies inside a 3D box.
However, the point cloud shader uses a geometry shader while the clip box shader is a surface shader, so I do not know how to combine the two.
Point Cloud Data Shader
https://answers.unity.com/questions/1437520/implementing-a-geometry-shader-for-a-pointcloud.html
///////////////////////////////////////////
Shader "Custom/Pointcloud" {
Properties{
_Radius("Sphere Radius", float) = 1.0
}
SubShader{
LOD 200
Tags { "RenderType" = "Opaque" }
//if you want transparency
//Tags { "Queue" = "Transparent" "RenderType" = "Transparent" }
//Blend SrcAlpha OneMinusSrcAlpha
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma geometry geom
#pragma target 4.0 // geometry shaders require shader model 4.0
#include "UnityCG.cginc"
struct vertexIn {
float4 pos : POSITION;
float4 color : COLOR;
};
struct vertexOut {
float4 pos : SV_POSITION;
float4 color : COLOR0;
float3 normal : NORMAL;
float r : TEXCOORD0; // not sure if this is good to do lol
};
struct geomOut {
float4 pos : POSITION;
float4 color : COLOR0;
float3 normal : NORMAL;
};
float rand(float3 p) {
return frac(sin(dot(p.xyz, float3(12.9898, 78.233, 45.5432))) * 43758.5453);
}
float2x2 rotate2d(float a) {
float s = sin(a);
float c = cos(a);
return float2x2(c,-s,s,c);
}
//Vertex shader: computes normal wrt camera
vertexOut vert(vertexIn i) {
vertexOut o;
o.pos = UnityObjectToClipPos(i.pos);
o.color = i.color;
o.normal = ObjSpaceViewDir(i.pos); // view direction, computed from the object-space position
o.r = rand(i.pos);// calc random value based on object space pos
// from world space instead (particles will spin when mesh moves, kinda funny lol)
//o.r = rand(mul(unity_ObjectToWorld,i.pos));
return o;
}
float _Radius;
//Geometry shader: creates an equilateral triangle with the original vertex at the orthocenter
[maxvertexcount(3)]
void geom(point vertexOut IN[1], inout TriangleStream<geomOut> OutputStream)
{
float2 dim = float2(_Radius,_Radius);
float2 p[3]; // equilateral tri
p[0] = float2(-dim.x, dim.y * .57735026919);
p[1] = float2(0., -dim.y * 1.15470053838);
p[2] = float2(dim.x, dim.y * .57735026919);
float2x2 r = rotate2d(IN[0].r * 3.14159);
geomOut OUT;
OUT.color = IN[0].color;
OUT.normal = IN[0].normal;
for (int i = 0; i < 3; i++) {
p[i] = mul(r,p[i]); // apply rotation
p[i].x *= _ScreenParams.y / _ScreenParams.x; // make square
OUT.pos = IN[0].pos + float4(p[i],0,0) / 2.;
OutputStream.Append(OUT);
}
}
float4 frag(geomOut i) : COLOR
{
return i.color;
// could do some additional lighting calculation here based on normal
}
ENDCG
}
}
FallBack "Diffuse"
}
ClipBox Shader
https://answers.unity.com/questions/1762908/render-only-whats-inside-a-box.html
Shader "Custom/ClipBox" {
Properties{
_MainTex("Albedo (RGB)", 2D) = "white" {}
_Glossiness("Smoothness", Range(0,1)) = 0.5
_Metallic("Metallic", Range(0,1)) = 0.0
}
SubShader{
Tags { "RenderType" = "Opaque" }
LOD 200
CGPROGRAM
#pragma surface surf Standard fullforwardshadows addshadow
#pragma target 3.0
sampler2D _MainTex;
half _Glossiness;
half _Metallic;
float4x4 _WorldToBox;
struct Input {
float2 uv_MainTex;
float3 worldPos;
};
void surf(Input IN, inout SurfaceOutputStandard o) {
float3 boxPosition = mul(_WorldToBox, float4(IN.worldPos, 1)).xyz;
clip(boxPosition + 0.5);
clip(0.5 - boxPosition);
fixed4 c = tex2D(_MainTex, IN.uv_MainTex);
o.Albedo = c.rgb;
o.Metallic = _Metallic;
o.Smoothness = _Glossiness;
o.Alpha = c.a;
}
ENDCG
}
FallBack "Diffuse"
}
To get the world position of your pixel in the fragment shader you have to pass it through your vertex and geometry shader:
vertex => geometry
struct vertexOut {
float4 pos : SV_POSITION;
float4 color : COLOR0;
float3 normal : NORMAL;
float r : TEXCOORD0; // not sure if this is good to do lol
float3 worldPos : TEXCOORD1;
};
vertexOut vert(vertexIn i) {
vertexOut o;
...
// calculate world position
o.worldPos = mul(unity_ObjectToWorld, i.pos).xyz;
return o;
}
geometry => fragment
(Since you simply create a small triangle around the vertex, you can approximate the new vertices' world positions with that of the original vertex. If this is undesirable, you have to calculate 3 separate world positions inside your loop.)
struct geomOut {
float4 pos : POSITION;
float4 color : COLOR0;
float3 normal : NORMAL;
float3 worldPos : TEXCOORD0;
};
void geom(point vertexOut IN[1], inout TriangleStream<geomOut> OutputStream) {
...
for (int i = 0; i < 3; i++) {
p[i] = mul(r,p[i]); // apply rotation
p[i].x *= _ScreenParams.y / _ScreenParams.x; // make square
OUT.pos = IN[0].pos + float4(p[i],0,0) / 2.;
// Simply use the input vertex world position. This might result in unclear cube edges.
OUT.worldPos = IN[0].worldPos;
OutputStream.Append(OUT);
}
}
Now you can add the clipping code
float3 boxPosition = mul(_WorldToBox, float4(IN.worldPos, 1)).xyz;
clip(boxPosition + 0.5);
clip(0.5 - boxPosition);
and the _WorldToBox property to your fragment shader.
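Put together, the point cloud fragment shader would look roughly like this (a sketch; worldPos is the field added above, and _WorldToBox must be declared alongside the pass's other uniforms):
float4x4 _WorldToBox;
float4 frag(geomOut i) : COLOR
{
// transform the world position into the box's local space, where a unit box spans -0.5..0.5
float3 boxPosition = mul(_WorldToBox, float4(i.worldPos, 1)).xyz;
clip(boxPosition + 0.5); // discard fragments below the box minimum
clip(0.5 - boxPosition); // discard fragments above the box maximum
return i.color;
}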
You also need a C# script that passes the matrix to the shader.
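A minimal sketch of such a script (the class and field names are illustrative); attach it to the box object and assign the material that uses the clipping shader:
using UnityEngine;

[ExecuteInEditMode]
public class ClipBoxController : MonoBehaviour
{
public Material clippedMaterial; // material using the point cloud shader above

void Update()
{
// worldToLocalMatrix maps world space into this transform's local space,
// where Unity's default cube spans -0.5..0.5 on each axis
clippedMaterial.SetMatrix("_WorldToBox", transform.worldToLocalMatrix);
}
}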
Related
I'm currently stuck on a shader that I'm writing.
I'm trying to create a rain shader.
I have set up 3 particle systems which simulate rain, and a camera to look at this simulation. The camera view is what I use as a texture. In my shader I am now trying to make a normal map from that texture, but I don't know how to do it.
Shader "Unlit/Rain"
{
Properties
{
_MainTex("Albedo (RGB)", 2D) = "white" {}
_Color("Color", Color) = (1,1,1,1)
_NormalIntensity("NormalIntensity",Float) = 1
}
SubShader
{
Tags { "RenderType" = "Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
#include "Lighting.cginc"
#include "AutoLight.cginc"
struct VertexInput {
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
float4 normal : NORMAL;
float3 tangent : TANGENT;
};
struct VertexOutput {
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
float2 uv1 : TEXCOORD1;
float4 normals : NORMAL;
float3 tangentSpaceLight: TANGENT;
};
sampler2D _MainTex;
float4 _MainTex_ST;
half4 _Color;
float _NormalIntensity;
VertexOutput vert(VertexInput v) {
VertexOutput o;
o.normals = v.normal;
o.uv1 = v.uv;
o.vertex = UnityObjectToClipPos( v.vertex );
// o.uv = TRANSFORM_TEX( v.uv, _MainTex ); // used for texture
return o;
}
float4 frag(VertexOutput i) : COLOR{
float4 col2 = tex2D(_MainTex, i.uv1);
return col2 * i.normals * 5;
}
ENDCG
}
}
}
This is what the camera sees. I set the TargetTexture of this camera to a texture I created.
In my shader I then use that texture as the albedo property.
So what I want to do now is derive the normals of that texture to create a bump map.
It looks like your "TargetTexture" is giving you back a height map. Here is a post I found about how to turn a height map into a normal map. I've mashed your original code together with the core of that forum post, and I output the normals as color so you can test and see how it works:
Shader "Unlit/HeightToNormal"
{
Properties
{
_MainTex("Albedo (RGB)", 2D) = "white" {}
_Color("Color", Color) = (1,1,1,1)
_NormalIntensity("NormalIntensity",Float) = 1
_HeightMapSizeX("HeightMapSizeX",Float) = 1024
_HeightMapSizeY("HeightMapSizeY",Float) = 1024
}
SubShader
{
Tags { "RenderType" = "Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
#include "Lighting.cginc"
#include "AutoLight.cginc"
struct VertexInput {
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
float4 normal : NORMAL;
float3 tangent : TANGENT;
};
struct VertexOutput {
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
float2 uv1 : TEXCOORD1;
float4 normals : NORMAL;
//float3 tangentSpaceLight: TANGENT;
};
sampler2D _MainTex;
float4 _MainTex_ST;
half4 _Color;
float _NormalIntensity;
float _HeightMapSizeX;
float _HeightMapSizeY;
VertexOutput vert(VertexInput v) {
VertexOutput o;
o.uv = TRANSFORM_TEX( v.uv, _MainTex ); // used for texture
o.uv1 = v.uv;
o.normals = v.normal;
o.vertex = UnityObjectToClipPos(v.vertex);
return o;
}
float4 frag(VertexOutput i) : COLOR
{
float me = tex2D(_MainTex,i.uv1).x;
float n = tex2D(_MainTex,float2(i.uv1.x, i.uv1.y + 1.0 / _HeightMapSizeY)).x;
float s = tex2D(_MainTex,float2(i.uv1.x, i.uv1.y - 1.0 / _HeightMapSizeY)).x;
float e = tex2D(_MainTex,float2(i.uv1.x + 1.0 / _HeightMapSizeX,i.uv1.y)).x;
float w = tex2D(_MainTex,float2(i.uv1.x - 1.0 / _HeightMapSizeX,i.uv1.y)).x;
// defining starting normal as color has some interesting effects, generally makes this more flexible
float3 norm = _Color.rgb;
float3 temp = norm; //a temporary vector that is not parallel to norm
if (norm.x == 1)
temp.y += 0.5;
else
temp.x += 0.5;
//form a basis with norm being one of the axes:
float3 perp1 = normalize(cross(i.normals,temp));
float3 perp2 = normalize(cross(i.normals,perp1));
//use the basis to move the normal in its own space by the offset
float3 normalOffset = -_NormalIntensity * (((n - me) - (s - me)) * perp1 + ((e - me) - (w - me)) * perp2);
norm += normalOffset;
norm = normalize(norm);
// it's also interesting to output temp, perp1, and perp2, or combinations of the height samples.
return float4(norm, 1);
}
ENDCG
}
}
}
To generate a normal map from a height map, you use the local, oriented rate of change in the height map to come up with a normal vector, which can be represented with 3 float values (or color channels, if it's an image). You sample the image at the point you are on, then take small steps away from that point in the four cardinal directions. Using cross products, which guarantee orthogonality, you define a basis around the base normal. Scaling the two basis vectors by the oriented height differences and adding them together gives a "normal offset": a 3D approximation of the oriented change in value on your height map. Added to the base normal and normalized, that is basically your normal.
You can see the effects of playing with the normal intensity here, and the "normal color" here. When this looks right for your use case, you can try using the normals as actual normals instead of colored output.
Some tweaking of values will probably still be required. Good luck!
I have a shader that allows me to create and rotate a 2- or 3-color gradient. My problem was that it was very heavy on the GPU, so I moved this part of the code from the fragment shader to the vertex shader:
fixed4 frag (v2f i) : SV_Target
{
//STARTS HERE
float2 uv = - (i.screenPos.xy / i.screenPos.w - 0.5)*2;
fixed3 c;
#if _BG_COLOR_GRADIENT2
c = lerp(_BgColor1,_BgColor3,clampValue(rotateUV(uv.xy,_BgColorRotation*PI).y,_BgColorPosition));
#elif _BG_COLOR_GRADIENT3
c = lerp3(_BgColor1,_BgColor2,_BgColor3,clampValue(rotateUV(uv.xy,_BgColorRotation*PI).y,_BgColorPosition),_BgColorPosition3);
#endif
//ENDS HERE
return fixed4(c, i.color.a);
}
Now my shader looks like this:
Shader "Custom/Gradient"
{
Properties
{
[KeywordEnum(Gradient2, Gradient3)] _BG_COLOR ("Color Type", Float) = 1
_Color("Color", Color) = (1, 1, 1, 1)
_BgColor1 ("Start Color",Color) = (0.667,0.851,0.937,1)
_BgColor2 ("Middle Color",Color) = (0.29, 0.8, 0.2,1)
_BgColor3 ("End Color",Color) = (0.29, 0.8, 0.2,1)
[GradientPositionSliderDrawer]
_BgColorPosition ("Gradient Position",Vector) = (0,1,0)
_BgColorRotation ("Gradient Rotation",Range(0,2)) = 0
_BgColorPosition3 ("Middle Size",Range(0,1)) = 0
}
SubShader
{
Tags{ "Queue" = "Background" "IgnoreProjectors"="True" }
Blend SrcAlpha OneMinusSrcAlpha
AlphaTest Greater .01
ColorMask RGB
Cull Off Lighting Off ZWrite Off
BindChannels {
Bind "Color", color
Bind "Vertex", vertex
Bind "TexCoord", texcoord
}
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma shader_feature _BG_COLOR_GRADIENT2 _BG_COLOR_GRADIENT3
#include "UnityCG.cginc"
#include "GradientHelper.cginc"
struct appdata
{
float4 vertex : POSITION;
fixed4 color : COLOR;
};
struct v2f
{
float4 pos : SV_POSITION;
float4 screenPos : TEXCOORD4;
fixed4 color : COLOR;
};
fixed4 _BgColor1;
fixed4 _BgColor2;
fixed4 _BgColor3;
float _BgColorRotation;
float2 _BgColorPosition;
float _BgColorPosition3;
float4 _Color;
v2f vert (appdata v)
{
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.screenPos = ComputeScreenPos(o.pos);
float2 uv = - (o.screenPos.xy / o.screenPos.w - 0.5)*2;
#if _BG_COLOR_GRADIENT2
o.color = lerp(_BgColor1,_BgColor3,clampValue(rotateUV(uv.xy,_BgColorRotation*PI).y,_BgColorPosition)) * v.color;
#elif _BG_COLOR_GRADIENT3
o.color = lerp3(_BgColor1,_BgColor2,_BgColor3,clampValue(rotateUV(uv.xy,_BgColorRotation*PI).y,_BgColorPosition),_BgColorPosition3) * v.color;
#endif
return o;
}
fixed4 frag (v2f i) : COLOR {
return i.color;
}
ENDCG
}
}
CustomEditor "Background.Editor.BackgroundGradientEditor"
}
(Here is my shader helper, GradientHelper.cginc):
#ifndef GRADIENT_HELPER_INCLUDED // include guard (name assumed) matching the #endif at the end of the file
#define GRADIENT_HELPER_INCLUDED
#ifndef PI
#define PI 3.141592653589793
#endif
#ifndef HALF_PI
#define HALF_PI 1.5707963267948966
#endif
// Helper Functions
inline float clampValue(float input, float2 limit)
{
float minValue = 1-limit.y;
float maxValue = 1-limit.x;
if(input<=minValue){
return 0;
} else if(input>=maxValue){
return 1;
} else {
return (input - minValue )/(maxValue-minValue);
}
}
inline float2 rotateUV(fixed2 uv, float rotation)
{
float sinX = sin (rotation);
float cosX = cos (rotation);
float2x2 rotationMatrix = float2x2(cosX, -sinX, sinX, cosX);
return mul ( uv, rotationMatrix )/2 + 0.5;
}
inline fixed4 lerp3(fixed4 a, fixed4 b, fixed4 c, float pos, float size){
float ratio2 = 0.5+size*0.5;
float ratio1 = 1-ratio2;
if(pos<ratio1)
return lerp(a,b,pos/ratio1);
else if(pos>ratio2)
return lerp(b,c,(pos-ratio2)/ratio1);
else
return b;
}
#endif
The performance is great now, but the rotation is totally messed up (most noticeable on the 3-color gradient) and I can't figure out why.
I never understand why people want to make their gradients inside the shader; it is quite limited and not necessarily more performant unless you are changing the values every frame. My best solution for this would be to generate the gradient as a 128x1 texture on the CPU. Use the Gradient class provided by Unity, and loop:
Texture2D texture = new Texture2D(128, 1);
texture.wrapMode = TextureWrapMode.Clamp; // clamp so sampling doesn't wrap at the edges
Color[] pixels = new Color[128];
for (int i = 0; i < 128; i++) {
pixels[i] = gradient.Evaluate(i / 127f);
}
texture.SetPixels(pixels);
texture.Apply();
Send it to the shader using:
material.SetTexture("_Gradient", texture);
Then you can rotate and scroll along this texture all you want, using a 2x2 matrix like you did. Just make sure to set the texture wrap mode to Clamp, not Repeat. Remember that you can implement OnValidate() in your behaviour to apply value updates in the editor; if you need to update the gradient in a build, though, you will have to listen for changes some other way.
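For example, the fragment shader can then boil down to a single texture sample (a sketch; the _Gradient property name and sampler declaration are assumed, and rotateUV is the helper from above):
sampler2D _Gradient;

fixed4 frag (v2f i) : SV_Target
{
// do the perspective divide per pixel, then rotate, just like the original fragment version
float2 uv = -(i.screenPos.xy / i.screenPos.w - 0.5) * 2;
float t = rotateUV(uv, _BgColorRotation * PI).y; // rotateUV already remaps into 0..1
return tex2D(_Gradient, float2(t, 0.5)) * i.color;
}
The vertex shader then only needs to pass o.color = v.color through; doing the screenPos divide per pixel again should also fix the messed-up rotation, since that divide does not interpolate correctly across a triangle.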
Using vertex colors would indeed be useful for gradients, since they are interpolated by the hardware... but from my understanding this is a screen-space effect, and as such you would need the vertices to line up with the actual gradient bands.
I have a shader which generates an opacity mask and rotates it.
This is how it looks:
The generated mask looks like this:
I generate the mask in code, but I want to take the mask from a Texture2D instead. How do I switch the mask generation to use only a Texture2D?
Code of my shader:
Shader "Custom/RadialOpacity" {
Properties {
[PerRendererData]_MainTex ("MainTex", 2D) = "white" {}
_Color ("Color", Color) = (1,1,1,1)
_OpacityRotator ("Opacity Rotator", Range(-360, 360)) = -360 // 2 full circles
[HideInInspector]_Cutoff ("Alpha cutoff", Range(0,1)) = 0.5
[MaterialToggle] PixelSnap ("Pixel snap", Float) = 0
}
SubShader {
Tags {
"IgnoreProjector"="True"
"Queue"="Transparent"
"RenderType"="Transparent"
"CanUseSpriteAtlas"="True"
"PreviewType"="Plane"
}
Pass {
Name "FORWARD"
Tags {
"LightMode"="ForwardBase"
}
Blend One OneMinusSrcAlpha
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile _ PIXELSNAP_ON
#include "UnityCG.cginc"
#pragma target 3.0
uniform sampler2D _MainTex;
uniform float4 _MainTex_ST;
uniform float4 _Color;
uniform float _OpacityRotator;
static const float TAU = float(6.283185); // this is 2 * PI
struct VertexInput {
float4 vertex : POSITION;
float2 texcoord0 : TEXCOORD0;
};
struct VertexOutput {
float4 pos : SV_POSITION;
float2 uv0 : TEXCOORD0;
float3 normalDir : TEXCOORD2;
};
VertexOutput vert (VertexInput v) {
VertexOutput o = (VertexOutput)0;
o.uv0 = v.texcoord0;
o.pos = mul(UNITY_MATRIX_MVP, v.vertex );
#ifdef PIXELSNAP_ON
o.pos = UnityPixelSnap(o.pos);
#endif
return o;
}
float4 frag(VertexOutput i) : COLOR {
i.normalDir = normalize(i.normalDir);
float4 _MainTex_var = tex2D(_MainTex,TRANSFORM_TEX(i.uv0, _MainTex));
float2 oStart = (i.uv0 - 0.5);
float2 oVector = float2(-1, -1);
float oRotatorNormalized = _OpacityRotator / 360.0;
float oRotator_ang = oRotatorNormalized * -TAU;
float oRotator_cos = cos(oRotator_ang);
float oRotator_sin = sin(oRotator_ang);
float2x2 oRotationMatrix = float2x2(oRotator_cos, -oRotator_sin, oRotator_sin, oRotator_cos);
float2 oRotatorComponent = mul(oVector * oStart, oRotationMatrix);
/* generating opacity mask BEGIN_SECTION */
float2 oMaskHorizOrVert = atan2(oRotatorComponent.g, oRotatorComponent.r);
float oAtan2MaskNormalized = (oMaskHorizOrVert / TAU) + 0.5;
float oAtan2MaskRotatable = oRotatorNormalized - oAtan2MaskNormalized;
float oWhiteToBlackMask = ceil(oAtan2MaskRotatable);
/* generating opacity mask END_SECTION */
float oFinalMultiply = _MainTex_var.a * max(oAtan2MaskNormalized, ceil(oWhiteToBlackMask));
/*** (Emissive) ***/
float3 finalColor = _MainTex_var.rgb * _Color.rgb * oFinalMultiply;
return fixed4(finalColor, oFinalMultiply);
}
ENDCG
}
}
FallBack "Diffuse"
}
And I want to get something like this:
Properties {
...
_OpacityMask ("OpacityMask", 2D) = "white" {}
...
}
...
float oWhiteToBlackMask = ceil(OpacityMask);
float oFinalMultiply = _MainTex_var.a * max(oAtan2MaskNormalized, ceil(oWhiteToBlackMask));
...
https://forum.unity3d.com/threads/rotation-of-texture-uvs-directly-from-a-shader.150482/
OK, if I understand your question correctly, you want to add a Texture2D parameter and have it rotate. You'll need to rotate the UV coordinates over time, which you can probably accomplish using the code in the link above.
I'm not sure how you get that exact fade at the end with a Texture2D, but maybe with some clever use of time you can figure out the animation.
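A minimal sketch of that idea (the _OpacityMask property name and its sampler are assumed), reusing the same kind of rotation matrix your shader already builds:
sampler2D _OpacityMask;

float sampleRotatedMask(float2 uv, float angle)
{
float s = sin(angle);
float c = cos(angle);
float2x2 rot = float2x2(c, -s, s, c);
float2 rotatedUV = mul(rot, uv - 0.5) + 0.5; // rotate around the texture center
return tex2D(_OpacityMask, rotatedUV).a; // use the mask's alpha channel
}
In frag you could then replace the procedural oWhiteToBlackMask with sampleRotatedMask(i.uv0, oRotator_ang). Set the mask texture's wrap mode to Clamp so the rotated UVs don't wrap around.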
I am a beginner in the shader world; it is quite difficult to learn (but I keep trying). I am looking for a shader that can sense collision/intersection with other objects, so that I can stop its rendering at the intersection point. I currently have this shader, which detects the intersection (I don't know how), but its mesh rendering doesn't stop.
Shader "Custom/IntersectionHighlights"
{
Properties
{
_RegularColor("Main Color", Color) = (1, 1, 1, .5) //Color when not intersecting
_HighlightColor("Highlight Color", Color) = (1, 1, 1, .5) //Color when intersecting
_HighlightThresholdMax("Highlight Threshold Max", Float) = 1 //Max difference for intersections
}
SubShader
{
Tags { "Queue" = "Transparent" "RenderType"="Transparent" }
Pass
{
Blend SrcAlpha OneMinusSrcAlpha
ZWrite Off
Cull Off
CGPROGRAM
#pragma target 3.0
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
uniform sampler2D _CameraDepthTexture; //Depth Texture
uniform float4 _RegularColor;
uniform float4 _HighlightColor;
uniform float _HighlightThresholdMax;
struct v2f
{
float4 pos : SV_POSITION;
float4 projPos : TEXCOORD1; //Screen position of pos
};
v2f vert(appdata_base v)
{
v2f o;
o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
o.projPos = ComputeScreenPos(o.pos);
return o;
}
half4 frag(v2f i) : COLOR
{
float4 finalColor = _RegularColor;
//Get the distance to the camera from the depth buffer for this point
float sceneZ = LinearEyeDepth(tex2Dproj(_CameraDepthTexture, UNITY_PROJ_COORD(i.projPos)).r);
//Actual distance to the camera
float partZ = i.projPos.z;
//If the two are similar, then there is an object intersecting with our object
float diff = abs(sceneZ - partZ) / _HighlightThresholdMax;
if (diff <= 1)
{
finalColor = lerp(_HighlightColor, _RegularColor, float4(diff, diff, diff, diff));
}
half4 c;
c.r = finalColor.r;
c.g = finalColor.g;
c.b = finalColor.b;
c.a = finalColor.a;
return c;
}
ENDCG
}
}
FallBack "VertexLit"
}
I'm new to writing shaders and I'm working on a practice geometry shader. The goal of the shader is for the "normal" pass to produce transparent pixels, making the object invisible, while the "geometry" pass takes each triangle and redraws it in the same place as the original, but colored black. Thus, I expect the output to be the original object, but black. However, my geometry pass seems to produce no output I can see:
Here is the code I currently have for the shader.
Shader "Outlined/Silhouette2" {
Properties
{
_Color("Color", Color) = (0,0,0,1)
_MainColor("Main Color", Color) = (1,1,1,1)
_Thickness("Thickness", float) = 4
_MainTex("Main Texture", 2D) = "white" {}
}
SubShader
{
Tags{ "Queue" = "Geometry" "IgnoreProjector" = "True" "RenderType" = "Transparent" }
Blend SrcAlpha OneMinusSrcAlpha
Cull Back
ZTest always
Pass
{
Stencil{
Ref 1
Comp always
Pass replace
}
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_fog
#include "UnityCG.cginc"
struct v2g
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float3 viewT : TANGENT;
float3 normals : NORMAL;
};
struct g2f
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float3 viewT : TANGENT;
float3 normals : NORMAL;
};
float4 _LightColor0;
sampler2D _MainTex;
float4 _MainColor;
v2g vert(appdata_base v)
{
v2g OUT;
OUT.pos = mul(UNITY_MATRIX_MVP, v.vertex);
OUT.uv = v.texcoord;
OUT.normals = v.normal;
OUT.viewT = ObjSpaceViewDir(v.vertex);
return OUT;
}
half4 frag(g2f IN) : COLOR
{
//this renders nothing, if you want the base mesh and color
//fill this in with a standard fragment shader calculation
float4 texColor = tex2D(_MainTex, IN.uv);
float3 normal = mul(float4(IN.normals, 0.0), _Object2World).xyz;
float3 normalDirection = normalize(normal);
float3 lightDirection = normalize(_WorldSpaceLightPos0.xyz * -1);
float3 diffuse = _LightColor0.rgb * _MainColor.rgb * max(0.0, dot(normalDirection, lightDirection));
texColor = float4(diffuse,1) * texColor;
//
//return texColor;
return float4(0, 0, 0, 0);
}
ENDCG
}
Pass
{
Stencil{
Ref 0
Comp equal
}
CGPROGRAM
#include "UnityCG.cginc"
#pragma target 4.0
#pragma vertex vert
#pragma geometry geom
#pragma fragment frag
half4 _Color;
float _Thickness;
struct v2g
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float4 local_pos: TEXCOORD1;
float3 viewT : TANGENT;
float3 normals : NORMAL;
};
struct g2f
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float3 viewT : TANGENT;
float3 normals : NORMAL;
};
v2g vert(appdata_base v)
{
v2g OUT;
OUT.pos = mul(UNITY_MATRIX_MVP, v.vertex);
OUT.local_pos = v.vertex;
OUT.uv = v.texcoord;
OUT.normals = v.normal;
OUT.viewT = ObjSpaceViewDir(v.vertex);
return OUT;
}
[maxvertexcount(12)]
void geom(triangle v2g IN[3], inout TriangleStream<g2f> triStream)
{
g2f OUT;
OUT.pos = IN[0].pos;
OUT.uv = IN[0].uv;
OUT.viewT = IN[0].viewT;
OUT.normals = IN[0].normals;
triStream.Append(OUT);
OUT.pos = IN[1].pos;
OUT.uv = IN[1].uv;
OUT.viewT = IN[1].viewT;
OUT.normals = IN[1].normals;
triStream.Append(OUT);
OUT.pos = IN[2].pos;
OUT.uv = IN[2].uv;
OUT.viewT = IN[2].viewT;
OUT.normals = IN[2].normals;
triStream.Append(OUT);
}
half4 frag(g2f IN) : COLOR
{
_Color.a = 1;
return _Color;
}
ENDCG
}
}
FallBack "Diffuse"
}
Since all I'm doing is taking the same triangles I've been given and appending them to the triangle stream, I'm not sure what I could be doing wrong to cause nothing to appear. Does anyone know why this is happening?
I notice that you don't call triStream.RestartStrip() after feeding in the 3 vertices of a triangle in your geometry shader.
This informs the stream that a particular triangle strip has ended and a new triangle strip will begin. If you don't do this, each (single) vertex passed to the stream will add on to the existing triangle strip, following the triangle strip pattern: https://en.wikipedia.org/wiki/Triangle_strip
I'm fairly new to geometry shaders myself, so I'm not sure if this is your issue or not. I don't THINK the RestartStrip function is called automatically at the end of each geometry shader invocation, but I have not tested this; rather, I think it gets called automatically only when you reach maxvertexcount. For a single triangle, I would set maxvertexcount to 3, not the 12 you have now.
(I also know it can be tough to get ANY shader answers, so I figured I'd try to help.)
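In code, the end of the geometry shader would then look something like this (a sketch based on the suggestion above):
[maxvertexcount(3)] // a single triangle only needs 3 vertices
void geom(triangle v2g IN[3], inout TriangleStream<g2f> triStream)
{
... // append the three vertices as before
triStream.RestartStrip(); // explicitly end this triangle strip before the next primitive
}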