Shader "Custom/Geometry/Wireframe"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
_WireframeVal ("Wireframe width", Range(0.000, 0.035)) = 0.05
_Color ("color", color) = (1, 1, 1, 1)
_BackColor ("Back color", color) = (1, 1, 1, 1)
}
SubShader
{
Tags { "RenderType"="Opaque" "Glowable" = "True" }
Pass
{
Cull Back
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma geometry geom
#include "UnityCG.cginc"
struct v2g {
float4 pos : SV_POSITION;
};
struct g2f {
float4 pos : SV_POSITION;
float3 center : TEXCOORD0;
};
v2g vert(appdata_base v) {
v2g o;
o.pos = UnityObjectToClipPos(v.vertex);
return o;
}
[maxvertexcount(3)]
void geom(triangle v2g IN[3], inout TriangleStream<g2f> triStream) {
float2 p0 = IN[0].pos.xy / IN[0].pos.w;
float2 p1 = IN[1].pos.xy / IN[1].pos.w;
float2 p2 = IN[2].pos.xy / IN[2].pos.w;
float2 edge0 = p1 - p0;
float2 edge1 = p2 - p1;
float2 edge2 = p0 - p2;
float area = abs(edge1.x * edge2.y - edge1.y * edge2.x);
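// area is twice the triangle's area, so area / length(edgeN) is the
// distance from each vertex to its opposite edge; the interpolated
// o.center then gives every fragment its distance to the three edges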
g2f o;
o.pos = IN[0].pos;
o.center = float3(area / length(edge1), 0, 0);
triStream.Append(o);
o.pos = IN[1].pos;
o.center = float3(0, 0, area / length(edge2));
triStream.Append(o);
o.pos = IN[2].pos;
o.center = float3(0, area / length(edge0), 0);
triStream.Append(o);
}
float _WireframeVal;
fixed4 _BackColor;
float4 _Color;
fixed4 frag(g2f i) : SV_Target
{
if (min(i.center.x, min(i.center.y, i.center.z)) > _WireframeVal)
{
discard;
}
return _BackColor;
}
ENDCG
}
}
}
Here is my wireframe shader code.
I am not getting uniform edge thickness throughout the mesh.
Also, when I reduce the width almost to zero (0.0001), some pixels of the wires are not drawn. What can I do about that? I want this wireframe shader to look like Unity's built-in wireframe mode; how can I achieve that?
Actually, that is uniform thickness. You're just misjudging what "thickness" means in this context, due to how the object is being drawn.
Take the edge of the cube facing the camera, where it looks thicker than the other edges. Well, you're half right: there are more white pixels drawn there.
But the reason for that is that there are two edges there! And each one is contributing the desired value towards the thickness of the wireframe.
Both the blue outlined face and the red outlined face are contributing to the thickness of that edge, whereas on a corner where the adjacent face is pointing away from the camera (say, the bottom edge of the blue outlined face), only one face contributes to the overall effect.
This isn't noticeable when the thickness is very small or the object is very far away (because 0.2 pixels + 0.2 pixels = 0.4 pixels, round up: 1 pixel), but it does become apparent at higher thickness values, or if you non-uniformly scale the object. For example, I have this in a project I was working on a few weeks ago (mind, mine draws the backfaces too and the alpha depth sorting is off):
The reason for this is that the "edges" are located by computing the barycentric coordinates of each triangle, which are only an approximation and will give skewed results if the triangles aren't equilateral.
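If you want lines that stay a fixed number of pixels wide (which also stops thin lines from dropping pixels), one common fix is to rescale the interpolated edge distances by their screen-space derivatives and anti-alias with smoothstep instead of a hard discard. A minimal sketch against the g2f struct above, assuming _WireframeVal is reinterpreted as a line width in pixels (so its Range would need widening):
fixed4 frag(g2f i) : SV_Target
{
    // convert the interpolated edge distances to pixel units
    float3 pixelDist = i.center / (fwidth(i.center) + 1e-6);
    float minDist = min(pixelDist.x, min(pixelDist.y, pixelDist.z));
    // 1 inside the line, 0 outside, with a one-pixel soft edge
    float lineMask = 1.0 - smoothstep(_WireframeVal - 0.5, _WireframeVal + 0.5, minDist);
    if (lineMask <= 0.0) discard;
    return _BackColor * lineMask;
}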
I have found a few unique shaders on Shadertoy that I would like to use to impart an experience of 'sky' within models of buildings in Unity AR.
An example might be this one: https://www.shadertoy.com/view/4tdSWr, which is just the sky looking up, or this one, https://www.shadertoy.com/view/4tdSWr, which has some directional input from the mouse (click and drag).
Right now the clouds feel more like a green-screen projection on the model, so there is no implication of direction or horizon if you are looking parallel to the plane the building is on (i.e., if I am standing with the clouds moving from right to left, as I turn left they don't appear to be moving from behind me and receding into the distance).
I have been trying to understand how to use the camera to 'rotate' the shader result so the direction of the camera drives the direction of the clouds' movement. I would also like to use the angle of the camera with respect to the ground plane to impart a horizon when you are looking out towards the walls.
Any insight on how to do this would be great, especially if it is more than just 'use _WorldSpaceCameraPos' or 'just add UNITY_MATRIX_MVP', as the results I found through excessive googling haven't really been that helpful so far.
The code for the second shader linked, adjusted for HLSL/Unity except for the mouse inputs is at the end of this post.
Shader "Unlit/skybox"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
iChannel0 ("noise-image", 2D) = "noise-image.png" {}
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// make fog work
#pragma multi_compile_fog
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
UNITY_FOG_COORDS(1)
float4 vertex : SV_POSITION;
};
sampler2D _MainTex;
sampler2D iChannel0;
float4 _MainTex_ST;
float random(in float2 uv)
{
return tex2D(iChannel0, uv / 64.).r;
}
float noise(in float2 uv)
{
float2 i = floor(uv);
float2 f = frac(uv);
f = f * f * (3. - 2. * f);
float lb = random(i + float2(0., 0.));
float rb = random(i + float2(1., 0.));
float lt = random(i + float2(0., 1.));
float rt = random(i + float2(1., 1.));
return lerp(lerp(lb, rb, f.x),
lerp(lt, rt, f.x), f.y);
}
#define OCTAVES 8
float fbm(in float2 uv)
{
float value = 0.;
float amplitude = .5;
for (int i = 0; i < OCTAVES; i++)
{
value += noise(uv) * amplitude;
amplitude *= .5;
uv *= 2.;
}
return value;
}
float3 Sky(in float3 ro, in float3 rd)
{
const float SC = 1e5;
// Calculate sky plane
float dist = (SC - ro.y) / rd.y;
float2 p = (ro + dist * rd).xz;
p *= 1.2 / SC;
// from iq's shader, https://www.shadertoy.com/view/MdX3Rr
float3 lightDir = normalize(float3(-.8, .15, -.3));
float sundot = clamp(dot(rd, lightDir), 0.0, 1.0);
float3 cloudCol = float3(1.,1.0,1.0);
//float3 skyCol = float3(.6, .71, .85) - rd.y * .2 * float3(1., .5, 1.) + .15 * .5;
float3 skyCol = float3(0.3,0.5,0.85) - rd.y*rd.y*0.5;
skyCol = lerp(skyCol, 0.85 * float3(0.7, 0.75, 0.85), pow(1.0 - max(rd.y, 0.0), 4.0));
// sun
float3 sun = 0.25 * float3(1.0, 0.7, 0.4) * pow(sundot, 5.0);
sun += 0.25 * float3(1.0, 0.8, 0.6) * pow(sundot, 64.0);
sun += 0.2 * float3(1.0, 0.8, 0.6) * pow(sundot, 512.0);
skyCol += sun;
// clouds
float t = _Time.y * 0.1;
float den = fbm(float2(p.x - t, p.y - t));
skyCol = lerp(skyCol, cloudCol, smoothstep(.4, .8, den));
// horizon
skyCol = lerp(skyCol, 0.68 * float3(.418, .394, .372), pow(1.0 - max(rd.y, 0.0), 16.0));
return skyCol;
}
float3x3 setCamera( in float3 ro, in float3 ta, float cr )
{
float3 cw = normalize(ta-ro);
float3 cp = float3(sin(cr), cos(cr),0.0);
float3 cu = normalize( cross(cw,cp) );
float3 cv = normalize( cross(cu,cw) );
return float3x3( cu, cv, cw );
}
void mainImage( out float4 fragColor, in float2 fragCoord )
{
float2 uv = fragCoord.xy / _ScreenParams.xy;
uv -= 0.5;
uv.x *= _ScreenParams.x / _ScreenParams.y;
float2 mouse = iMouse.xy/_ScreenParams.xy;
float3 ro = float3(0.0, 0.0, 0.0);
float3 ta = float3(cos(mouse.x * 6.28), mouse.y * 2.0, sin(mouse.x * 6.28));
float3x3 cam = setCamera(ro, ta, 0.0);
float3 rd = normalize(mul(cam , float3(uv, 1.0)));
float3 col = Sky(ro, rd);
fragColor = float4(col, 1.0);
}
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
UNITY_TRANSFER_FOG(o,o.vertex);
return o;
}
fixed4 frag (v2f i) : SV_Target
{
// sample the texture
fixed4 col = tex2D(_MainTex, i.uv);
// apply fog
UNITY_APPLY_FOG(i.fogCoord, col);
return col;
}
ENDCG
}
}
}
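Since the question is how to tie the clouds to the camera: one approach is to ignore mainImage's mouse-driven camera entirely and build a world-space view ray per fragment, then feed that ray to Sky(). A minimal sketch under that assumption (worldPos is a new interpolant you would add; you may want to clamp or fade rd.y when looking below the horizon):
// v2f gains:  float3 worldPos : TEXCOORD2;
// vert gains: o.worldPos = mul(unity_ObjectToWorld, v.vertex).xyz;
fixed4 frag (v2f i) : SV_Target
{
    // ray from the camera through this fragment, in world space
    float3 rd = normalize(i.worldPos - _WorldSpaceCameraPos);
    float3 ro = float3(0.0, 0.0, 0.0); // keep the sky's own origin
    return fixed4(Sky(ro, rd), 1.0);
}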
Hi, is it possible to sample a 3D render texture in a custom raymarching shader, like a regular 3D texture?
I use a 3D render texture because I calculate and set the color of the 3D volume in a compute shader. I set up the 3D RenderTexture as shown below:
output3DRenderTexture = new RenderTexture(m_CubeDim.x, m_CubeDim.y, 0, thisTexFormat);
output3DRenderTexture.enableRandomWrite = true;
output3DRenderTexture.dimension = UnityEngine.Rendering.TextureDimension.Tex3D;
output3DRenderTexture.volumeDepth = m_CubeDim.z;
output3DRenderTexture.Create();
I populate the 3D RenderTexture data in a compute shader, and GetData helps me confirm the 3D render texture holds all the correct color data.
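For context, the compute side writes into the volume through a RWTexture3D; a minimal sketch of what such a kernel could look like (the kernel name and the gradient fill are made up for illustration, assuming a 64^3 volume):
// FillVolume.compute -- illustrative sketch only
#pragma kernel FillVolume
RWTexture3D<float4> Result; // bound to output3DRenderTexture from C#

[numthreads(8, 8, 8)]
void FillVolume(uint3 id : SV_DispatchThreadID)
{
    // write any per-voxel colour; a simple normalized gradient here
    Result[id] = float4(id / 64.0, 1.0);
}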
I can successfully render if, in the custom shader's sampler3D, I swap the 3D render texture for a Texture3D I create from Texture2D slices.
cubeRenderer.material.SetTexture("_MainTex", output3DRenderTexture);//this does not render
versus
cubeRenderer.material.SetTexture("_MainTex", outputTexture3D);//this renders
This post from 2016 seems to suggest it's possible to sample 3D render textures in custom shaders, but it may now be outdated; it doesn't work for me, and no error shows either.
It seems to me there may be a significant performance hit if I create the Texture2D slices on the GPU, build the Texture3D from them on the CPU, and re-send that Tex3D to the GPU for the custom shader to consume. After all, the 3D volume already exists on the GPU, just as a RenderTexture set to Tex3D. Thank you!
Shader:
#include "UnityCG.cginc"
#define ITERATIONS 100
#define PI2 6.28318530718
half4 _Color;
sampler3D _MainTex;
half _Intensity, _Threshold;
half3 _SliceMin, _SliceMax;
float4x4 _AxisRotationMatrix;
float _Angle;
struct Ray {
float3 origin;
float3 dir;
};
struct AABB {
float3 min;
float3 max;
};
// http://download.nvidia.com/developer/presentations/2005/GDC/Audio_and_Slides/VolumeRendering_files/GDC_2_files/GDC_2005_VolumeRenderingForGames_files/Slide0073.htm
bool intersect(Ray r, AABB aabb, out float t0, out float t1)
{
float3 invR = 1.0 / r.dir;
float3 tbot = invR * (aabb.min - r.origin);
float3 ttop = invR * (aabb.max - r.origin);
float3 tmin = min(ttop, tbot);
float3 tmax = max(ttop, tbot);
float2 t = max(tmin.xx, tmin.yz);
t0 = max(t.x, t.y);
t = min(tmax.xx, tmax.yz);
t1 = min(t.x, t.y);
return t0 <= t1;
}
float3 get_uv(float3 p) {
return (p + 0.5);
}
float sample_volume(float3 uv, float3 p)
{
float v = tex3D(_MainTex, uv).r * _Intensity;
return v;
}
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float4 vertex : SV_POSITION;
float2 uv : TEXCOORD0;
float3 world : TEXCOORD1;
float3 local : TEXCOORD2;
};
v2f vert(appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.uv;
o.world = mul(unity_ObjectToWorld, v.vertex).xyz;
o.local = v.vertex.xyz;
return o;
}
fixed4 frag(v2f i) : SV_Target
{
Ray ray;
ray.origin = i.local;
// world space direction to object space
float3 dir = (i.world - _WorldSpaceCameraPos);
ray.dir = normalize(mul(unity_WorldToObject, dir));
AABB aabb;
aabb.min = float3(-0.5, -0.5, -0.5);
aabb.max = float3(0.5, 0.5, 0.5);
float tnear;
float tfar;
intersect(ray, aabb, tnear, tfar);
tnear = max(0.0, tnear);
// float3 start = ray.origin + ray.dir * tnear;
float3 start = ray.origin;
float3 end = ray.origin + ray.dir * tfar;
float dist = abs(tfar - tnear);
float step_size = dist / float(ITERATIONS);
float3 ds = normalize(end - start) * step_size;
float4 dst = float4(0, 0, 0, 0);
float3 p = start;
[unroll]
for (int iter = 0; iter < ITERATIONS; iter++)
{
float3 uv = get_uv(p);
float v = sample_volume(uv, p);
float4 src = float4(v, v, v, v);
src.a *= 0.5;
src.rgb *= src.a;
// front-to-back "under" compositing (src.rgb was premultiplied above)
dst = (1.0 - dst.a) * src + dst;
p += ds;
if (dst.a > _Threshold) break;
}
return saturate(dst) * _Color;
}
I am using the following shader for a painting asset found on this website.
As you render the faces of your mesh in the UV space of the mesh, you are reconstructing the islands one triangle at a time. On the edges of an island it can happen that, due to underestimation, the rasterizer doesn't consider a pixel which is actually in the island. For these pixels, no pixel shader will be executed and you will be left with a crease. (text and image taken from the website)
The basic idea is that every frame, after the paint texture has been updated, I run a shader over the entire texture and, using a filter and a pre-baked mask of the UV islands, extend the islands outwards.
The problem is that it does not work on iPhone. There is no error and no weird coloring; it is as if the shader does not get applied. I am not sure where the problem is coming from... maybe the for loops, or the command buffers used in C#.
Shader "Unlit/FixIlsandEdges"
{
SubShader
{
// =====================================================================================================================
// TAGS AND SETUP ------------------------------------------
Tags { "RenderType"="Opaque" }
LOD 100
Pass
{
// =====================================================================================================================
// DEFINE AND INCLUDE ----------------------------------
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
// =====================================================================================================================
// DECLARATIONS ----------------------------------
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
sampler2D _MainTex;
uniform float4 _MainTex_TexelSize;
sampler2D _IlsandMap;
// =====================================================================================================================
// VERTEX FRAGMENT ----------------------------------
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.uv;
return o;
}
fixed4 frag (v2f i) : SV_Target
{
fixed4 col = tex2D(_MainTex, i.uv);
float map = tex2D(_IlsandMap, i.uv).x;
float3 average = col.rgb;
if (map < 0.2) { // only take an average if we are not inside a uv island
int n = 0;
average = float3(0., 0., 0.);
for (float x = -1.5; x <= 1.5; x++) {
for (float y = -1.5; y <= 1.5; y++) {
float3 c = tex2D(_MainTex, i.uv + _MainTex_TexelSize.xy * float2(x, y)).rgb;
float m = tex2D(_IlsandMap, i.uv + _MainTex_TexelSize.xy * float2(x, y)).x;
n += step(0.1, m);
average += c * step(0.1, m);
}
}
average /= n;
}
col.xyz = average;
return col;
}
ENDCG
}
}
}
I've read somewhere that using tex2D in for loops is a big no-no, so I changed it to tex2Dlod, but the same thing happens.
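For reference, the tex2Dlod form of the two taps inside the loop would look something like this (a sketch; it forces mip level 0, so the result should match the tex2D version):
// explicit-LOD samples avoid derivative restrictions inside dynamic loops
float2 tapUV = i.uv + _MainTex_TexelSize.xy * float2(x, y);
float3 c = tex2Dlod(_MainTex, float4(tapUV, 0, 0)).rgb;
float m = tex2Dlod(_IlsandMap, float4(tapUV, 0, 0)).x;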
You can check out the Git project here
I am completely lost on this one, and Google isn't very helpful. Thank you for any comments.
I'm trying to create a shader for an image material that draws a circle regardless of the aspect ratio of the image itself.
In Shadertoy (GLSL) I can do the following to create a round circle, regardless of aspect ratio:
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
vec2 uv = fragCoord/iResolution.xy;
uv -= 0.5;
uv.x *= iResolution.x/iResolution.y; // < this compensates for the aspect ratio
float l = length(uv);
float s = smoothstep(0.5, 0.55, l);
vec4 col = vec4(s);
fragColor = vec4(col);
}
Which gives the following output
If I remove the line uv.x *= iResolution.x/iResolution.y; the circle will warp based on the current aspect ratio.
Now I want to create the same effect in Unity, so I tried the (to me seemingly) same approach.
_MainTex_TexelSize contains the width/height of the texture (from the docs):
{TextureName}_TexelSize - a float4 property contains texture size information:
- x contains 1.0/width
- y contains 1.0/height
- z contains width
- w contains height
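So, for a 512x256 texture, _MainTex_TexelSize would be (1/512, 1/256, 512, 256) = (0.001953125, 0.00390625, 512, 256).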
Shader "Unlit/Shader"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
Blend SrcAlpha OneMinusSrcAlpha
Cull off
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
sampler2D _MainTex;
float4 _MainTex_ST;
float4 _MainTex_TexelSize;
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
o.uv -= 0.5;
o.uv.x *= _MainTex_TexelSize.z / _MainTex_TexelSize.w;
return o;
}
float DrawCircle(float2 uv, float radius, float fallOff)
{
float d = length(uv);
return smoothstep(radius, fallOff, d);
}
fixed4 frag (v2f i) : SV_Target
{
fixed4 col = tex2D(_MainTex, i.uv);
float c = DrawCircle(i.uv, 0.5, 0.55);
col = lerp(col, fixed4(1,0,0,1), c);
return col;
}
ENDCG
}
}
}
The shader compiles as is, but the circle will still stretch based on the aspect ratio of the image.
I thought this might be due to the way the UVs are set up using o.uv = TRANSFORM_TEX(v.uv, _MainTex);, so I tried dividing them by the image's size:
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
o.uv / _MainTex_TexelSize.zw;
o.uv -= 0.5;
However, this did nothing (in hindsight that makes sense, since the division's result is never assigned back to o.uv).
Setting up the UVs differently, like so:
o.uv = v.uv / _MainTex_TexelSize.zw;
o.uv / _MainTex_TexelSize.zw;
o.uv -= 0.5;
results in the circle's center moving to the upper right, but it still warps when the aspect ratio changes.
What step am I missing or doing wrong to get the aspect-ratio-independent result I get in Shadertoy?
The aspect ratio of the input texture _MainTex has nothing to do with the aspect ratio of the output. In the Shadertoy example the output is the screen, and iResolution gives you the screen dimensions (the Unity equivalent is _ScreenParams). If you want to draw a quad that is not full screen, you have to match the quad's aspect ratio to the _MainTex aspect ratio for _MainTex_TexelSize to be useful, or else just provide the aspect ratio or the dimensions in a shader property (which is basically what _ScreenParams does):
float _Aspect;
fixed4 frag(v2f i) : SV_Target
{
i.uv -= .5;
i.uv.x *= _Aspect;
fixed4 col = tex2D(_MainTex, i.uv);
float c = DrawCircle(i.uv, .5, .55);
col = lerp(col, fixed4(1,0,0,1), c);
return col;
}
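Note that _Aspect needs a matching entry in the Properties block (for example _Aspect ("Aspect", Float) = 1) and can then be set from C# with material.SetFloat("_Aspect", width / height), using whatever width and height your quad actually has.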
You could calculate the aspect ratio with derivatives. Here dx and dy are the amount of uv change per pixel. This would also be useful if you want to have, for example, fallOff always be 10 pixels.
fixed4 frag(v2f i) : SV_Target
{
i.uv -= .5;
float dx = ddx(i.uv.x);
float dy = ddy(i.uv.y);
float aspect = dy/dx;
i.uv.x *= aspect;
fixed4 col = tex2D(_MainTex, i.uv);
float c = DrawCircle(i.uv, .5, .55);
col = lerp(col, fixed4(1,0,0,1), c);
return col;
}
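One caveat with the derivative version: it assumes the UVs change uniformly across the quad (an unrotated, undistorted quad). For a rotated quad, uv.x also changes along the screen's y axis, so the dy/dx ratio will no longer equal the true aspect ratio.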
I have a shader with which I bend the world. The problem is that it only bends up and down. How can I add bending to the left and right as well?
// Upgrade NOTE: replaced '_Object2World' with 'unity_ObjectToWorld'
// Upgrade NOTE: replaced '_World2Object' with 'unity_WorldToObject'
Shader "Custom/Bendy Diffuse - Radial"
{
Properties
{
_Color ("Main Color", Color) = (1,1,1,1)
_MainTex ("Base (RGB)", 2D) = "white" {}
_ReflectionColor ("Reflection Tint Color", Color) = (1,1,1,1)
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 200
CGPROGRAM
#pragma surface surf Lambert vertex:vert addshadow
#pragma multi_compile __ HORIZON_WAVES
#pragma multi_compile __ BEND_ON
// Global properties to be set by BendControllerRadial script
uniform half3 _CurveOrigin;
uniform fixed3 _ReferenceDirection;
uniform half _Curvature;
uniform fixed3 _Scale;
uniform half _FlatMargin;
uniform half _HorizonWaveFrequency;
// Per material properties
sampler2D _MainTex;
fixed4 _Color;
fixed4 _ReflectionColor;
struct Input
{
float2 uv_MainTex;
};
half4 Bend(half4 v)
{
half4 wpos = mul (unity_ObjectToWorld, v);
half2 xzDist = (wpos.xz - _CurveOrigin.xz) / _Scale.xz;
half dist = length(xzDist);
fixed waveMultiplier = 1;
dist = max(0, dist - _FlatMargin);
wpos.y -= dist * dist * _Curvature * waveMultiplier;
wpos = mul (unity_WorldToObject, wpos);
return wpos;
}
void vert (inout appdata_full v)
{
#if defined(BEND_ON)
v.vertex = Bend(v.vertex);
#endif
}
void surf (Input IN, inout SurfaceOutput o)
{
fixed4 c = tex2D(_MainTex, IN.uv_MainTex) * _Color;
o.Albedo = c.rgb;
o.Alpha = c.a;
fixed4 detail = tex2D(_MainTex, IN.uv_MainTex);
fixed4 refl = tex2D(_MainTex, IN.uv_MainTex);
o.Albedo = detail.rgb * _Color.rgb;
o.Alpha = 1;
o.Emission = refl.rgb * _ReflectionColor.rgb;
}
ENDCG
}
Fallback "Mobile/Specular/Diffuse"
}
If I understood your question correctly, you are trying to create the illusion of a curved Earth. You already have xzDist; add that vector, multiplied by the unmodified Y, to the horizontal position. So, basically, if a point is to the left of the origin, bend it left. Points that are higher will bend away from the center more, and the ones below the 'equator' could bend toward the center. Can't promise that this will look right, though.
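A sketch of that idea applied to the Bend() function above (untested; the sign conventions and the reuse of _Curvature for the horizontal term are guesses you will likely want to tune, and the wave multiplier is omitted for brevity):
half4 Bend(half4 v)
{
    half4 wpos = mul(unity_ObjectToWorld, v);
    half2 xzDist = (wpos.xz - _CurveOrigin.xz) / _Scale.xz;
    half dist = max(0, length(xzDist) - _FlatMargin);
    half yBefore = wpos.y;              // the "unmodified Y"
    wpos.y -= dist * dist * _Curvature; // vertical bend, as before
    // horizontal bend: push xz away from the curve origin in proportion
    // to the original height, so points left of the origin bend left
    wpos.xz += xzDist * (yBefore - _CurveOrigin.y) * _Curvature;
    return mul(unity_WorldToObject, wpos);
}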