URP shader: almost the same code behaves differently - unity3d

I'm working on a URP Gerstner wave shader. The problem was that the waves of two ocean planes placed side by side were not continuous, so I thought the problem could be fixed by using world-space vertex positions. It worked. However, there is something I really don't understand.
This code worked:
v.positionOS = mul(unity_ObjectToWorld, v.positionOS);
This one, however, produced weird results:
float worldPos = mul(unity_ObjectToWorld, v.positionOS);
v.positionOS = worldPos;
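A hedged guess at why the two snippets differ: float worldPos declares a single scalar, so the float4 result of the multiplication is truncated to its x component, which is then broadcast back into every component of v.positionOS. A minimal sketch of the scalar-free version, assuming v.positionOS is a float4:
// worldPos keeps all four components instead of collapsing to a single float
float4 worldPos = mul(unity_ObjectToWorld, v.positionOS);
v.positionOS = worldPos;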
Here is the whole vertex shader:
v2f vert(a2v v)
{
    v2f o;
    float3 tangent = float3(1, 0, 0);
    float3 binormal = float3(0, 0, 1);
    float3 p = v.positionOS;
    /*float worldPos = mul(unity_ObjectToWorld, v.positionOS);
    v.positionOS = worldPos;*/
    v.positionOS = mul(unity_ObjectToWorld, v.positionOS);
    p += GerstnerWave(_WaveA, v.positionOS.xyz, tangent, binormal);
    o.heightOS = p.y;
    VertexPositionInputs positionInputs = GetVertexPositionInputs(p);
    o.positionCS = positionInputs.positionCS;
    o.positionWS = positionInputs.positionWS;
    return o;
}
float3 GerstnerWave(
    float4 wave, float3 p, inout float3 tangent, inout float3 binormal
)
{
    float steepness = wave.z;
    float wavelength = wave.w;
    float k = 2 * UNITY_PI / wavelength;
    float c = sqrt(9.8 / k);
    float2 d = normalize(wave.xy) * _Frequency;
    float f = k * (dot(d, p.xz) - c * _Time.y * _Speed);
    float a = steepness / k;
    tangent += float3(
        -d.x * d.x * (steepness * sin(f)),
        d.x * (steepness * cos(f)),
        -d.x * d.y * (steepness * sin(f))
    );
    binormal += float3(
        -d.x * d.y * (steepness * sin(f)),
        d.y * (steepness * cos(f)),
        -d.y * d.y * (steepness * sin(f))
    );
    return float3(
        d.x * (a * cos(f)),
        a * sin(f),
        d.y * (a * cos(f))
    );
}
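For reference, a minimal sketch of doing the same thing with URP's space-transform helpers, so the wave phase is sampled in world space and neighbouring planes stay continuous. This is a simplified guess at the intent, not the original shader; TransformObjectToWorld and TransformWorldToHClip come from the core SRP SpaceTransforms.hlsl include:
v2f vert(a2v v)
{
    v2f o;
    float3 tangent = float3(1, 0, 0);
    float3 binormal = float3(0, 0, 1);
    // sample the wave at the world-space position so adjacent tiles share the same phase
    float3 positionWS = TransformObjectToWorld(v.positionOS.xyz);
    float3 p = positionWS + GerstnerWave(_WaveA, positionWS, tangent, binormal);
    o.heightOS = p.y; // note: this is now a world-space height
    o.positionWS = p;
    o.positionCS = TransformWorldToHClip(p); // p is already world space, so no extra object-to-world step
    return o;
}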

Related

GLSL Plane Transparency (Remove Plane Background)

I followed a tutorial online that basically draws a shape on a plane. I want to make only the shape visible and the background of the plane invisible, so that I can put it onto another plane instead.
I'm extremely new to Three.js and GLSL shaders, so I'm completely lost on this; any help is welcome, thank you.
My Fragment Shader:
uniform float time;
uniform float progress;
uniform vec2 mouse;
uniform sampler2D matcap;
uniform vec4 resolution;
varying vec2 vUv;
varying vec3 vPosition;
float PI = 3.141592653589793238;
mat4 rotationMatrix(vec3 axis, float angle) {
axis = normalize(axis);
float s = sin(angle);
float c = cos(angle);
float oc = 1.0 - c;
return mat4(oc * axis.x * axis.x + c, oc * axis.x * axis.y - axis.z * s, oc * axis.z * axis.x + axis.y * s, 0.0,
oc * axis.x * axis.y + axis.z * s, oc * axis.y * axis.y + c, oc * axis.y * axis.z - axis.x * s, 0.0,
oc * axis.z * axis.x - axis.y * s, oc * axis.y * axis.z + axis.x * s, oc * axis.z * axis.z + c, 0.0,
0.0, 0.0, 0.0, 1.0);
}
vec2 getmatcap(vec3 eye, vec3 normal) {
vec3 reflected = reflect(eye, normal);
float m = 2.8284271247461903 * sqrt( reflected.z+1.0 );
return reflected.xy / m + 0.5;
}
vec3 rotate(vec3 v, vec3 axis, float angle) {
mat4 m = rotationMatrix(axis, angle);
return (m * vec4(v, 1.0)).xyz;
}
float smin( float a, float b, float k )
{
float h = clamp( 0.5+0.5*(b-a)/k, 0.0, 1.0 );
return mix( b, a, h ) - k*h*(1.0-h);
}
float sdSphere( vec3 p, float r ){
return length(p)-r;
}
float sdBox( vec3 p, vec3 b ){
vec3 q = abs(p) - b;
return length(max(q,0.0)) + min(max(q.x,max(q.y,q.z)),0.);
}
float rand(vec2 co){
return fract(sin(dot(co.xy, vec2(12.9898, 78.233))) * 43758.5453);
}
float sdf(vec3 p){
vec3 p1 = rotate(p,vec3(1.),time/5.);
float box = smin(sdBox(p1,vec3(0.2)),sdSphere(p,0.2), 0.3);
float realsphere = sdSphere(p1, 0.1);
float final = mix(box,realsphere,progress);
for(float i = 0.; i < 10.; i++) {
float randOffset = rand(vec2(i,0.));
float progr = 1. - fract(time/3. + randOffset*3.);
vec3 pos = vec3(sin(randOffset*2.*PI),cos(randOffset*2.*PI),0.);
float gotoCenter = sdSphere(p-pos*progr, 0.1);
final = smin(final,gotoCenter,0.1);
}
float mouseSphere = sdSphere(p - vec3(mouse*2.,0.),0.1);
return smin(final, mouseSphere, 0.1);
}
vec3 calcNormal( in vec3 p )
{
const float eps = 0.0001;
const vec2 h = vec2(eps,0);
return normalize( vec3(sdf(p+h.xyy) - sdf(p-h.xyy),
sdf(p+h.yxy) - sdf(p-h.yxy),
sdf(p+h.yyx) - sdf(p-h.yyx) ) );
}
void main() {
float dist = length(vUv - vec2(0.5));
vec3 bg = mix(vec3(0.3),vec3(0.0), dist);
vec3 camPos = vec3(0.,0.,2.);
vec3 ray = normalize(vec3(vUv - vec2(0.5),-1));
vec3 rayPos = camPos;
float t = 0.;
float tMax = 5.;
for(int i = 0; i < 256; i++) {
vec3 pos = camPos + t*ray;
float h = sdf(pos);
if(h<0.0001 || t>tMax) break;
t+=h;
}
vec3 color = bg;
if(t<tMax){
vec3 pos = camPos + t*ray;
color = vec3(1.);
vec3 normal = calcNormal(pos);
color = normal;
float diff = dot(vec3(1.),normal);
vec2 matcapUV = getmatcap(ray,normal);
color = vec3(diff);
color = texture2D(matcap,matcapUV).rgb;
float fresnel = pow(1. + dot(ray,normal),3.);
color = mix(color,bg,fresnel);
}
gl_FragColor = vec4(color,1.);
}

Generating a normal map from a height map in a compute shader?

The problem is that when I try converting the height map to a normal map, the results are wrong. For some reason there appear to be 3 light sources, emitting from the top (green), right (red), and left (blue) of the texture.
This is the GeoMath.hlsl code that I am using:
static const float PI = 3.141592653589793238462643383279;
float2 longitudeLatitudeToUV(float2 longLat) {
float longitude = longLat[0];
float latitude = longLat[1];
float u = longitude / (2 * PI) + 0.5;
float v = latitude / PI + 0.5;
return float2(u,v);
}
float3 longitudeLatitudeToPoint(float2 longLat) {
float longitude = longLat[0];
float latitude = longLat[1];
float x;
float y;
float z;
y = sin(latitude);
float r = cos(latitude);
x = sin(longitude) * r;
z = -cos(longitude) * r;
return float3(x, y, z);
}
float2 uvToLongitudeLatitude(float2 uv) {
float longitude = (uv.x - 0.5) * (2 * PI);
float latitude = (uv.y - 0.5) * PI;
return float2(longitude, latitude);
}
float2 pointToLongitudeLatitude(float3 p) {
float longitude = atan2(p.x, p.z);
float latitude = asin(p.y);
return float2(longitude, latitude);
}
float2 pointToUV(float3 p) {
p = normalize(p);
return longitudeLatitudeToUV(pointToLongitudeLatitude(p));
}
This is the compute shader I am using to convert the height map into a normal map:
#pragma kernel CSMain
#include "GeoMath.hlsl"
Texture2D<float> _HeightMap;
RWTexture2D<float4> _NormalMap;
int _TextureSize_Width;
int _TextureSize_Height;
float _WorldRadius;
float _HeightMultiplier;
float3 CalculateWorldPoint(uint2 texCoord)
{
float2 uv = texCoord / float2(_TextureSize_Width - 1, _TextureSize_Height - 1);
float2 longLat = uvToLongitudeLatitude(uv);
float3 spherePoint = longitudeLatitudeToPoint(longLat);
float height01 = _HeightMap[texCoord].r + 1.0;
float worldHeight = _WorldRadius + height01 * _HeightMultiplier;
return spherePoint * worldHeight;
}
uint2 WrapIndex(uint2 texCoord)
{
texCoord.x = (texCoord.x + _TextureSize_Width) % _TextureSize_Width;
texCoord.y = max(min(_TextureSize_Height - 1, texCoord.y), 0);
return texCoord;
}
[numthreads(8,8,1)]
void CSMain (uint3 id : SV_DispatchThreadID)
{
float3 normalVector;
float3 posNorth = CalculateWorldPoint(WrapIndex(id.xy + uint2(0, 1)));
float3 posSouth = CalculateWorldPoint(WrapIndex(id.xy + uint2(0, -1)));
float3 posEast = CalculateWorldPoint(WrapIndex(id.xy + uint2(1, 0)));
float3 posWest = CalculateWorldPoint(WrapIndex(id.xy + uint2(-1, 0)));
float3 dirNorth = normalize(posNorth - posSouth);
float3 dirEast = normalize(posEast - posWest);
normalVector = normalize(cross(dirNorth, dirEast));
_NormalMap[id.xy] = float4(normalVector, 1.0);
}
And this is the result I am getting, shown below: height map (top), normal map generated from the code above (bottom).
I believe that you are trying to get object-space normals, but there is a tiny detail missing. The components of a normalized vector3 lie in -1..1 for each axis, while pixel values lie in 0..1, so you just need to remap the range. This line roughly fixes the problem:
_NormalMap[id.xy] = float4(normalVector / 2 + float3(0.5, 0.5, 0.5), 1.0);
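And whatever samples this map later presumably needs the inverse remap back to -1..1. A small sketch of a hypothetical decode helper on the sampling side:
// Hypothetical helper for the shader that reads the map: undo the 0..1 packing.
float3 DecodeObjectSpaceNormal(float4 packedNormal)
{
    return normalize(packedNormal.xyz * 2.0 - 1.0);
}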

Dynamically recalculating normals after vertex displacement

Can anyone let me know if I'm on the right track with this: I have a vertex shader that bumps outward dynamically depending on a point passed in (think a mouse running under a rug). In order for the lighting to update properly, I need to recalculate the normals after modifying the vertex position. I have access to each vertex point as well as the origin.
My current thinking is that I do some sort of math to determine the tangent/bitangent and use a cross product to determine the normal. My math skills aren't great; what would I need to do to determine those vectors?
Here's my current vert shader:
void vert(inout appdata_full v)
{
float3 worldPos = mul(unity_ObjectToWorld, v.vertex).xyz;
float distanceToLift = distance(worldPos, _LiftOrigin);
v.vertex.y = smoothstep(_LiftHeight, 0, distanceToLift / _LiftRadius) * 5;
}
A simple solution is covered in this tutorial by Ronja, which I'll summarize here with modifications that reflect your specific case.
First, find two points offset from your current point by a small amount along the tangent and the bitangent (which you can calculate from the normal and tangent):
float3 posPlusTangent = v.vertex + v.tangent * 0.01;
worldPos = mul(unity_ObjectToWorld, posPlusTangent).xyz;
distanceToLift = distance(worldPos, _LiftOrigin);
posPlusTangent.y = smoothstep(_LiftHeight, 0, distanceToLift / _LiftRadius) * 5;
float3 bitangent = cross(v.normal, v.tangent);
float3 posPlusBitangent = v.vertex + bitangent * 0.01;
worldPos = mul(unity_ObjectToWorld, posPlusBitangent).xyz; // transform the offset position, not the bitangent direction
distanceToLift = distance(worldPos, _LiftOrigin);
posPlusBitangent.y = smoothstep(_LiftHeight, 0, distanceToLift / _LiftRadius) * 5;
Then, find the difference between these offsets and the new vertex pos to find the new tangent and bitangent, then do another cross product to find the resulting normal:
float3 modifiedTangent = posPlusTangent - v.vertex;
float3 modifiedBitangent = posPlusBitangent - v.vertex;
float3 modifiedNormal = cross(modifiedTangent, modifiedBitangent);
v.normal = normalize(modifiedNormal);
Altogether:
float find_offset(float3 localV)
{
    // w = 1 so the translation part of the object-to-world matrix is applied
    float3 worldPos = mul(unity_ObjectToWorld, float4(localV, 1.0)).xyz;
    float distanceToLift = distance(worldPos, _LiftOrigin);
    return smoothstep(_LiftHeight, 0, distanceToLift / _LiftRadius) * 5;
}
void vert(inout appdata_full v)
{
    v.vertex.y = find_offset(v.vertex.xyz);
    float3 posPlusTangent = v.vertex.xyz + v.tangent.xyz * 0.01;
    posPlusTangent.y = find_offset(posPlusTangent);
    float3 bitangent = cross(v.normal, v.tangent.xyz);
    float3 posPlusBitangent = v.vertex.xyz + bitangent * 0.01;
    posPlusBitangent.y = find_offset(posPlusBitangent);
    float3 modifiedTangent = posPlusTangent - v.vertex.xyz;
    float3 modifiedBitangent = posPlusBitangent - v.vertex.xyz;
    float3 modifiedNormal = cross(modifiedTangent, modifiedBitangent);
    v.normal = normalize(modifiedNormal);
}
This is a method of approximation, but it may be good enough!
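For context, a vert function like this presumably sits in a surface shader and is wired up roughly as below (an assumption about the surrounding setup, which is not shown here; addshadow regenerates the shadow caster pass so the displaced geometry also casts correct shadows):
#pragma surface surf Standard vertex:vert addshadow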

Shader works just fine in the Unity Editor, but turns black in a WebGL build

I am working on a project which encodes sensor values at different positions into a 3D heatmap of a building. I use a vertex shader for this purpose, and it works just fine in the Editor, but after I built the scene for WebGL, the output turned out to be black.
I have tried using constant loop indices, always including this shader in Project Settings, etc., but none of these works. Here is some of the code:
v2f vert(appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.worldPos = mul(unity_ObjectToWorld, v.vertex);
o.screenPos = ComputeScreenPos(o.vertex);
UNITY_TRANSFER_FOG(o,o.vertex);
return o;
}
//...
float2 boxIntersection(in float3 ro, in float3 rd, in float3 rad)
{
float3 m = 1.0 / rd;
float3 n = m * ro;
float3 k = abs(m) * rad;
float3 t1 = -n - k;
float3 t2 = -n + k;
float tN = max(max(t1.x, t1.y), t1.z);
float tF = min(min(t2.x, t2.y), t2.z);
if (tN > tF || tF < 0.0) return float2(-1.0, -1.0); // no intersection
return float2(tN, tF);
}
//p in object space
float SampleValue(float3 p) {
float totalValue = 0.0;
float denom = 0.0;
for (int i = 0; i < 34; ++i) { // _DataSize
float4 sd = _SensorData[i];
float dist = length(p - sd.xyz);
totalValue += sd.w / (dist * dist);
denom += 1.0 / (dist * dist);
}
if (denom == 0.0) {
return 0.0;
}
return totalValue / denom;
}
float4 transferFunction(float value) {
float tv = (value - _DataScale.x) / (_DataScale.y - _DataScale.x); // _DataScale.x, _DataScale.y
float4 col = tex2D(_TransferTexture, float2(0.5, tv));
col.w *= _Strength; // _Strength
return float4(col.xyz * col.w, col.w);
}
float4 rayMarch(float3 ro, float3 rd, float dp) {
float3 ro1 = mul(unity_WorldToObject, float4(ro, 1.0));
float3 rd1 = mul(unity_WorldToObject, rd);
float2 t = boxIntersection(ro1, rd1, float3(1, 1, 1) * 0.5);
t.x = length(mul(unity_ObjectToWorld, float4(ro1 + rd1 * max(t.x, 0.0), 1.0)) - ro);
t.y = length(mul(unity_ObjectToWorld, float4(ro1 + rd1 * t.y, 1.0)) - ro);
t.y = min(t.y, dp);
float4 acc = float4(0.0, 0.0, 0.0, 1.0);
float totalDelta = (t.y - t.x);
float delta = totalDelta / float(_RM_Samples - 1.0);
float3 p = ro + t.x * rd;
for (int i = 0; i < 34; ++i) { // _RM_Samples
float v = SampleValue(p);
float4 tf = transferFunction(v);
float tr = exp(-tf.w * delta);
acc.xyz += tf.xyz * acc.w * delta;
acc.w *= tr;
p += delta * rd;
}
return float4(acc.xyz, (1.0 - acc.w) * step(t.x, t.y));
}
fixed4 frag(v2f i) : SV_Target
{
float2 tc = i.screenPos.xy / i.screenPos.w;
float depth = UNITY_SAMPLE_DEPTH(tex2D(_CameraDepthTexture, tc));
float eD = LinearEyeDepth(depth);
float3 ro = _WorldSpaceCameraPos;
float3 rd = normalize(i.worldPos - ro);
float4 col = rayMarch(ro, rd, eD);
//if (col.w < 1) col = float4(1, 0, 0, 1);
//else col = float4(0, 1, 0, 1);
if (wingCullPlaneValue(i.worldPos.xyz) == 0 || cullPlaneValue(i.worldPos.xyz) == 0) {
discard;
}
UNITY_APPLY_FOG(i.fogCoord, col);
return col;
}
Since this works fine in the Editor, I don't think there is any error in the boxIntersection or rayMarch functions. I wonder if there is anything special about how WebGL processes the pixels, and whether I have to tweak some code accordingly. I am new to WebGL and shaders, and would appreciate any help or advice; thanks in advance.
That's because of the step function. Approximate it with your own sigmoid function (which might be expensive):
float emulated_step(float a, float x){
    return 1.0 / (1 + pow(1000, -(x - a) * 8192));
}
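Presumably the idea is to substitute it for the step call in rayMarch's return value, roughly like this (my reading of the suggestion, not stated explicitly above):
return float4(acc.xyz, (1.0 - acc.w) * emulated_step(t.x, t.y));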

How can I convert these fragColor code snippets from ShaderToy so that they will work in Unity?

I'm following this tutorial: https://www.youtube.com/watch?v=CzORVWFvZ28 to convert some code from ShaderToy to Unity. This is the shader that I'm attempting to convert: https://www.shadertoy.com/view/Ws23WD.
I saw that in the tutorial he was able to take his fragColor statement from ShaderToy and simply return a color in Unity instead. However, when I tried doing that with the code I have from ShaderToy, an error about not being able to implicitly convert from float3 to float4 popped up. I saw that my color variable is declared as a float3, which must be what is causing the issue, but I need some help figuring out how to fix it.
I also noticed that I have an 'a' value with the fragColor variable, in addition to the rgb values; would I use a float4 to take in the (r, g, b, a) values?
fixed4 frag (v2f i) : SV_Target
{
//float2 uv = float2(fragCoord.x / iResolution.x, fragCoord.y / iResolution.y);
float2 uv = float2(i.uv);
uv -= 0.5;
//uv /= float2(iResolution.y / iResolution.x, 1);
float3 cam = float3(0, -0.15, -3.5);
float3 dir = normalize(float3(uv,1));
float cam_a2 = sin(_Time.y) * pi * 0.1;
cam.yz = rotate(cam.yz, cam_a2);
dir.yz = rotate(dir.yz, cam_a2);
float cam_a = _Time.y * pi * 0.1;
cam.xz = rotate(cam.xz, cam_a);
dir.xz = rotate(dir.xz, cam_a);
float3 color = float3(0.16, 0.12, 0.10);
float t = 0.00001;
const int maxSteps = 128;
for(int i = 0; i < maxSteps; ++i) {
float3 p = cam + dir * t;
float d = scene(p);
if(d < 0.0001 * t) {
color = float3(1.0, length(p) * (0.6 + (sin(_Time.y*3.0)+1.0) * 0.5 * 0.4), 0);
break;
}
t += d;
}
//fragColor.rgb = color;
return color;
//fragColor.a = 1.0;
}
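A minimal sketch of the kind of change the error points at: yes, a float4 (or fixed4) carries the (r, g, b, a) values, and the alpha can be appended explicitly, matching the fragColor.a = 1.0 line from the ShaderToy original:
// the fixed4 return type needs four components: rgb from color plus an explicit alpha
return fixed4(color, 1.0);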