Shader works just fine in the Unity Editor, but turns black in a WebGL build - unity3d

I am working on a project which encodes sensor values at different positions into a 3D heatmap of a building. I use a shader for this purpose, and it works just fine in the Editor, but after I built the scene for WebGL, the output turned out black.
I have tried using constant loop indices, always including this shader in Project Settings, etc., but none of these worked. Here is some of the code:
v2f vert(appdata v)
{
    v2f o;
    o.vertex = UnityObjectToClipPos(v.vertex);
    o.worldPos = mul(unity_ObjectToWorld, v.vertex);
    o.screenPos = ComputeScreenPos(o.vertex);
    UNITY_TRANSFER_FOG(o, o.vertex);
    return o;
}
//...
float2 boxIntersection(in float3 ro, in float3 rd, in float3 rad)
{
    float3 m = 1.0 / rd;
    float3 n = m * ro;
    float3 k = abs(m) * rad;
    float3 t1 = -n - k;
    float3 t2 = -n + k;
    float tN = max(max(t1.x, t1.y), t1.z);
    float tF = min(min(t2.x, t2.y), t2.z);
    if (tN > tF || tF < 0.0) return float2(-1.0, -1.0); // no intersection
    return float2(tN, tF);
}
// p in object space
float SampleValue(float3 p) {
    float totalValue = 0.0;
    float denom = 0.0;
    for (int i = 0; i < 34; ++i) { // _DataSize
        float4 sd = _SensorData[i];
        float dist = length(p - sd.xyz);
        totalValue += sd.w / (dist * dist);
        denom += 1.0 / (dist * dist);
    }
    if (denom == 0.0) {
        return 0.0;
    }
    return totalValue / denom;
}
float4 transferFunction(float value) {
    float tv = (value - _DataScale.x) / (_DataScale.y - _DataScale.x); // _DataScale.x, _DataScale.y
    float4 col = tex2D(_TransferTexture, float2(0.5, tv));
    col.w *= _Strength; // _Strength
    return float4(col.xyz * col.w, col.w);
}
float4 rayMarch(float3 ro, float3 rd, float dp) {
    float3 ro1 = mul(unity_WorldToObject, float4(ro, 1.0));
    float3 rd1 = mul(unity_WorldToObject, rd);
    float2 t = boxIntersection(ro1, rd1, float3(1, 1, 1) * 0.5);
    t.x = length(mul(unity_ObjectToWorld, float4(ro1 + rd1 * max(t.x, 0.0), 1.0)) - ro);
    t.y = length(mul(unity_ObjectToWorld, float4(ro1 + rd1 * t.y, 1.0)) - ro);
    t.y = min(t.y, dp);
    float4 acc = float4(0.0, 0.0, 0.0, 1.0);
    float totalDelta = (t.y - t.x);
    float delta = totalDelta / float(_RM_Samples - 1.0);
    float3 p = ro + t.x * rd;
    for (int i = 0; i < 34; ++i) { // _RM_Samples
        float v = SampleValue(p);
        float4 tf = transferFunction(v);
        float tr = exp(-tf.w * delta);
        acc.xyz += tf.xyz * acc.w * delta;
        acc.w *= tr;
        p += delta * rd;
    }
    return float4(acc.xyz, (1.0 - acc.w) * step(t.x, t.y));
}
fixed4 frag(v2f i) : SV_Target
{
    float2 tc = i.screenPos.xy / i.screenPos.w;
    float depth = UNITY_SAMPLE_DEPTH(tex2D(_CameraDepthTexture, tc));
    float eD = LinearEyeDepth(depth);
    float3 ro = _WorldSpaceCameraPos;
    float3 rd = normalize(i.worldPos - ro);
    float4 col = rayMarch(ro, rd, eD);
    //if (col.w < 1) col = float4(1, 0, 0, 1);
    //else col = float4(0, 1, 0, 1);
    if (wingCullPlaneValue(i.worldPos.xyz) == 0 || cullPlaneValue(i.worldPos.xyz) == 0) {
        discard;
    }
    UNITY_APPLY_FOG(i.fogCoord, col);
    return col;
}
Since this works fine in the Editor, I don't think there is any error in the boxIntersection or rayMarch functions. I wonder if there is anything special about WebGL that makes it process the pixels differently, so that I have to tweak the code accordingly. I am new to WebGL and shaders, and would appreciate any help or advice. Thanks in advance.

That's because of the step function.
Approximate it with your own sigmoid function (it might be expensive):
float emulated_step(float a, float x) {
    // Steep sigmoid approximation of step(a, x): ~0 for x < a, ~1 for x > a
    return 1.0 / (1 + pow(1000, -(x - a) * 8192));
}
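For example, the hard cutoff in the rayMarch function above could then be swapped for the approximation like this (just a sketch; the 8192 steepness constant may need tuning for your value range):
// Instead of: return float4(acc.xyz, (1.0 - acc.w) * step(t.x, t.y));
return float4(acc.xyz, (1.0 - acc.w) * emulated_step(t.x, t.y));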

Related

Generating a normal map from a height map in a compute shader?

The problem is that when I try converting the height map to a normal map, the results are wrong. For some reason there appear to be three light sources emitting from the top (green), right (red), and left (blue) in the texture.
This is the GeoMath.hlsl code that I am using:
static const float PI = 3.141592653589793238462643383279;

float2 longitudeLatitudeToUV(float2 longLat) {
    float longitude = longLat[0];
    float latitude = longLat[1];
    float u = longitude / (2 * PI) + 0.5;
    float v = latitude / PI + 0.5;
    return float2(u, v);
}

float3 longitudeLatitudeToPoint(float2 longLat) {
    float longitude = longLat[0];
    float latitude = longLat[1];
    float x;
    float y;
    float z;
    y = sin(latitude);
    float r = cos(latitude);
    x = sin(longitude) * r;
    z = -cos(longitude) * r;
    return float3(x, y, z);
}

float2 uvToLongitudeLatitude(float2 uv) {
    float longitude = (uv.x - 0.5) * (2 * PI);
    float latitude = (uv.y - 0.5) * PI;
    return float2(longitude, latitude);
}

float2 pointToLongitudeLatitude(float3 p) {
    float longitude = atan2(p.x, p.z);
    float latitude = asin(p.y);
    return float2(longitude, latitude);
}

float2 pointToUV(float3 p) {
    p = normalize(p);
    return longitudeLatitudeToUV(pointToLongitudeLatitude(p));
}
This is the compute shader I am using to convert the height map into a normal map:
#pragma kernel CSMain
#include "GeoMath.hlsl"

Texture2D<float> _HeightMap;
RWTexture2D<float4> _NormalMap;
int _TextureSize_Width;
int _TextureSize_Height;
float _WorldRadius;
float _HeightMultiplier;

float3 CalculateWorldPoint(uint2 texCoord)
{
    float2 uv = texCoord / float2(_TextureSize_Width - 1, _TextureSize_Height - 1);
    float2 longLat = uvToLongitudeLatitude(uv);
    float3 spherePoint = longitudeLatitudeToPoint(longLat);
    float height01 = _HeightMap[texCoord].r + 1.0;
    float worldHeight = _WorldRadius + height01 * _HeightMultiplier;
    return spherePoint * worldHeight;
}

uint2 WrapIndex(uint2 texCoord)
{
    texCoord.x = (texCoord.x + _TextureSize_Width) % _TextureSize_Width;
    texCoord.y = max(min(_TextureSize_Height - 1, texCoord.y), 0);
    return texCoord;
}

[numthreads(8,8,1)]
void CSMain (uint3 id : SV_DispatchThreadID)
{
    float3 normalVector;
    float3 posNorth = CalculateWorldPoint(WrapIndex(id.xy + uint2(0, 1)));
    float3 posSouth = CalculateWorldPoint(WrapIndex(id.xy + uint2(0, -1)));
    float3 posEast = CalculateWorldPoint(WrapIndex(id.xy + uint2(1, 0)));
    float3 posWest = CalculateWorldPoint(WrapIndex(id.xy + uint2(-1, 0)));
    float3 dirNorth = normalize(posNorth - posSouth);
    float3 dirEast = normalize(posEast - posWest);
    normalVector = normalize(cross(dirNorth, dirEast));
    _NormalMap[id.xy] = float4(normalVector, 1.0);
}
And the result I am getting is shown below: height map (top), normal map generated from the code above (bottom).
I believe that you are trying to get object-space normals.
But a tiny detail is missing.
The possible values of a normalized vector3 are -1..1 on each axis, while the possible values of a pixel are 0..1.
You just need to adjust the ranges.
This line roughly fixes the problem:
_NormalMap[id.xy] = float4(normalVector / 2 + float3(0.5, 0.5, 0.5), 1.0);
Result
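If the packed normal map is later sampled in another shader, the same remapping has to be undone there; a minimal sketch, assuming the texture is bound as a regular sampler2D named _NormalMap in that shader:
// Unpack from the 0..1 pixel range back to a -1..1 direction
float3 packedNormal = tex2D(_NormalMap, uv).xyz;
float3 objectNormal = normalize(packedNormal * 2.0 - 1.0);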

How to design a shader in Unity so it uses directions from the camera

I have found a few unique shaders on Shadertoy which I would like to use to impart an experience of 'sky' within models of buildings in Unity AR.
An example might be this one: https://www.shadertoy.com/view/4tdSWr, which is just the sky looking up, or this one, https://www.shadertoy.com/view/4tdSWr, which has some directional input from the mouse (click and drag). The HLSL/Unity version of this code, except for changes to the mouse input, is at the end of this post.
Right now the clouds feel more like a green-screen projection on the model, so there is no implication of direction or horizon if you are looking parallel to the plane the building is on (i.e., if I am standing with the clouds moving from right to left, then as I turn left they don't appear to be moving from behind me and receding into the distance).
I have been trying to understand how to use the camera to 'rotate' the shader result so that the camera's direction drives the direction of the clouds' movement. I would also like to use the angle of the camera with respect to the ground plane to impart a horizon when you are looking out towards the walls.
Any insight on how to do this would be great, especially if it is more than just 'use _WorldSpaceCameraPos' or 'just add UNITY_MATRIX_MVP', as the results I found through excessive googling haven't really been that helpful so far.
The code for the second shader linked, adjusted for HLSL/Unity except for the mouse inputs, follows.
Shader "Unlit/skybox"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
iChannel0 ("noise-image", 2D) = "noise-image.png" {}
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// make fog work
#pragma multi_compile_fog
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
UNITY_FOG_COORDS(1)
float4 vertex : SV_POSITION;
};
sampler2D _MainTex;
sampler2D iChannel0;
float4 _MainTex_ST;
float random(in float2 uv)
{
return tex2D(iChannel0, uv / 64.).r;
}
float noise(in float2 uv)
{
float2 i = floor(uv);
float2 f = frac(uv);
f = f * f * (3. - 2. * f);
float lb = random(i + float2(0., 0.));
float rb = random(i + float2(1., 0.));
float lt = random(i + float2(0., 1.));
float rt = random(i + float2(1., 1.));
return lerp(lerp(lb, rb, f.x),
lerp(lt, rt, f.x), f.y);
}
#define OCTAVES 8
float fbm(in float2 uv)
{
float value = 0.;
float amplitude = .5;
for (int i = 0; i < OCTAVES; i++)
{
value += noise(uv) * amplitude;
amplitude *= .5;
uv *= 2.;
}
return value;
}
float3 Sky(in float3 ro, in float3 rd)
{
const float SC = 1e5;
// Calculate sky plane
float dist = (SC - ro.y) / rd.y;
float2 p = (ro + dist * rd).xz;
p *= 1.2 / SC;
// from iq's shader, https://www.shadertoy.com/view/MdX3Rr
float3 lightDir = normalize(float3(-.8, .15, -.3));
float sundot = clamp(dot(rd, lightDir), 0.0, 1.0);
float3 cloudCol = float3(1.,1.0,1.0);
//float3 skyCol = float3(.6, .71, .85) - rd.y * .2 * float3(1., .5, 1.) + .15 * .5;
float3 skyCol = float3(0.3,0.5,0.85) - rd.y*rd.y*0.5;
skyCol = lerp( skyCol, mul(0.85, float3(0.7,0.75,0.85)), pow( 1.0 - max(rd.y, 0.0), 4.0 ) );
// sun
float3 sun = mul(mul(0.25 , float3(1.0,0.7,0.4)) , pow( sundot,5.0 ));
sun += mul(mul(0.25 , float3(1.0,0.8,0.6)) , pow( sundot,64.0 ));
sun += mul(mul(0.2 , float3(1.0,0.8,0.6)) , pow( sundot,512.0 ));
skyCol += sun;
// clouds
float t = mul(_Time.y , 0.1);
float den = fbm(float2(p.x - t, p.y - t));
skyCol = lerp( skyCol, cloudCol, smoothstep(.4, .8, den));
// horizon
skyCol = lerp( skyCol, mul(0.68 , float3(.418, .394, .372)), pow( 1.0 - max(rd.y, 0.0), 16.0 ) );
return skyCol;
}
float3x3 setCamera( in float3 ro, in float3 ta, float cr )
{
float3 cw = normalize(ta-ro);
float3 cp = float3(sin(cr), cos(cr),0.0);
float3 cu = normalize( cross(cw,cp) );
float3 cv = normalize( cross(cu,cw) );
return float3x3( cu, cv, cw );
}
void mainImage( out float4 fragColor, in float2 fragCoord )
{
float2 uv = fragCoord.xy / _ScreenParams.xy;
uv -= 0.5;
uv.x *= _ScreenParams.x / _ScreenParams.y;
float2 mouse = iMouse.xy/_ScreenParams.xy;
float3 ro = float3(0.0, 0.0, 0.0);
float3 ta = float3(cos(mul(mouse.x , 6.28)), mul(mouse.y , 2.0), sin(mul(mouse.x , 6.28)));
float3x3 cam = setCamera(ro, ta, 0.0);
float3 rd = normalize(mul(cam , float3(uv, 1.0)));
float3 col = Sky(ro, rd);
fragColor = float4(float3(col),1.0);
}
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
UNITY_TRANSFER_FOG(o,o.vertex);
return o;
}
fixed4 frag (v2f i) : SV_Target
{
// sample the texture
fixed4 col = tex2D(_MainTex, i.uv);
// apply fog
UNITY_APPLY_FOG(i.fogCoord, col);
return col;
}
ENDCG
}
}
}
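Note that, as posted, the frag function above only samples _MainTex and never calls mainImage, so the ported Shadertoy code is not actually driving the output yet. One common way to make the sky react to the AR camera is to build the ray in world space, from the camera position towards the fragment, instead of from screen UVs; this is the same pattern used in the volume-rendering question above. A minimal sketch only, where the worldPos interpolator is an addition and not part of the original shader:
// In v2f, add:  float3 worldPos : TEXCOORD2;
// In vert, add: o.worldPos = mul(unity_ObjectToWorld, v.vertex).xyz;
fixed4 frag (v2f i) : SV_Target
{
    // World-space ray from the camera through this fragment; rotating or
    // tilting the device rotates rd, so cloud motion and the horizon stay
    // anchored to the world instead of to the screen.
    float3 ro = _WorldSpaceCameraPos;
    float3 rd = normalize(i.worldPos - ro);
    fixed4 col = fixed4(Sky(ro, rd), 1.0);
    UNITY_APPLY_FOG(i.fogCoord, col);
    return col;
}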

URP shader: almost the same code behaves differently

I'm working on a URP Gerstner wave shader. The problem was that the waves on two side-by-side ocean planes were not continuous. So I thought that if the vertex positions were world positions, the problem could be fixed, and it worked. However, there was something I really don't understand.
This code worked:
v.positionOS = mul(unity_ObjectToWorld, v.positionOS);
But that one did weird things:
float worldPos = mul(unity_ObjectToWorld, v.positionOS);
v.positionOS = worldPos;
The whole vertex shader is here:
v2f vert(a2v v)
{
    v2f o;
    float3 tangent = float3(1, 0, 0);
    float3 binormal = float3(0, 0, 1);
    float3 p = v.positionOS;
    /*float worldPos = mul(unity_ObjectToWorld, v.positionOS);
    v.positionOS = worldPos;*/
    v.positionOS = mul(unity_ObjectToWorld, v.positionOS);
    p += GerstnerWave(_WaveA, v.positionOS.xyz, tangent, binormal);
    o.heightOS = p.y;
    VertexPositionInputs positionInputs = GetVertexPositionInputs(p);
    o.positionCS = positionInputs.positionCS;
    o.positionWS = positionInputs.positionWS;
    return o;
}
float3 GerstnerWave(
    float4 wave, float3 p, inout float3 tangent, inout float3 binormal
)
{
    float steepness = wave.z;
    float wavelength = wave.w;
    float k = 2 * UNITY_PI / wavelength;
    float c = sqrt(9.8 / k);
    float2 d = normalize(wave.xy) * _Frequency;
    float f = k * (dot(d, p.xz) - c * _Time.y * _Speed);
    float a = steepness / k;
    tangent += float3(
        -d.x * d.x * (steepness * sin(f)),
        d.x * (steepness * cos(f)),
        -d.x * d.y * (steepness * sin(f))
    );
    binormal += float3(
        -d.x * d.y * (steepness * sin(f)),
        d.y * (steepness * cos(f)),
        -d.y * d.y * (steepness * sin(f))
    );
    return float3(
        d.x * (a * cos(f)),
        a * sin(f),
        d.y * (a * cos(f))
    );
}
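The difference between the two snippets is most likely just the declared type of worldPos: declaring it as a plain float truncates the float4 result of mul() to a single scalar (the x component), and assigning that scalar back to v.positionOS then broadcasts it into every component, hence the weird result. A corrected sketch of the two-line version, assuming positionOS is declared as float4 in the a2v struct:
// Keep the vector type so no components are lost (positionOS assumed to be float4)
float4 worldPos = mul(unity_ObjectToWorld, v.positionOS);
v.positionOS = worldPos; // now equivalent to the single-line version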

Dynamically recalculating normals after vertex displacement

Can anyone let me know if I'm on the right tack with this: I have a vertex shader that bumps outward dynamically depending on a point passed in (think a mouse running under a rug). In order for the lighting to update properly, I need to recalculate the normals after modifying the vertex position. I have access to each vertex point as well as the origin.
My current thinking is that I do some sort of math to determine the tangent/bitangent and use a cross product to determine the normal. My math skills aren't great; what would I need to do to determine those vectors?
Here's my current vert shader:
void vert(inout appdata_full v)
{
    float3 worldPos = mul(unity_ObjectToWorld, v.vertex).xyz;
    float distanceToLift = distance(worldPos, _LiftOrigin);
    v.vertex.y = smoothstep(_LiftHeight, 0, distanceToLift / _LiftRadius) * 5;
}
A simple solution is covered in this tutorial by Ronja, which I'll summarize here with modifications that reflect your specific case.
First, find two points offset from your current point by a small amount along the tangent and bitangent (which you can calculate from the normal and tangent):
float3 posPlusTangent = v.vertex + v.tangent * 0.01;
worldPos = mul(unity_ObjectToWorld, posPlusTangent).xyz;
distanceToLift = distance(worldPos, _LiftOrigin);
posPlusTangent.y = smoothstep(_LiftHeight, 0, distanceToLift / _LiftRadius) * 5;
float3 bitangent = cross(v.normal, v.tangent);
float3 posPlusBitangent = v.vertex + bitangent * 0.01;
worldPos = mul(unity_ObjectToWorld, posPlusBitangent).xyz;
distanceToLift = distance(worldPos, _LiftOrigin);
posPlusBitangent.y = smoothstep(_LiftHeight, 0, distanceToLift / _LiftRadius) * 5;
Then, find the difference between these offsets and the new vertex pos to find the new tangent and bitangent, then do another cross product to find the resulting normal:
float3 modifiedTangent = posPlusTangent - v.vertex;
float3 modifiedBitangent = posPlusBitangent - v.vertex;
float3 modifiedNormal = cross(modifiedTangent, modifiedBitangent);
v.normal = normalize(modifiedNormal);
Altogether:
float find_offset(float3 localV)
{
    float3 worldPos = mul(unity_ObjectToWorld, localV).xyz;
    float distanceToLift = distance(worldPos, _LiftOrigin);
    return smoothstep(_LiftHeight, 0, distanceToLift / _LiftRadius) * 5;
}

void vert(inout appdata_full v)
{
    v.vertex.y = find_offset(v.vertex);
    float3 posPlusTangent = v.vertex + v.tangent * 0.01;
    posPlusTangent.y = find_offset(posPlusTangent);
    float3 bitangent = cross(v.normal, v.tangent);
    float3 posPlusBitangent = v.vertex + bitangent * 0.01;
    posPlusBitangent.y = find_offset(posPlusBitangent);
    float3 modifiedTangent = posPlusTangent - v.vertex;
    float3 modifiedBitangent = posPlusBitangent - v.vertex;
    float3 modifiedNormal = cross(modifiedTangent, modifiedBitangent);
    v.normal = normalize(modifiedNormal);
}
This is a method of approximation, but it may be good enough!

How can I convert these fragColor code snippets from ShaderToy so that they will work in Unity?

I'm following this tutorial: https://www.youtube.com/watch?v=CzORVWFvZ28 to convert some code from ShaderToy to Unity. This is the shader that I'm attempting to convert: https://www.shadertoy.com/view/Ws23WD.
I saw that in the tutorial he was able to take his fragColor statement from ShaderToy and simply return a color in Unity instead. However, when I tried doing that with the code I have from ShaderToy, an error about not being able to implicitly convert from float3 to float4 popped up. I saw that my color variable is declared as a float3, which must be what's causing the issue, but I need some help figuring out how to fix this.
I also noticed that I have an 'a' value with the fragColor variable, in addition to the rgb values; would I use a float4 to take in the (r, g, b, a) values?
fixed4 frag (v2f i) : SV_Target
{
    //float2 uv = float2(fragCoord.x / iResolution.x, fragCoord.y / iResolution.y);
    float2 uv = float2(i.uv);
    uv -= 0.5;
    //uv /= float2(iResolution.y / iResolution.x, 1);
    float3 cam = float3(0, -0.15, -3.5);
    float3 dir = normalize(float3(uv, 1));
    float cam_a2 = sin(_Time.y) * pi * 0.1;
    cam.yz = rotate(cam.yz, cam_a2);
    dir.yz = rotate(dir.yz, cam_a2);
    float cam_a = _Time.y * pi * 0.1;
    cam.xz = rotate(cam.xz, cam_a);
    dir.xz = rotate(dir.xz, cam_a);
    float3 color = float3(0.16, 0.12, 0.10);
    float t = 0.00001;
    const int maxSteps = 128;
    for (int i = 0; i < maxSteps; ++i) {
        float3 p = cam + dir * t;
        float d = scene(p);
        if (d < 0.0001 * t) {
            color = float3(1.0, length(p) * (0.6 + (sin(_Time.y * 3.0) + 1.0) * 0.5 * 0.4), 0);
            break;
        }
        t += d;
    }
    //fragColor.rgb = color;
    return color;
    //fragColor.a = 1.0;
}
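A minimal way to resolve the float3-to-float4 error, assuming the alpha should simply be 1.0 as in the commented-out fragColor lines, is to pack the color and alpha into a float4 before returning:
// fragColor.rgb = color; fragColor.a = 1.0;  becomes:
return float4(color, 1.0); // rgb from the ray march, alpha = 1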