Swift Metal: rounding the corners of an irregular shape

I am developing an application in which non-standard shapes (non-standard rects) need to have rounded corners.
For the rounding I do this (Metal shader):
constant float2 c00s = float2(0.0, 0.0);
constant float2 c01s = float2(0.0, 1.0);
constant float2 c11s = float2(1.0, 1.0);
constant float2 c10s = float2(1.0, 0.0);

// Check if a point is within a given corner
bool in_cornerS(float2 p, float2 corner, float2 radius) {
    // determine the direction we want to be filled
    float2 axis = (c11s - corner) - corner;
    // warp the point so we are always testing the bottom left point with the
    // circle centered on the origin
    p = p - (corner + axis * radius);
    p *= axis / radius;
    return (p.x > 0.0 && p.y > -1.0) || (p.y > 0.0 && p.x > -1.0) || dot(p, p) < 1.0;
}

bool test_rounded_maskS(float2 p, float2 corner_size00, float2 corner_size01, float2 corner_size11, float2 corner_size10) {
    return
        in_cornerS(p, c00s, corner_size00) &&
        in_cornerS(p, c01s, corner_size01) &&
        in_cornerS(p, c10s, corner_size11) &&
        in_cornerS(p, c11s, corner_size10);
}

fragment float4 fragmentStencil(VertexIn vert [[stage_in]],
                                device const float2 *dimensions00 [[ buffer(0) ]],
                                device const float2 *dimensions01 [[ buffer(1) ]],
                                device const float2 *dimensions11 [[ buffer(2) ]],
                                device const float2 *dimensions10 [[ buffer(3) ]]) {
    float2 a_uv = vert.texCoord;
    float2 u_dimensions00 = *dimensions00;
    float2 u_dimensions01 = *dimensions01;
    float2 u_dimensions11 = *dimensions11;
    float2 u_dimensions10 = *dimensions10;
    if (!test_rounded_maskS(a_uv, u_dimensions00, u_dimensions01, u_dimensions11, u_dimensions10)) {
        discard_fragment();
    }
    return float4(1.0, 1.0, 1.0, 1.0);
}
I get this result, but I want this instead. How can I achieve it?

Step 1: Follow the install instructions for the Metal port of NanoVG.
Step 2: Prosper. You can now render vector shapes and paths. This method will fill a path:
- (void)renderFillWithPaint:(NVGpaint*)paint
         compositeOperation:(NVGcompositeOperationState)compositeOperation
                    scissor:(NVGscissor*)scissor
                     fringe:(float)fringe
                     bounds:(const float*)bounds
                      paths:(const NVGpath*)paths
                     npaths:(int)npaths;
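A rough sketch of the calling side, assuming the MetalNanoVG port (nvgCreateMTL) plus the stock NanoVG path API; the exact header name, creation flags, and the metalLayer/viewWidth/devicePixelRatio placeholders are illustrative, so check the port's README for the real setup:

#include "nanovg.h"
#include "nanovg_mtl.h"   // header from the Metal port; name may vary

// 'metalLayer' is the CAMetalLayer backing your view
NVGcontext *vg = nvgCreateMTL(metalLayer, NVG_ANTIALIAS | NVG_STENCIL_STROKES);

// each frame:
nvgBeginFrame(vg, viewWidth, viewHeight, devicePixelRatio);
nvgBeginPath(vg);
// one radius per corner (top-left, top-right, bottom-right, bottom-left),
// which covers the "non-standard" rounded-rect case
nvgRoundedRectVarying(vg, 20.0f, 20.0f, 200.0f, 120.0f, 24.0f, 4.0f, 24.0f, 4.0f);
nvgFillColor(vg, nvgRGBAf(1.0f, 1.0f, 1.0f, 1.0f));
nvgFill(vg);
nvgEndFrame(vg);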

Related

How to design a shader in Unity so it uses directions from the camera

I have found a few unique shaders on Shadertoy which I would like to use to impart an experience of 'sky' within models of buildings in Unity AR.
An example might be this one: https://www.shadertoy.com/view/4tdSWr, which is just the sky looking up, or this one, https://www.shadertoy.com/view/4tdSWr, which has some directional input from the mouse (click and drag).
Right now the clouds feel more like a green-screen projection on the model, so there is no implication of direction or horizon if you are looking parallel to the plane the building is on (i.e., if I am standing with the clouds moving from right to left, then as I turn left they don't appear to be moving from behind me and receding into the distance).
I have been trying to understand how to use the camera to 'rotate' the shader result, so that the direction of the camera drives the direction of the clouds' movement. I would also like to use the angle of the camera with respect to the ground plane to impart a horizon when you are looking out towards the walls.
Any insight on how to do this would be great, especially if it is more than just 'use _WorldSpaceCameraPos' or 'just add UNITY_MATRIX_MVP', as the results I found through excessive googling haven't really been that helpful so far.
The code for the second shader linked, adjusted for HLSL/Unity except for the mouse inputs, follows.
Shader "Unlit/skybox"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
iChannel0 ("noise-image", 2D) = "noise-image.png" {}
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// make fog work
#pragma multi_compile_fog
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
UNITY_FOG_COORDS(1)
float4 vertex : SV_POSITION;
};
sampler2D _MainTex;
sampler2D iChannel0;
float4 _MainTex_ST;
float random(in float2 uv)
{
return tex2D(iChannel0, uv / 64.).r;
}
float noise(in float2 uv)
{
float2 i = floor(uv);
float2 f = frac(uv);
f = f * f * (3. - 2. * f);
float lb = random(i + float2(0., 0.));
float rb = random(i + float2(1., 0.));
float lt = random(i + float2(0., 1.));
float rt = random(i + float2(1., 1.));
return lerp(lerp(lb, rb, f.x),
lerp(lt, rt, f.x), f.y);
}
#define OCTAVES 8
float fbm(in float2 uv)
{
float value = 0.;
float amplitude = .5;
for (int i = 0; i < OCTAVES; i++)
{
value += noise(uv) * amplitude;
amplitude *= .5;
uv *= 2.;
}
return value;
}
float3 Sky(in float3 ro, in float3 rd)
{
const float SC = 1e5;
// Calculate sky plane
float dist = (SC - ro.y) / rd.y;
float2 p = (ro + dist * rd).xz;
p *= 1.2 / SC;
// from iq's shader, https://www.shadertoy.com/view/MdX3Rr
float3 lightDir = normalize(float3(-.8, .15, -.3));
float sundot = clamp(dot(rd, lightDir), 0.0, 1.0);
float3 cloudCol = float3(1.,1.0,1.0);
//float3 skyCol = float3(.6, .71, .85) - rd.y * .2 * float3(1., .5, 1.) + .15 * .5;
float3 skyCol = float3(0.3,0.5,0.85) - rd.y*rd.y*0.5;
skyCol = lerp( skyCol, mul(0.85, float3(0.7,0.75,0.85)), pow( 1.0 - max(rd.y, 0.0), 4.0 ) );
// sun
float3 sun = mul(mul(0.25 , float3(1.0,0.7,0.4)) , pow( sundot,5.0 ));
sun += mul(mul(0.25 , float3(1.0,0.8,0.6)) , pow( sundot,64.0 ));
sun += mul(mul(0.2 , float3(1.0,0.8,0.6)) , pow( sundot,512.0 ));
skyCol += sun;
// clouds
float t = mul(_Time.y , 0.1);
float den = fbm(float2(p.x - t, p.y - t));
skyCol = lerp( skyCol, cloudCol, smoothstep(.4, .8, den));
// horizon
skyCol = lerp( skyCol, mul(0.68 , float3(.418, .394, .372)), pow( 1.0 - max(rd.y, 0.0), 16.0 ) );
return skyCol;
}
float3x3 setCamera( in float3 ro, in float3 ta, float cr )
{
float3 cw = normalize(ta-ro);
float3 cp = float3(sin(cr), cos(cr),0.0);
float3 cu = normalize( cross(cw,cp) );
float3 cv = normalize( cross(cu,cw) );
return float3x3( cu, cv, cw );
}
void mainImage( out float4 fragColor, in float2 fragCoord )
{
float2 uv = fragCoord.xy / _ScreenParams.xy;
uv -= 0.5;
uv.x *= _ScreenParams.x / _ScreenParams.y;
float2 mouse = iMouse.xy/_ScreenParams.xy;
float3 ro = float3(0.0, 0.0, 0.0);
float3 ta = float3(cos(mul(mouse.x , 6.28)), mul(mouse.y , 2.0), sin(mul(mouse.x , 6.28)));
float3x3 cam = setCamera(ro, ta, 0.0);
float3 rd = normalize(mul(cam , float3(uv, 1.0)));
float3 col = Sky(ro, rd);
fragColor = float4(float3(col),1.0);
}
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
UNITY_TRANSFER_FOG(o,o.vertex);
return o;
}
fixed4 frag (v2f i) : SV_Target
{
// sample the texture
fixed4 col = tex2D(_MainTex, i.uv);
// apply fog
UNITY_APPLY_FOG(i.fogCoord, col);
return col;
}
ENDCG
}
}
}
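A minimal sketch of the usual approach, not from the original post: instead of driving the ray from the mouse via setCamera, compute a per-fragment view ray in world space from _WorldSpaceCameraPos and the fragment's world position, and feed that to Sky() as rd. The clouds then rotate and recede as the AR camera turns, and rd.y gives the horizon falloff. Assuming Sky(), fbm() and the appdata struct stay as above:

struct v2f
{
    float4 vertex : SV_POSITION;
    float3 worldPos : TEXCOORD0;
};

v2f vert (appdata v)
{
    v2f o;
    o.vertex = UnityObjectToClipPos(v.vertex);
    // world-space position of this vertex
    o.worldPos = mul(unity_ObjectToWorld, v.vertex).xyz;
    return o;
}

fixed4 frag (v2f i) : SV_Target
{
    // world-space ray from the camera through this fragment;
    // turning the device now turns the sky with it
    float3 rd = normalize(i.worldPos - _WorldSpaceCameraPos);
    float3 col = Sky(_WorldSpaceCameraPos, rd);
    return fixed4(col, 1.0);
}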

How to render a 3D render texture in a Unity3D custom shader

Hi, is it possible to sample a 3D render texture in a custom raymarching shader, like a Texture3D?
I use a 3D render texture because I calculate and set the color of the 3D volume in a compute shader. I set up the 3D RenderTexture as shown below:
output3DRenderTexture = new RenderTexture(m_CubeDim.x, m_CubeDim.y, 0, thisTexFormat);
output3DRenderTexture.enableRandomWrite = true;
output3DRenderTexture.dimension = UnityEngine.Rendering.TextureDimension.Tex3D;
output3DRenderTexture.volumeDepth = m_CubeDim.z;
output3DRenderTexture.Create();
I populate the 3D RenderTexture data in a compute shader, and GetData helps me confirm that the 3D render texture has all the correct color data.
I can successfully render if, in the custom shader's sampler3D, I replace the 3D RenderTexture with a Texture3D I create from the Tex2D slices.
cubeRenderer.material.SetTexture("_MainTex", output3DRenderTexture);//this does not render
versus
cubeRenderer.material.SetTexture("_MainTex", outputTexture3D);//this renders
This post from 2016 seems to suggest it's possible to render 3D render textures in custom shaders, but it may now be outdated; it doesn't work for me, and no error shows either.
It seems to me there may be a significant performance hit if I create the Texture2D slices on the GPU, carry on with creating the Texture3D on the CPU, and re-send that Tex3D to the GPU for the custom shader to consume. After all, the 3D volume already exists on the GPU, just as a RenderTexture set to Tex3D. Thank you!
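For reference, the compute-side population described above usually looks something like the sketch below. This is not the poster's actual code; the kernel name, the Result property, the 64.0 divisor and the thread-group sizes are placeholders.

// VolumeFill.compute -- writes straight into the 3D RenderTexture
#pragma kernel FillVolume

RWTexture3D<float4> Result;

[numthreads(8, 8, 8)]
void FillVolume(uint3 id : SV_DispatchThreadID)
{
    // any per-voxel colour calculation
    Result[id] = float4(id / 64.0, 1.0);
}

// C# side: bind the RenderTexture to the kernel and dispatch one thread per voxel
int kernel = volumeCompute.FindKernel("FillVolume");
volumeCompute.SetTexture(kernel, "Result", output3DRenderTexture);
volumeCompute.Dispatch(kernel, m_CubeDim.x / 8, m_CubeDim.y / 8, m_CubeDim.z / 8);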
Shader:
#include "UnityCG.cginc"
#define ITERATIONS 100
#define PI2 6.28318530718
half4 _Color;
sampler3D _MainTex;
half _Intensity, _Threshold;
half3 _SliceMin, _SliceMax;
float4x4 _AxisRotationMatrix;
float _Angle;
struct Ray {
float3 origin;
float3 dir;
};
struct AABB {
float3 min;
float3 max;
};
// https://http.download.nvidia.com/developer/presentations/2005/GDC/Audio_and_Slides/VolumeRendering_files/GDC_2_files/GDC_2005_VolumeRenderingForGames_files/Slide0073.htm
bool intersect(Ray r, AABB aabb, out float t0, out float t1)
{
float3 invR = 1.0 / r.dir;
float3 tbot = invR * (aabb.min - r.origin);
float3 ttop = invR * (aabb.max - r.origin);
float3 tmin = min(ttop, tbot);
float3 tmax = max(ttop, tbot);
float2 t = max(tmin.xx, tmin.yz);
t0 = max(t.x, t.y);
t = min(tmax.xx, tmax.yz);
t1 = min(t.x, t.y);
return t0 <= t1;
}
float3 get_uv(float3 p) {
return (p + 0.5);
}
float sample_volume(float3 uv, float3 p)
{
float v = tex3D(_MainTex, uv).r * _Intensity;
return v;
}
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float4 vertex : SV_POSITION;
float2 uv : TEXCOORD0;
float3 world : TEXCOORD1;
float3 local : TEXCOORD2;
};
v2f vert(appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.uv;
o.world = mul(unity_ObjectToWorld, v.vertex).xyz;
o.local = v.vertex.xyz;
return o;
}
fixed4 frag(v2f i) : SV_Target
{
Ray ray;
ray.origin = i.local;
// world space direction to object space
float3 dir = (i.world - _WorldSpaceCameraPos);
ray.dir = normalize(mul(unity_WorldToObject, dir));
AABB aabb;
aabb.min = float3(-0.5, -0.5, -0.5);
aabb.max = float3(0.5, 0.5, 0.5);
float tnear;
float tfar;
intersect(ray, aabb, tnear, tfar);
tnear = max(0.0, tnear);
// float3 start = ray.origin + ray.dir * tnear;
float3 start = ray.origin;
float3 end = ray.origin + ray.dir * tfar;
float dist = abs(tfar - tnear);
float step_size = dist / float(ITERATIONS);
float3 ds = normalize(end - start) * step_size;
float4 dst = float4(0, 0, 0, 0);
float3 p = start;
[unroll]
for (int iter = 0; iter < ITERATIONS; iter++)
{
float3 uv = get_uv(p);
float v = sample_volume(uv, p);
float4 src = float4(v, v, v, v);
src.a *= 0.5;
src.rgb *= src.a;
// blend
dst = (1.0 - dst.a) * src + dst;
p += ds;
if (dst.a > _Threshold) break;
}
return saturate(dst) * _Color;
}

Dynamically recalculating normals after vertex displacement

Can anyone let me know if I'm on the right track with this: I have a vertex shader that bumps outward dynamically depending on a point passed in (think of a mouse running under a rug). In order for the lighting to update properly, I need to recalculate the normals after modifying the vertex position. I have access to each vertex point as well as the origin.
My current thinking is that I do some sort of math to determine the tangent/bitangent and use a cross product to determine the normal. My math skills aren't great; what would I need to do to determine those vectors?
Here's my current vert shader:
void vert(inout appdata_full v)
{
    float3 worldPos = mul(unity_ObjectToWorld, v.vertex).xyz;
    float distanceToLift = distance(worldPos, _LiftOrigin);
    v.vertex.y = smoothstep(_LiftHeight, 0, distanceToLift / _LiftRadius) * 5;
}
A simple solution is covered in this tutorial by Ronja, which I'll summarize here with modifications which reflect your specific case.
First, find two points offset from your current point by a small amount of tangent and bitangent (which you can calculate from normal and tangent):
float3 posPlusTangent = v.vertex.xyz + v.tangent.xyz * 0.01;
worldPos = mul(unity_ObjectToWorld, float4(posPlusTangent, 1)).xyz;
distanceToLift = distance(worldPos, _LiftOrigin);
posPlusTangent.y = smoothstep(_LiftHeight, 0, distanceToLift / _LiftRadius) * 5;

float3 bitangent = cross(v.normal, v.tangent.xyz);
float3 posPlusBitangent = v.vertex.xyz + bitangent * 0.01;
// note: transform the offset position, not the bitangent direction
worldPos = mul(unity_ObjectToWorld, float4(posPlusBitangent, 1)).xyz;
distanceToLift = distance(worldPos, _LiftOrigin);
posPlusBitangent.y = smoothstep(_LiftHeight, 0, distanceToLift / _LiftRadius) * 5;
Then, find the difference between these offsets and the new vertex pos to find the new tangent and bitangent, then do another cross product to find the resulting normal:
float3 modifiedTangent = posPlusTangent - v.vertex;
float3 modifiedBitangent = posPlusBitangent - v.vertex;
float3 modifiedNormal = cross(modifiedTangent, modifiedBitangent);
v.normal = normalize(modifiedNormal);
Altogether:
float find_offset(float3 localV)
{
    // w = 1 so the translation part of the object-to-world matrix is applied
    float3 worldPos = mul(unity_ObjectToWorld, float4(localV, 1)).xyz;
    float distanceToLift = distance(worldPos, _LiftOrigin);
    return smoothstep(_LiftHeight, 0, distanceToLift / _LiftRadius) * 5;
}

void vert(inout appdata_full v)
{
    v.vertex.y = find_offset(v.vertex.xyz);

    float3 posPlusTangent = v.vertex.xyz + v.tangent.xyz * 0.01;
    posPlusTangent.y = find_offset(posPlusTangent);

    float3 bitangent = cross(v.normal, v.tangent.xyz);
    float3 posPlusBitangent = v.vertex.xyz + bitangent * 0.01;
    posPlusBitangent.y = find_offset(posPlusBitangent);

    float3 modifiedTangent = posPlusTangent - v.vertex.xyz;
    float3 modifiedBitangent = posPlusBitangent - v.vertex.xyz;
    float3 modifiedNormal = cross(modifiedTangent, modifiedBitangent);
    v.normal = normalize(modifiedNormal);
}
This is a method of approximation, but it may be good enough!

How to convert the texture() function from ShaderToy to Metal Shading Language?

I am working on a custom Metal shader, and I am trying to replicate this particular effect from shader toy: https://www.shadertoy.com/view/3sfcR2
But I can't seem to understand how to convert their texture() function to the Metal shader format. Any ideas?
Here's what I have so far in Metal:
#include <metal_stdlib>
using namespace metal;
kernel void chromaticAberration(texture2d<float, access::read> inTexture [[ texture(0) ]],
texture2d<float, access::write> outTexture [[ texture(1) ]],
device const float *time [[ buffer(0) ]],
uint2 gid [[ thread_position_in_grid ]])
{
float ChromaticAberration = 0.0 / 10.0 + 8.0;
// get the width and height of the screen texture
uint width = outTexture.get_width();
uint height = outTexture.get_height();
// set its resolution
float2 iResolution = float2(width, height);
float4 orig = inTexture.read(gid);
float2 uv = orig.xy / iResolution.xy;
float2 texel = 1.0 / iResolution.xy;
float2 coords = (uv - 0.5) * 2.0;
float coordDot = dot (coords, coords);
float2 precompute = ChromaticAberration * coordDot * coords;
float2 uvR = uv - texel.xy * precompute;
float2 uvB = uv + texel.xy * precompute;
// How to convert these texture() functions?
float r = texture(iChannel0, uvR).r;
float g = texture(iChannel0, uv).g;
float b = texture(iChannel0, uvB).b;
float a = 1.;
const float4 colorAtPixel = float4(r,g,b,1.0);
outTexture.write(colorAtPixel, gid);
}
EDIT:
Following the answer from @JustSomeGuy, I was able to successfully replicate this shader in Metal. Here is the final version:
#include <metal_stdlib>
using namespace metal;
kernel void chromaticAberration(texture2d<float, access::read> inTexture [[ texture(0) ]],
texture2d<float, access::write> outTexture [[ texture(1) ]],
texture2d<float, access::sample> sampleTexture [[ texture(2) ]],
device const float *time [[ buffer(0) ]],
uint2 gid [[ thread_position_in_grid ]])
{
float ChromaticAberration = 0.0 / 10.0 + 8.0;
// get the width and height of the screen texture
uint width = inTexture.get_width();
uint height = inTexture.get_height();
// set its resolution
float2 iResolution = float2(width, height);
float2 uv = float2(gid) / iResolution.xy;
float2 texel = 1.0 / iResolution.xy;
float2 coords = (uv - 0.5) * 2.0;
float coordDot = dot (coords, coords);
float2 precompute = ChromaticAberration * coordDot * coords;
float2 uvR = uv - texel.xy * precompute;
float2 uvB = uv + texel.xy * precompute;
constexpr sampler s(address::clamp_to_edge, filter::linear);
float r = sampleTexture.sample(s, uvR).r;
float g = sampleTexture.sample(s, uv).g;
float b = sampleTexture.sample(s, uvB).b;
const float4 colorAtPixel = float4(r,g,b,1.0);
outTexture.write(colorAtPixel, gid);
}
Kudos to @JustSomeGuy! Thank you for your help!
Well, ShaderToy uses GLSL (or one of its variants), so the texture function is basically a sample call in Metal. Let's look at an example. I'm using this doc. We'll use the 2D version since that's what you probably want.
gvec4 texture( gsampler2D sampler,
               vec2 P,
               [float bias]);
So in this case iChannel0 is your sampler and uvR, uv, uvB are texture coordinates (P). They should be float2.
So this is a global function that samples a color for us from a sampler. In Metal, textures and samplers are separate, and you'll need both to sample. Also, in Metal, sample is not a global function but a member function of texture2d. Let's look at the Metal Shading Language Specification, Section 6.10.3, "2D Texture". There we'll find this method:
Tv sample(sampler s, float2 coord, int2 offset = int2(0)) const
where Tv is the template parameter you have in your texture2d instantiation (probably half or float). It also takes a sampler and texcoords, so this code from your sample:
float r = texture(iChannel0, uvR).r;
float g = texture(iChannel0, uv).g;
float b = texture(iChannel0, uvB).b;
will turn into something like this:
constexpr sampler mySampler { filter::linear };
float r = iChannel0.sample(mySampler, uvR).r;
float g = iChannel0.sample(mySampler, uv).g;
float b = iChannel0.sample(mySampler, uvB).b;
And you will also need to pass texture2d<float> iChannel [[texture(N)]] (where N is the index you chose) to your shader the same way shadertoy does it (it's just a global var there, but in Metal you'd need to actually pass it as an argument).
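Not part of the answer, but for completeness: the host-side binding for the edited kernel above might look roughly like this in Swift. The command buffer, pipeline state, texture and buffer variables are placeholders.

let encoder = commandBuffer.makeComputeCommandEncoder()!
encoder.setComputePipelineState(chromaticAberrationPipeline)
encoder.setTexture(inTexture, index: 0)        // access::read
encoder.setTexture(outTexture, index: 1)       // access::write
encoder.setTexture(sampleTexture, index: 2)    // access::sample, used with the sampler
encoder.setBuffer(timeBuffer, offset: 0, index: 0)

// one thread per output pixel
let threadsPerGroup = MTLSize(width: 8, height: 8, depth: 1)
let groups = MTLSize(width: (outTexture.width + 7) / 8,
                     height: (outTexture.height + 7) / 8,
                     depth: 1)
encoder.dispatchThreadgroups(groups, threadsPerThreadgroup: threadsPerGroup)
encoder.endEncoding()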

Shader that transforms a mercator projection to equirectangular?

I am trying to make a shader in Unity that takes a mercator projection texture as a source and converts it to an equirectangular projection texture.
Input example:
Output example:
This example does the opposite with an equirectangular as source.
If you look at the source of the above example:
// mercator
float latClamped = clamp(lat, -1.4835298641951802, 1.4835298641951802);
float yMerc = log(tan(PI / 4.0 + latClamped / 2.0)) / PI2;
float xMerc = xEqui / 2.0;
vec4 mercatorPos = vec4(xMerc, yMerc, 0.0, 1.0);
Can anyone help me reverse this so I can go from a mercator map as the source to equirectangular (or, even better, azimuthal)?
Looking for a way to do 2D texture deformations going from x/y to longitude(x)/latitude(y) and back.
I appreciate your input.
If you want to output the equirectangular projection, you need to convert from equirectangular coordinates to mercator coordinates and then sample the mercator projection at those coordinates.
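For reference, these are the standard Mercator relations being used (my notation, not spelled out in the original answer), with longitude $\lambda \in [0, 2\pi)$ and latitude $\varphi \in (-\pi/2, \pi/2)$:

$$x = \lambda, \qquad y = \ln\tan\!\left(\frac{\pi}{4} + \frac{\varphi}{2}\right),$$

and the inverse, for going back from Mercator to equirectangular:

$$\lambda = x, \qquad \varphi = 2\arctan\!\left(e^{\,y}\right) - \frac{\pi}{2}.$$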
This is what it would look like in a fragment shader from uvs:
//uv to equirectangular
float lon = (uv.x) * 2 * PI; // longitude, from 0 to 2PI
float lat = (uv.y - .5f) * PI; // latitude, from -PI/2 to PI/2
// equirectangular to mercator
float x = lon;
float y = log(tan(PI / 4. + lat / 2.));
// bring x,y into [0,1] range
x = x / (2*PI);
y = (y+PI) / (2*PI);
// sample mercator projection
fixed4 col = tex2D(_MainTex, float2(x,y));
The same thing applies to the azimuthal projection: you can go from azimuthal coordinates -> equirectangular -> mercator and sample the image, or you can find a formula to go directly from azimuthal -> mercator. The wiki pages have a bunch of formulas for going back and forth between projections. Here is a full shader to play around with. The input is a mercator projection, and the output is an equirectangular or azimuthal projection (choose from the dropdown menu):
Shader "Unlit/NewUnlitShader 1"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
[Enum(Equirectangular,0,Azimuthal,1)]
_Azimuthal("Projection", float) = 0
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
sampler2D _MainTex;
float4 _MainTex_ST;
float _Azimuthal;
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
return o;
}
#define PI 3.141592653589793238462f
#define PI2 6.283185307179586476924f
float2 uvToEquirectangular(float2 uv) {
float lon = (uv.x) * PI2; // longitude, from 0 to 2PI
float lat = (uv.y - .5f) * PI; // latitude, from -PI/2 to PI/2
return float2(lon, lat);
}
float2 uvAsAzimuthalToEquirectangular(float2 uv) {
float2 coord = (uv - .5) * 4;
float radius = length(coord);
float angle = atan2(coord.y, coord.x) + PI;
//formula from https://en.wikipedia.org/wiki/Lambert_azimuthal_equal-area_projection
float lon = angle;
float lat = 2 * acos(radius / 2.) - PI / 2;
return float2(lon, lat);
}
fixed4 frag(v2f i) : SV_Target
{
// get equirectangular coordinates
float2 coord = _Azimuthal ? uvAsAzimuthalToEquirectangular(i.uv) : uvToEquirectangular(i.uv);
// equirectangular to mercator
float x = coord.x;
float y = log(tan(PI / 4. + coord.y / 2.));
// bring x,y into [0,1] range
x = x / PI2;
y = (y + PI) / PI2;
fixed4 col = tex2D(_MainTex, float2(x,y));
// just to make it look nicer
col = _Azimuthal && length(i.uv*2-1) > 1 ? 1 : col;
return col;
}
ENDCG
}
}
}