How do you write z-depth in a shader?

This shader (code at the end) uses raymarching to render procedural geometry:
However, in the image (above) the cube in the background should be partially occluding the pink solid; it isn't because of this:
struct fragmentOutput {
float4 color : SV_Target;
float zvalue : SV_Depth;
};
fragmentOutput frag(fragmentInput i) {
fragmentOutput o;
...
o.zvalue = IF(output[1] > 0, 0, 1);
}
However, I cannot for the life of me figure out how to generate a depth value here that correctly allows the raymarched solids to obscure / not obscure the other geometry in the scene.
I know it's possible, because there's a working example here: https://github.com/i-saint/RaymarchingOnUnity5 (associated Japanese-language blog: http://i-saint.hatenablog.com/)
However, it's in Japanese and largely undocumented, as well as being extremely complex.
I'm looking for an extremely simplified version of the same thing, from which to build on.
In the shader, the fragment program currently uses the line:
float2 output = march_raycast(i.worldpos, i.viewdir, _far, _step);
which maps an input point p on the quad near the camera (the quad this shader is attached to) into an output float2 (density, distance), where distance is the distance from the quad to the 'point' on the procedural surface.
The question is, how do I map that into a depth buffer in any useful way?
The complete shader is below. To use it, create a new scene with a sphere at 0,0,0 with a size of at least 50 and assign the shader to it:
Shader "Shaders/Raymarching/BasicMarch" {
Properties {
_sun ("Sun", Vector) = (0, 0, 0, 0)
_far ("Far Depth Value", Float) = 20
_edgeFuzz ("Edge fuzziness", Range(1, 20)) = 1.0
_lightStep ("Light step", Range(0.1, 5)) = 1.0
_step ("Raycast step", Range(0.1, 5)) = 1.0
_dark ("Dark value", Color) = (0, 0, 0, 0)
_light ("Light Value", Color) = (1, 1, 1, 1)
[Toggle] _debugDepth ("Display depth field", Float) = 0
[Toggle] _debugLight ("Display light field", Float) = 0
}
SubShader {
Tags {"Queue"="Transparent" "IgnoreProjector"="True" "RenderType"="Transparent"}
Blend SrcAlpha OneMinusSrcAlpha
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma target 3.0
#include "UnityCG.cginc"
#include "UnityLightingCommon.cginc" // for _LightColor0
#define IF(a, b, c) lerp(b, c, step((fixed) (a), 0));
uniform float _far;
uniform float _lightStep;
uniform float3 _sun;
uniform float4 _light;
uniform float4 _dark;
uniform float _debugDepth;
uniform float _debugLight;
uniform float _edgeFuzz;
uniform float _step;
/**
* Sphere at origin c, size s
* #param center_ The center of the sphere
* #param radius_ The radius of the sphere
* #param point_ The point to check
*/
float geom_soft_sphere(float3 center_, float radius_, float3 point_) {
float rtn = distance(center_, point_);
return IF(rtn < radius_, (radius_ - rtn) / radius_ / _edgeFuzz, 0);
}
/**
* A rectoid centered at center_
* #param center_ The center of the cube
* #param halfsize_ The halfsize of the cube in each direction
*/
float geom_rectoid(float3 center_, float3 halfsize_, float3 point_) {
float rtn = IF((point_[0] < (center_[0] - halfsize_[0])) || (point_[0] > (center_[0] + halfsize_[0])), 0, 1);
rtn = rtn * IF((point_[1] < (center_[1] - halfsize_[1])) || (point_[1] > (center_[1] + halfsize_[1])), 0, 1);
rtn = rtn * IF((point_[2] < (center_[2] - halfsize_[2])) || (point_[2] > (center_[2] + halfsize_[2])), 0, 1);
rtn = rtn * distance(point_, center_);
float radius = length(halfsize_);
return IF(rtn > 0, (radius - rtn) / radius / _edgeFuzz, 0);
}
/**
* Calculate procedural geometry.
* Return (0, 0, 0) for empty space.
* #param point_ A float3; return the density of the solid at p.
* #return The density of the procedural geometry of p.
*/
float march_geometry(float3 point_) {
return
geom_rectoid(float3(0, 0, 0), float3(7, 7, 7), point_) +
geom_soft_sphere(float3(10, 0, 0), 7, point_) +
geom_soft_sphere(float3(-10, 0, 0), 7, point_) +
geom_soft_sphere(float3(0, 0, 10), 7, point_) +
geom_soft_sphere(float3(0, 0, -10), 7, point_);
}
/** Return a randomish value to sample step with */
float rand(float3 seed) {
return frac(sin(dot(seed.xyz ,float3(12.9898,78.233,45.5432))) * 43758.5453);
}
/**
* March the point p along the cast path c, and return a float2
* which is (density, depth); if the density is 0 no match was
* found in the given depth domain.
* #param point_ The origin point
* #param cast_ The cast vector
* #param max_ The maximum depth to step to
* #param step_ The increment to step in
* #return (density, depth)
*/
float2 march_raycast(float3 point_, float3 cast_, float max_, float step_) {
float3 origin_ = point_;
float depth_ = 0;
float density_ = 0;
int steps = floor(max_ / step_);
for (int i = 0; (density_ <= 1) && (i < steps); ++i) {
float3 target_ = point_ + cast_ * i * step_ + rand(point_) * cast_ * step_;
density_ += march_geometry(target_);
depth_ = IF((depth_ == 0) && (density_ != 0), distance(point_, target_), depth_);
}
density_ = IF(density_ > 1, 1, density_);
return float2(density_, depth_);
}
/**
* Simple lighting; raycast from depth point to light source, and get density on path
* #param point_ The origin point on the render target
* #param cast_ The original cast (ie. camera view direction)
* #param raycast_ The result of the original raycast
* #param max_ The max distance to cast
* #param step_ The step increment
*/
float2 march_lighting(float3 point_, float3 cast_, float2 raycast_, float max_, float step_) {
float3 target_ = point_ + cast_ * raycast_[1];
float3 lcast_ = normalize(_sun - target_);
return march_raycast(target_, lcast_, max_, _lightStep);
}
struct fragmentInput {
float4 position : SV_POSITION;
float4 worldpos : TEXCOORD0;
float3 viewdir : TEXCOORD1;
};
struct fragmentOutput {
float4 color : SV_Target;
float zvalue : SV_Depth;
};
fragmentInput vert(appdata_base i) {
fragmentInput o;
o.position = mul(UNITY_MATRIX_MVP, i.vertex);
o.worldpos = mul(_Object2World, i.vertex);
o.viewdir = -normalize(WorldSpaceViewDir(i.vertex));
return o;
}
fragmentOutput frag(fragmentInput i) {
fragmentOutput o;
// Raycast
float2 output = march_raycast(i.worldpos, i.viewdir, _far, _step);
float2 light = march_lighting(i.worldpos, i.viewdir, output, _far, _step);
float lvalue = 1.0 - light[0];
float depth = output[1] / _far;
// Generate fragment color
float4 color = lerp(_light, _dark, lvalue);
// Debugging: Depth
float4 debug_depth = float4(depth, depth, depth, 1);
color = IF(_debugDepth, debug_depth, color);
// Debugging: Color
float4 debug_light = float4(lvalue, lvalue, lvalue, 1);
color = IF(_debugLight, debug_light, color);
// Always apply the depth map
color.a = output[0];
o.zvalue = IF(output[1] > 0, 0, 1);
o.color = IF(output[1] <= 0, 0, color);
return o;
}
ENDCG
}
}
}
(Yes, I know it's quite complex, but it's very difficult to reduce this kind of shader into a 'simple test case' to play with)
I'll happily accept any answer which is a modification of the shader above that allows the procedural solid to be obscured by / obscure other geometry in the scene as though it was 'real geometry'.
--
Edit: You can get this 'working' by explicitly setting the depth value on the other geometry in the scene using the same depth function as the raymarcher:
...however, I still cannot get this to work correctly with geometry using the 'standard' shader. Still hunting for a working solution...

Looking at the project you linked to, the most important difference I see is that their raycast march function uses a pass-by-reference parameter to return a fragment position called ray_pos. That position appears to be in world space, so they transform it with the view-projection matrix to get a clip-space position and read a depth value from it.
The project also has a compute_depth function, but it looks pretty simple.
Your march_raycast function is already calculating a target_ position, so you could refactor a bit, apply the out keyword to return it to the caller, and use it in depth calculations:
//get position using pass-by-ref
float3 ray_pos = i.worldpos.xyz;
float2 output = march_raycast(ray_pos, i.viewdir, _far, _step);
...
//convert position to clip space, read depth
float4 clip_pos = mul(UNITY_MATRIX_VP, float4(ray_pos, 1.0));
o.zvalue = clip_pos.z / clip_pos.w;
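To make that concrete, here is one way the refactor could look. This is a rough, untested sketch based on the question's own march_raycast (not code from the linked project), with point_ promoted to an inout parameter that gets overwritten with the hit position:
float2 march_raycast(inout float3 point_, float3 cast_, float max_, float step_) {
    float3 origin_ = point_;
    float depth_ = 0;
    float density_ = 0;
    int steps = floor(max_ / step_);
    for (int i = 0; (density_ <= 1) && (i < steps); ++i) {
        float3 target_ = origin_ + cast_ * i * step_ + rand(origin_) * cast_ * step_;
        density_ += march_geometry(target_);
        // On the first non-empty sample, record both the depth and the hit position
        if ((depth_ == 0) && (density_ != 0)) {
            depth_ = distance(origin_, target_);
            point_ = target_;
        }
    }
    density_ = IF(density_ > 1, 1, density_);
    return float2(density_, depth_);
}
With that in place, the usage in the snippet above works as-is: ray_pos comes back holding the (world-space) hit position, which is then projected with UNITY_MATRIX_VP to produce the SV_Depth value.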

There might be a problem with the render setup.
To allow your shader to output per-pixel depth, its depth-test must be disabled. Otherwise the GPU would, as an optimization, assume that all your pixels' depths are the depths interpolated from your vertices.
As your shader does not do depth-tests, it must be rendered before the geometry that does, or it will just overwrite whatever the other geometry wrote to the depth buffer.
It must, however, have depth-write enabled, or the depth output of your pixel shader will be ignored and never reach the depth buffer.
Your RenderType is Transparent, which, I assume, should disable depth-write. That would be a problem.
Your Queue is Transparent as well, which would have it render after all solid geometry, and back to front. That is a problem too, as we already concluded we have to render before.
So:
- put your shader in an early render queue that renders before solid geometry
- have depth-write enabled
- have depth-test disabled
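Putting those three points together, the SubShader header might look roughly like this (an untested sketch; the exact queue value is only an example):
SubShader {
    // Render before regular geometry so everything drawn later can depth-test against us
    Tags {"Queue"="Geometry-10" "IgnoreProjector"="True" "RenderType"="Opaque"}
    ZWrite On     // required so the SV_Depth output actually reaches the depth buffer
    ZTest Always  // effectively disables depth-testing for this pass
    Blend SrcAlpha OneMinusSrcAlpha
    Pass {
        // ... CGPROGRAM block unchanged ...
    }
}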


How to set the saturation level of an entire color channel in Unity

I would like to set the saturation of an entire color channel in my main camera. The closest option that I've found was the Hue vs. Sat(uration) grading curve. In the background of the scene is a palm tree that is colored teal; I want the green level of the tree to still show. Same with the top of the grass in the foreground: it's closer to yellow than green, but I'd still want to see the little bit of green value that it has.
I have been searching the Unity documentation and the Asset Store for a possible 3rd-party shader for weeks, but have come up empty-handed. My current result is the best I could come up with; any help would be greatly appreciated. Thank you.
SOLVED by the check-marked answer. Just wanted to share what the results look like for anyone in the future who stumbles across this issue. Compare the above screenshot, where the palm tree in the background and the grass tops in the foreground are just black and white, to the after screenshot below. Full control of RGB saturation in the scene!
Examples using this method:
Below is a postprocessing shader intended to let you set the saturation of each color channel.
It first takes the original pixel color and extracts its hue, saturation, and luminance, then builds the fully saturated, neutral-luminance version of that hue. The RGB of that color is multiplied by the per-channel saturation factors to compute the RGB of the new hue, and the magnitude of that RGB is multiplied by the original saturation to get the new saturation. The new hue and saturation are then recombined with the original luminance to compute the new color.
Shader "Custom/ChannelSaturation" {
Properties{
_MainTex("Base", 2D) = "white" {}
_rSat("Red Saturation", Range(0, 1)) = 1
_gSat("Green Saturation", Range(0, 1)) = 1
_bSat("Blue Saturation", Range(0, 1)) = 1
}
SubShader{
Pass {
CGPROGRAM
#pragma vertex vert_img
#pragma fragment frag
#include "UnityCG.cginc"
uniform sampler2D _MainTex;
float _rSat;
float _gSat;
float _bSat;
/*
source: modified version of https://www.shadertoy.com/view/MsKGRW
written @ https://gist.github.com/hiroakioishi/c4eda57c29ae7b2912c4809087d5ffd0
*/
float3 rgb2hsl(float3 c) {
float epsilon = 0.00000001;
float cmin = min( c.r, min( c.g, c.b ) );
float cmax = max( c.r, max( c.g, c.b ) );
float cd = cmax - cmin;
float3 hsl = float3(0.0, 0.0, 0.0);
hsl.z = (cmax + cmin) / 2.0;
hsl.y = lerp(cd / (cmax + cmin + epsilon),
cd / (epsilon + 2.0 - (cmax + cmin)),
step(0.5, hsl.z));
float3 a = float3(1.0 - step(epsilon, abs(cmax - c)));
a = lerp(float3(a.x, 0.0, a.z), a, step(0.5, 2.0 - a.x - a.y));
a = lerp(float3(a.x, a.y, 0.0), a, step(0.5, 2.0 - a.x - a.z));
a = lerp(float3(a.x, a.y, 0.0), a, step(0.5, 2.0 - a.y - a.z));
hsl.x = dot( float3(0.0, 2.0, 4.0) + ((c.gbr - c.brg)
/ (epsilon + cd)), a );
hsl.x = (hsl.x + (1.0 - step(0.0, hsl.x) ) * 6.0 ) / 6.0;
return hsl;
}
/*
source: modified version of
https://stackoverflow.com/a/42261473/1092820
*/
float3 hsl2rgb(float3 c) {
float3 rgb = clamp(abs(fmod(c.x * 6.0 + float3(0.0, 4.0, 2.0),
6.0) - 3.0) - 1.0, 0.0, 1.0);
return c.z + c.y * (rgb - 0.5) * (1.0 - abs(2.0 * c.z - 1.0));
}
float4 frag(v2f_img i) : COLOR {
float3 sat = float3(_rSat, _gSat, _bSat);
float4 c = tex2D(_MainTex, i.uv);
float3 hslOrig = rgb2hsl(c.rgb);
float3 rgbFullSat = hsl2rgb(float3(hslOrig.x, 1, .5));
float3 diminishedrgb = rgbFullSat * sat;
float diminishedHue = rgb2hsl(diminishedrgb).x;
float diminishedSat = hslOrig.y * length(diminishedrgb);
float3 mix = float3(diminishedHue, diminishedSat, hslOrig.z);
float3 newc = hsl2rgb(mix);
float4 result = c;
result.rgb = newc;
return result;
}
ENDCG
}
}
}
If you're using URP (Universal Render Pipeline), which is recommended, you can create a new forward renderer asset, assign the shader to that asset, and configure it appropriately. Further information, including diagrams, can be found in the official Unity tutorial for custom render passes with URP.
If you aren't using URP, you have other options. You could attach it to specific materials, or add the below script from Wikibooks to the camera's GameObject to apply a material using the above shader as a postprocessing effect on the camera:
using System;
using UnityEngine;
[RequireComponent(typeof(Camera))]
[ExecuteInEditMode]
public class PostProcessingEffectScript : MonoBehaviour {
public Material material;
void OnEnable()
{
if (null == material || null == material.shader ||
!material.shader.isSupported)
{
enabled = false;
}
}
void OnRenderImage(RenderTexture source, RenderTexture destination)
{
Graphics.Blit(source, destination, material);
}
}
If you use the postprocessing effect, you will want to render the things you want to exclude from the effect with a different camera, then put everything together. However, this is a bit out of scope for this answer.
My best guess would be to use a custom shader or camera FX that would give you control over each channel.
Hope that helped ;)

Fragment shader fmod, why is this not repeating

I created the following fragment shader that creates a tile grid of size _Size using the frac function and draws a small separator line between each tile. I save the ID of the tile in its uv.z value so I can later address the tile based on its ID (uv.z).
_Size and _CurrentID can be adjusted through the inspector
Shader "Unlit/Fractals"
{
Properties
{
[HideInInspector] _MainTex ("Texture", 2D) = "white" {}
_Size ("Size", float) = 5
_CurrentID ("ID", float) = 0
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
sampler2D _MainTex;
float4 _MainTex_ST;
float _Size;
float _CurrentID;
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
return o;
}
fixed4 frag(v2f i) : SV_Target
{
_CurrentID = floor(_CurrentID);
//Create a tile grid that is of _Size * _Size (5 in example), and create an ID for it in the .z value based on its grid position
float3 uv = float3(frac(i.uv * _Size), (floor(i.uv.y * _Size) * _Size) + (floor(i.uv.x * _Size)));
//Create lines to seperate the tiles
float4 col = float4(1, 1, 1, 1);
if ((uv.x > 0.98 && uv.x < 1) || (uv.y > .98 && uv.y < 1))
{
col *= float4(uv.x, uv.y, 0, 1);
}
else
{
col = float4(0, 0, 0, 1);
}
//Loop through all the tiles based on the ID
if (uv.z == fmod(_CurrentID, ((_Size) * (_Size))))
{
col = float4(0, 1, 1, 1);
}
//This correctly goes through every grid tile once, confirming that uv grid ID 5 corresponds to grid position (0,1)
/*if (uv.z == _CurrentID)
{
col = float4(0, 1, 1, 1);
}*/
return col;
}
ENDCG
}
}
}
(note that the grid starts at (0,0) bottom left to (5,5) top right)
To ascertain that my IDs are set up correctly, I looped through each uv.z value with the floor of the _CurrentID set from the inspector, which lights up every tile exactly once when going from 0 to 24 (inclusive), as expected.
if (uv.z == _CurrentID)
{
col = float4(0, 1, 1, 1);
}
example of _CurrentID = 7 lighting up the 8th tile as expected
Now just using the _CurrentID would mean I can only go through every tile once. To make this repeatable regardless of how big _CurrentID is, I should be able to use fmod (modulo) on the _CurrentID (although the same happens using the % modulo operator) so it loops back to 0 when _CurrentID = 25, which I (try to) do using the following piece of code:
if (uv.z == fmod(_CurrentID, ((_Size) * (_Size))))
{
col = float4(0, 1, 1, 1);
}
This goes well for the first row (when _CurrentID >= 0 && < 5). However, once I hit _CurrentID = 5 things start to break: no tile lights up, despite previously being able to confirm that _CurrentID = 5 lights up the tile at grid (0, 1). When I set _CurrentID = 6 the proper tile starts lighting up again (grid pos (1, 1)), and this pattern continues, with grid (0, n) never lighting up for any n greater than 0.
Example of _CurrentID = 5 using fmod.
Things start breaking even more once my _CurrentID goes higher than 25, where it doesn't seem to wrap around at all, as seen in this Gyazo gif. It just seems to light up random tiles.
Starting to doubt myself, I double-checked the modulo maths on WolframAlpha, which seems correct.
I can "solve" the issue where it skips the first tile of every row by doing fmod(_CurrentID, ((_Size + 1) * (_Size + 1))), which will loop correctly through each tile on the first run (including the (0,n) tiles), but now my modulo starts looping at 36, after which it will still light up a random tile as shown in the gif.
What am I doing wrong here?
(Unity version 2020.1.1f1, same behavior confirmed in 2019.3.13)
It's probably a floating point precision issue since you are comparing floats for equality.
Instead of doing that you could write something like:
float id = _CurrentID % (_Size*_Size);
float epsilon = .0001f;
if (abs(uv.z - id) < epsilon)
{
col = float4(0, 1, 1, 1);
}
Or use ints for ids.
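A sketch of the integer route (my own variation; rounding before the cast so a value like 4.999 does not truncate down to 4):
int id = (int)round(fmod(_CurrentID, _Size * _Size));
int tile = (int)round(uv.z);
if (tile == id)
{
    col = float4(0, 1, 1, 1);
}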

One shader function written different ways gives different results. (Unity 2017)

In our project on Unity 2017 we use our own PixelSnap function instead of UnityPixelSnap.
Recently we found that, at some resolutions, some fullscreen sprites end up slightly smaller on screen than the screen itself.
Here is our function, with all of the return cases that I tried:
#include "UnityCG.cginc"
float4 _UnityPixelSnap(float4 coord)
{
//[1]
return float4(floor((coord.xy * 0.5f + 0.5f ) * _ScreenParams.xy) * (_ScreenParams.zw - 1.0f) * 2.0f - 1.0f, coord.z, coord.w);
//[2]
return float4(floor((coord.xy * 0.5f + 0.5f ) * _ScreenParams.xy) / (_ScreenParams.xy) * 2.0f - 1.0f, coord.z, coord.w);
//[3]
return float4(floor((coord.xy * 0.5f) * _ScreenParams.xy) / (_ScreenParams.xy) * 2.0f, coord.z, coord.w);
//[4]
return float4(round((coord.xy * 0.5f) * _ScreenParams.xy) / (_ScreenParams.xy) * 2.0f, coord.z, coord.w);
}
In the next example, the right border of the screen should normally be totally black.
Cases 1, 2 and 3 give us a vertical pixel line (picture).
But case 4 gives normal results.
If you plot these functions, for example in Matlab, for any _ScreenParams and an array of coord.xy values in (-1, 1) with any step, you will see exactly the same graphs.
And please don't ask me why we use Unity 2017 and our own PixelSnap; I am new to this project and need to investigate this issue.
Thanks everybody!
Here is the UnityCG.cginc UnityPixelSnap function for comparison:
inline float4 UnityPixelSnap (float4 pos)
{
float2 hpc = _ScreenParams.xy * 0.5f;
#if SHADER_API_PSSL
// sdk 4.5 splits round into v_floor_f32(x+0.5) ... sdk 5.0 uses v_rndne_f32, for compatabilty we use the 4.5 version
float2 temp = ((pos.xy / pos.w) * hpc) + float2(0.5f,0.5f);
float2 pixelPos = float2(__v_floor_f32(temp.x), __v_floor_f32(temp.y));
#else
float2 pixelPos = round ((pos.xy / pos.w) * hpc);
#endif
pos.xy = pixelPos / hpc * pos.w;
return pos;
}

How to compute the radial distance of an object in a postprocessing vertex and fragment shader

After hours of Googling, copy-pasting code and playing around, I still could not find a solution to my problem.
I try to write a postprocessing shader using the vertex and fragment functions. My problem is that I do not know how to compute the radial distance of the current vertex to the camera position (or any other given position) in world coordinates.
My goal is the following:
Consider a very big 3D plane where the camera is on top and looks exactly down to the plane. I now want a postprocessing shader that draws a white line onto the plane, such that only those pixels that have a certain radial distance to the camera are painted white. The expected result would be a white circle (in this specific setup).
I know how to do this in principle, but the problem is that I cannot find out how to compute the radial distance to the vertex.
The problem here might be that this is a POSTPROCESSING shader, so it is not applied to a particular object. If it were, I could get the world coordinates of the vertex by using mul(unity_ObjectToWorld, v.vertex), but for postprocessing shaders this gives a nonsense value.
This is my debug code for this issue:
Shader "NonHidden/TestShader"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
}
SubShader
{
Tags { "RenderType"="Transparent" "Queue"="Transparent-1"}
LOD 100
ZWrite Off
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma target 3.0
#include "UnityCG.cginc"
sampler2D _MainTex;
sampler2D _CameraDepthTexture;
uniform float4 _MainTex_TexelSize;
// V2F
struct v2f {
float4 outpos : SV_POSITION;
float4 worldPos : TEXCOORD0;
float3 rayDir : TEXCOORD1;
float3 camNormal : TEXCOORD2;
};
// Sample Depth
float sampleDepth(float2 uv) {
return Linear01Depth(
UNITY_SAMPLE_DEPTH(
tex2D(_CameraDepthTexture, uv)));
}
// VERTEX
v2f vert (appdata_tan v)
{
TANGENT_SPACE_ROTATION;
v2f o;
o.outpos = UnityObjectToClipPos(v.vertex);
o.worldPos = mul(unity_ObjectToWorld, v.vertex);
o.rayDir = mul(rotation, ObjSpaceViewDir(v.vertex));
o.camNormal = UNITY_MATRIX_IT_MV[2].xyz;
return o;
}
// FRAGMENT
fixed4 frag (v2f IN) : SV_Target
{
// Get uv coordinates
float2 uv = IN.outpos.xy * (_ScreenParams.zw - 1.0f);
// Flip y if necessary
#if UNITY_UV_STARTS_AT_TOP
if (_MainTex_TexelSize.y < 0)
{
uv.y = 1 - uv.y;
}
#endif
// Get depth
float depth = sampleDepth(uv);
// Set color
fixed4 color = 0;
if(depth.x < 1)
{
color.r = IN.worldPos.x;
color.g = IN.worldPos.y;
color.b = IN.worldPos.z;
}
return color;
}
ENDCG
}
}
}
Current State
This image shows the result when the camera looks down on the plane:
Image 1: Actual result
The blue value is (for whatever reason) 25 in every pixel. The red and green areas reflect the x-y coordinates of the screen.
Even if I rotate the camera a little bit, I get the exact same shading at the same screen coordinates:
That shows me that the computed "worldPos" coordinates are screen coordinates and have nothing to do with the world coordinates of the plane.
Expected Result
The result I expect to see is the following:
Here, pixels that have the same (radial) distance to the camera have the same color.
How do I need to change the above code to achieve this effect? With rayDir (computed in the vert function) I tried to get at least the direction vector from the camera center to the current pixel, such that I could compute the radial distance using the depth information. But rayDir has a constant value for all pixels ...
At this point I also have to say that I don't really understand what is computed inside the vert function. This is just stuff that I found on the internet and that I tried out.
Alright, I found a solution to my problem after coming across this video: Shaders Case Study - No Man's Sky: Topographic Scanner
In the video description is a link to the corresponding Git repository. I downloaded, analyzed and rewrote the code so that it fits my purpose and is easier to read and understand.
The major thing I learned is that there is no built-in way to compute the radial distance in a post-processing shader (correct me if I'm wrong!). So in order to get the radial distance, the only way seems to be to use the direction vector from the camera to the vertex together with the depth buffer. Since the direction vector is also not available in a built-in way, a trick is used:
Instead of using the Graphics.Blit function in the post-processing script, a custom Blit function can be used to set some additional shader variables. In this case, the camera frustum is stored in a second set of texture coordinates, which are then available in the shader code as TEXCOORD1. The trick is that the corresponding shader variable automatically contains an interpolated value that is exactly the direction vector ("frustum ray") I was looking for.
The code of the calling script now looks as follows:
using UnityEngine;
using System.Collections;
[ExecuteInEditMode]
public class TestShaderEffect : MonoBehaviour
{
private Material material;
private Camera cam;
void OnEnable()
{
// Create a material that uses the desired shader
material = new Material(Shader.Find("Test/RadialDistance"));
// Get the camera object (this script must be assigned to a camera)
cam = GetComponent<Camera>();
// Enable depth buffer generation#
// (writes to the '_CameraDepthTexture' variable in the shader)
cam.depthTextureMode = DepthTextureMode.Depth;
}
[ImageEffectOpaque] // Draw after opaque, but before transparent geometry
void OnRenderImage(RenderTexture source, RenderTexture destination)
{
// Call custom Blit function
// (usually Graphics.Blit is used)
RaycastCornerBlit(source, destination, material);
}
void RaycastCornerBlit(RenderTexture source, RenderTexture destination, Material mat)
{
// Compute (half) camera frustum size (at distance 1.0)
float angleFOVHalf = cam.fieldOfView / 2 * Mathf.Deg2Rad;
float heightHalf = Mathf.Tan(angleFOVHalf);
float widthHalf = heightHalf * cam.aspect; // aspect = width/height
// Compute helper vectors (camera orientation weighted with frustum size)
Vector3 vRight = cam.transform.right * widthHalf;
Vector3 vUp = cam.transform.up * heightHalf;
Vector3 vFwd = cam.transform.forward;
// Custom Blit
// ===========
// Set the given destination texture as the active render texture
RenderTexture.active = destination;
// Set the '_MainTex' variable to the texture given by 'source'
mat.SetTexture("_MainTex", source);
// Store current transformation matrix
GL.PushMatrix();
// Load orthographic transformation matrix
// (sets viewing frustum from [0,0,-1] to [1,1,100])
GL.LoadOrtho();
// Use the first pass of the shader for rendering
mat.SetPass(0);
// Activate quad draw mode and draw a quad
GL.Begin(GL.QUADS);
{
// Using MultiTexCoord2 (TEXCOORD0) and Vertex3 (POSITION) to draw on the whole screen
// Using MultiTexCoord to write the frustum information into TEXCOORD1
// -> When the shader is called, the TEXCOORD1 value is automatically an interpolated value
// Bottom Left
GL.MultiTexCoord2(0, 0, 0);
GL.MultiTexCoord(1, (vFwd - vRight - vUp) * cam.farClipPlane);
GL.Vertex3(0, 0, 0);
// Bottom Right
GL.MultiTexCoord2(0, 1, 0);
GL.MultiTexCoord(1, (vFwd + vRight - vUp) * cam.farClipPlane);
GL.Vertex3(1, 0, 0);
// Top Right
GL.MultiTexCoord2(0, 1, 1);
GL.MultiTexCoord(1, (vFwd + vRight + vUp) * cam.farClipPlane);
GL.Vertex3(1, 1, 0);
// Top Left
GL.MultiTexCoord2(0, 0, 1);
GL.MultiTexCoord(1, (vFwd - vRight + vUp) * cam.farClipPlane);
GL.Vertex3(0, 1, 0);
}
GL.End(); // Finish quad drawing
// Restore original transformation matrix
GL.PopMatrix();
}
}
The shader code looks like this:
Shader "Test/RadialDistance"
{
Properties
{
_MainTex("Texture", 2D) = "white" {}
}
SubShader
{
// No culling or depth
Cull Off ZWrite Off ZTest Always
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct VertIn
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
float4 ray : TEXCOORD1;
};
struct VertOut
{
float4 vertex : SV_POSITION;
float2 uv : TEXCOORD0;
float4 interpolatedRay : TEXCOORD1;
};
// Parameter variables
sampler2D _MainTex;
// Auto filled variables
float4 _MainTex_TexelSize;
sampler2D _CameraDepthTexture;
// Generate jet-color-sheme color based on a value t in [0, 1]
half3 JetColor(half t)
{
half3 color = 0;
color.r = min(1, max(0, 4 * t - 2));
color.g = min(1, max(0, -abs( 4 * t - 2) + 2));
color.b = min(1, max(0, -4 * t + 2));
return color;
}
// VERT
VertOut vert(VertIn v)
{
VertOut o;
// Get vertex and uv coordinates
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.uv.xy;
// Flip uv's if necessary
#if UNITY_UV_STARTS_AT_TOP
if (_MainTex_TexelSize.y < 0)
o.uv.y = 1 - o.uv.y;
#endif
// Get the interpolated frustum ray
// (generated by the calling script's custom Blit function)
o.interpolatedRay = v.ray;
return o;
}
// FRAG
float4 frag (VertOut i) : SV_Target
{
// Get the color from the texture
half4 colTex = tex2D(_MainTex, i.uv);
// flat depth value with high precision nearby and bad precision far away???
float rawDepth = DecodeFloatRG(tex2D(_CameraDepthTexture, i.uv));
// flat depth but with higher precision far away and lower precision nearby???
float linearDepth = Linear01Depth(rawDepth);
// Vector from camera position to the vertex in world space
float4 wsDir = linearDepth * i.interpolatedRay;
// Position of the vertex in world space
float3 wsPos = _WorldSpaceCameraPos + wsDir;
// Distance to a given point in world space coordinates
// (in this case the camera position, so: dist = length(wsDir))
float dist = distance(wsPos, _WorldSpaceCameraPos);
// Get color by distance (same distance means same color)
half4 color = 1;
half t = saturate(dist/100.0);
color.rgb = JetColor(t);
// Set color to red at a hard-coded distance -> red circle
if (dist < 50 && dist > 50 - 1 && linearDepth < 1)
{
color.rgb = half3(1, 0, 0);
}
return color * colTex;
}
ENDCG
}
}
}
I'm now able to achieve the desired effect:
But there are still some questions I have and I would be thankful if anyone could answer them for me:
Is there really no other way to get the radial distance? Using a direction vector and the depth buffer is inefficient and inaccurate.
I don't really understand the content of the rawDepth variable. I mean yes, it's some depth information, but if you use the depth information as a texture color, you basically get a black image unless you are ridiculously close to an object. That leads to very bad resolution for objects that are further away. How can anyone work with that?
I don't understand what exactly the Linear01Depth function does. Since the Unity documentation sucks in general, it doesn't offer any information about this one either.
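For reference, Linear01Depth and its sibling LinearEyeDepth are defined in UnityCG.cginc roughly as follows; they simply remap the non-linear hardware depth value using _ZBufferParams (check the UnityCG.cginc shipped with your Unity version for the exact source):
// Z-buffer value to linear depth in the 0..1 range (1 at the far plane)
inline float Linear01Depth( float z )
{
    return 1.0 / (_ZBufferParams.x * z + _ZBufferParams.y);
}
// Z-buffer value to linear depth in eye-space units
inline float LinearEyeDepth( float z )
{
    return 1.0 / (_ZBufferParams.z * z + _ZBufferParams.w);
}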

Vulkan: VkVertexInputBindingDescription always wrong with geometry shader

I'm trying to implement billboarded quads in a geometry shader to render particle effects. The geometry shader input is points (vec3), and its output is a triangle strip with position and UV coordinates (vec3, vec2). I've tried two variations of vertex input bindings, but neither works.
If I set up the vertex binding like this:
VkVertexInputBindingDescription binding_desc[2] = {};
binding_desc[0].binding = 0;
binding_desc[0].stride = sizeof(glm::vec3);
binding_desc[0].inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
binding_desc[1].binding = 1;
binding_desc[1].stride = sizeof(glm::vec2);
binding_desc[1].inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
VkVertexInputAttributeDescription attribute_desc[2] = {};
attribute_desc[0].location = 0;
attribute_desc[0].binding = binding_desc[0].binding;
attribute_desc[0].format = VK_FORMAT_R32G32B32_SFLOAT;
attribute_desc[0].offset = offsetof(vert_shader_vertex, pos);
attribute_desc[1].location = 1;
attribute_desc[1].binding = binding_desc[1].binding;
attribute_desc[1].format = VK_FORMAT_R32G32_SFLOAT;
attribute_desc[1].offset = offsetof(vert_shader_vertex, uv);
I get the following error when calling vkCmdDraw:
ERROR [default] DS: (OBJECT 0) (CODE 24) The Pipeline State Object
(0x3c) expects that this Command Buffer's vertex binding Index 1
should be set via vkCmdBindVertexBuffers. This is because
VkVertexInputBindingDescription struct at index 1 of
pVertexBindingDescriptions has a binding value of 1.
However, if I set it up like this:
VkVertexInputBindingDescription binding_desc[1] = {};
binding_desc[0].binding = 0;
binding_desc[0].stride = sizeof(glm::vec3);
binding_desc[0].inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
VkVertexInputAttributeDescription attribute_desc[1] = {};
attribute_desc[0].location = 0;
attribute_desc[0].binding = binding_desc[0].binding;
attribute_desc[0].format = VK_FORMAT_R32G32B32_SFLOAT;
attribute_desc[0].offset = offsetof(vert_shader_vertex, pos);
I get this error when calling vkCreateGraphicsPipelines:
ERROR [default] SC: (OBJECT 0) (CODE 3) Vertex shader consumes input
at location 1 but not provided
Does the VkVertexInputBindingDescription describe the input to the geometry shader, or the vertex shader?
Do I need "dummy" UV coordinates in my vertex buffer as a place holder?
Is it possible my geometry shader is not activated, and how can I confirm?
Whichever of the two approaches is correct, how do I address the corresponding error?
As an aside, I'm new to Vulkan so comments on the shaders are welcome.
Geometry shader
#version 450
#extension GL_ARB_separate_shader_objects : enable
#extension GL_ARB_shading_language_420pack : enable
layout (points) in;
layout (triangle_strip, max_vertices = 4) out;
layout (location = 0) in vec3 inPos[];
layout (location = 0) out vec3 outPos;
layout (location = 1) out vec2 outUV;
layout (push_constant) uniform constants_t {
vec3 up;
vec3 right;
mat4x4 world;
mat4x4 projection;
} constants;
void main(void)
{
const vec3 pos = gl_in[0].gl_Position.xyz;
const vec3 up = constants.up;
const vec3 right = constants.right;
outPos = pos + up - right;
outUV = vec2(0, 0);
EmitVertex();
outPos = pos + up + right;
outUV = vec2(1, 0);
EmitVertex();
outPos = pos - up - right;
outUV = vec2(0, 1);
EmitVertex();
outPos = pos - up + right;
outUV = vec2(1, 1);
EmitVertex();
EndPrimitive();
}
Vertex shader
#version 450
#extension GL_ARB_separate_shader_objects : enable
#extension GL_ARB_shading_language_420pack : enable
layout (location = 0) in vec3 inPos;
layout (location = 1) in vec2 inUV;
layout (location = 0) out vec4 outPos;
layout (location = 1) out vec2 outUV;
layout (push_constant) uniform constants_t {
vec3 up;
vec3 right;
mat4x4 world;
mat4x4 projection;
} constants;
void main(void) {
outUV = inUV;
outPos = vec4(inPos.xyz, 1.0) * constants.world * constants.projection;
}
vkCmdBindVertexBuffers
VkBuffer vertex_buffers[1] = {vertexBuffer};
VkDeviceSize vertex_offset[1] = {0};
vkCmdBindVertexBuffers(commandBuffer, 0, 1, vertex_buffers, vertex_offset);
vkCmdBindVertexBuffers(commandBuffer, 0, 1, vertex_buffers, vertex_offset);
This says that you're binding one buffer to index 0. Yet you told the pipeline when you created it that you would have two buffers bound.
Do not lie to Vulkan; it always knows (when you're using validation layers ;) ).
It is rather likely that you intended to have both vertex attributes use the same buffer object. I deduce this from the fact that you used offsetof to compute the relative offsets for them. If that is your intent, then you should have two vertex attributes that use the same buffer binding.
Does the VkVertexInputBindingDescription describe the input to the geometry shader, or the vertex shader?
It cannot describe the input to the GS because the first pipeline shader stage is the vertex shader. And creating a graphics pipeline without a VS is not possible.