I am developing a native (C++) plugin (Windows only for now) for Unity (2018.1.0f2).
The plugin downloads textures and meshes and provides them to Unity.
There is a LOT of boilerplate code that I would like to spare you.
Anyway, the rendering is done like this:
void RegenerateCommandBuffer(CommandBuffer buffer, List<DrawTask> tasks)
{
    buffer.Clear();
    buffer.SetProjectionMatrix(cam.projectionMatrix); // protected Camera cam; cam = GetComponent<Camera>();
    foreach (DrawTask t in tasks)
    {
        if (t.mesh == null)
            continue;
        MaterialPropertyBlock mat = new MaterialPropertyBlock();
        bool monochromatic = false;
        if (t.texColor != null)
        {
            var tt = t.texColor as VtsTexture;
            mat.SetTexture(shaderPropertyMainTex, tt.Get());
            monochromatic = tt.monochromatic;
        }
        if (t.texMask != null)
        {
            var tt = t.texMask as VtsTexture;
            mat.SetTexture(shaderPropertyMaskTex, tt.Get());
        }
        mat.SetMatrix(shaderPropertyUvMat, VtsUtil.V2U33(t.data.uvm));
        mat.SetVector(shaderPropertyUvClip, VtsUtil.V2U4(t.data.uvClip));
        mat.SetVector(shaderPropertyColor, VtsUtil.V2U4(t.data.color));
        // flags: mask, monochromatic, flat shading, uv source
        mat.SetVector(shaderPropertyFlags, new Vector4(t.texMask == null ? 0 : 1, monochromatic ? 1 : 0, 0, t.data.externalUv ? 1 : 0));
        buffer.DrawMesh((t.mesh as VtsMesh).Get(), VtsUtil.V2U44(t.data.mv), material, 0, -1, mat);
    }
}
There are two control modes: either the Unity camera is controlled by the camera in the plugin, or the plugin camera is controlled by the Unity camera. In my current scenario, the plugin camera is controlled by the Unity camera. There is no special magic behind the scenes, but some of the transformations need to be done in double precision to work without meshes 'jumping' around.
void CamOverrideView(ref double[] values)
{
    Matrix4x4 Mu = mapTrans.localToWorldMatrix * VtsUtil.UnityToVtsMatrix;
    // view matrix
    if (controlTransformation == VtsDataControl.Vts)
        cam.worldToCameraMatrix = VtsUtil.V2U44(Math.Mul44x44(values, Math.Inverse44(VtsUtil.U2V44(Mu))));
    else
        values = Math.Mul44x44(VtsUtil.U2V44(cam.worldToCameraMatrix), VtsUtil.U2V44(Mu));
}
void CamOverrideParameters(ref double fov, ref double aspect, ref double near, ref double far)
{
    // fov
    if (controlFov == VtsDataControl.Vts)
        cam.fieldOfView = (float)fov;
    else
        fov = cam.fieldOfView;
    // near & far
    if (controlNearFar == VtsDataControl.Vts)
    {
        cam.nearClipPlane = (float)near;
        cam.farClipPlane = (float)far;
    }
    else
    {
        near = cam.nearClipPlane;
        far = cam.farClipPlane;
    }
}
And a shader:
Shader "Vts/UnlitShader"
{
SubShader
{
Tags { "RenderType" = "Opaque" }
LOD 100
Pass
{
Blend SrcAlpha OneMinusSrcAlpha
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct vIn
{
float4 vertex : POSITION;
float2 uvInternal : TEXCOORD0;
float2 uvExternal : TEXCOORD1;
};
struct v2f
{
float4 vertex : SV_POSITION;
float2 uvTex : TEXCOORD0;
float2 uvClip : TEXCOORD1;
};
struct fOut
{
float4 color : SV_Target;
};
sampler2D _MainTex;
sampler2D _MaskTex;
float4x4 _UvMat;
float4 _UvClip;
float4 _Color;
float4 _Flags; // mask, monochromatic, flat shading, uv source
v2f vert (vIn i)
{
v2f o;
o.vertex = UnityObjectToClipPos(i.vertex);
o.uvTex = mul((float3x3)_UvMat, float3(_Flags.w > 0 ? i.uvExternal : i.uvInternal, 1.0)).xy;
o.uvClip = i.uvExternal;
return o;
}
fOut frag (v2f i)
{
fOut o;
// texture color
o.color = tex2D(_MainTex, i.uvTex);
if (_Flags.y > 0)
o.color = o.color.rrra; // monochromatic texture
// uv clipping
if ( i.uvClip.x < _UvClip.x
|| i.uvClip.y < _UvClip.y
|| i.uvClip.x > _UvClip.z
|| i.uvClip.y > _UvClip.w)
discard;
// mask
if (_Flags.x > 0)
{
if (tex2D(_MaskTex, i.uvTex).r < 0.5)
discard;
}
// uniform tint
o.color *= _Color;
return o;
}
ENDCG
}
}
}
It all works perfectly - in the editor. It also works well in a standalone DEVELOPMENT build. But the transformations go wrong in 'deploy' builds: the rendered parts look as if they were rotated around the wrong axes or with opposite polarities.
Can you spot some obvious mistakes?
My first suspect was OpenGL vs. DirectX differences, but the 'deploy' and 'development' builds should use the same API, should they not? Moreover, I have tried changing the player settings to force one or the other, but without any difference.
Edit:
Good image: https://drive.google.com/open?id=1RTlVZBSAj7LIml1sBCX7nYTvMNaN0xK-
Bad image: https://drive.google.com/open?id=176ahft7En6MqT-aS2RdKXOVW68NmvK2L
Note how the terrain is correctly aligned with the atmosphere.
Steps to reproduce
1) Create a new project in Unity
2) Download the assets https://drive.google.com/open?id=18uKuiya5XycjGWEcsF-xjy0fn7sf-D82 and extract them into the newly created project
3) Try it in the editor -> it should work OK (it will start downloading meshes and textures from us, so be patient; the downloaded resources are cached in eg. C://users//.cache/vts-browser)
The plane is controlled by the mouse with LMB pressed.
4) Build a development build and run -> it should work OK too
5) Build NOT as a development build and run -> the terrain transformations behave incorrectly.
Furthermore, I have published the repository. Here is the Unity-specific code: https://github.com/Melown/vts-browser-unity-plugin
Unfortunately, I did not intend to publish it this soon, so the repository is missing some formal things like a readme and build instructions. Most information can, however, be found in the submodules.
CommandBuffer.SetProjectionMatrix apparently needs a matrix that has been adjusted by GL.GetGPUProjectionMatrix.
buffer.SetProjectionMatrix(GL.GetGPUProjectionMatrix(cam.projectionMatrix, false));
Unfortunately, I still do not understand why this would cause different behavior between deploy and development builds. I would have expected it to only make a difference on different platforms.
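For completeness, this is the only change needed in RegenerateCommandBuffer. The second parameter of GL.GetGPUProjectionMatrix (renderIntoTexture) is false here because the command buffer draws to the screen; it would be true when targeting a RenderTexture:

// Convert the camera projection to the API-specific GPU projection
// before handing it to the command buffer.
buffer.SetProjectionMatrix(GL.GetGPUProjectionMatrix(cam.projectionMatrix, false));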
Related
I already posted this question on Unity Answers yesterday, but maybe anyone here can help? I've been trying to do some stuff that involves getting an image from a native plugin (in the form of a .dll file). I load the image data into a native buffer and then push that to the GPU in the form of a structured compute buffer. From there, I display the image using a shader (basically just doing something like uint idx = x + y * width to get the correct index). And this works great on my laptop (ignore the low resolution; I lowered it to be able to inspect the values for each pixel; this is exactly how it's supposed to look).
But when I try it on my desktop, all I get is this mess:
It's clearly displaying something; I'm almost able to make out the contours of the text (it doesn't seem like I'm just getting random noise). But I can't work out what's wrong here.
So far I've tried:
syncing the code across the two devices (it's exactly the same)
changing the Unity version (tried 2020.3.26f1 and 2021.2.12f on both machines)
updating the graphics drivers
checking the DirectX version (DirectX 12 on both)
changing the editor game window resolution
comparing the contents of the buffer (the ComputeBuffer.GetData method is getting the same completely valid values on both machines)
building the project on both machines (both builds are working on my laptop and broken on my desktop)
Especially the last point really confused me. I'm running the same executable on both machines, and it's working on my laptop with integrated graphics (not sure whether that could be relevant) but not on my desktop with a more modern dedicated GPU. The only idea I have left is that there might be some kind of optimization going on with my desktop's AMD GPU that's not happening on my laptop's Intel GPU. Any ideas on what I could try in the Radeon software? Maybe it could even be some sort of bug (with Unity or with my graphics driver)?
I'd be more than happy about any ideas on what could be the problem here (because I have no clue at this point). And sorry if my grammar is a bit off at times; I'm not a native speaker.
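For reference, the C# side that creates and binds the buffer looks roughly like this (a minimal sketch, not my exact code: the buffer name, the three-uint stride, and the uniform names come from the shader below, while the resolution and the way the array is filled are assumed):

using UnityEngine;

[RequireComponent(typeof(Camera))]
public class PackedImageDisplay : MonoBehaviour
{
    public Material readUnpackedMat; // material using the Hidden/ReadUnpacked shader
    ComputeBuffer buffer;

    void Start()
    {
        int width = 320, height = 180; // assumed resolution
        // In the real project this array is filled from the native plugin.
        uint[] packed = new uint[width * height / 4 * 3];
        // One packed4 element (3 uints = 12 bytes) holds 4 RGB pixels, one byte per channel.
        buffer = new ComputeBuffer(width * height / 4, 3 * sizeof(uint));
        buffer.SetData(packed);
        readUnpackedMat.SetBuffer("InputBuffer", buffer);
        readUnpackedMat.SetVector("Resolution", new Vector2(width, height));
        readUnpackedMat.SetInt("ImgIdx", 0);
    }

    // The shader ignores the source texture and reads the structured buffer instead.
    void OnRenderImage(RenderTexture src, RenderTexture dst)
    {
        Graphics.Blit(src, dst, readUnpackedMat);
    }

    void OnDestroy()
    {
        buffer.Release();
    }
}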
EDIT: Here's the shader I use to display the image.
Shader "Hidden/ReadUnpacked"
{
Properties
{
_MainTex("Texture", 2D) = "white" {}
}
SubShader
{
// No culling or depth
Cull Off ZWrite Off ZTest Always
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
            static const uint PACKED_SIZE = 3;     // uints per packed4 element
            static const uint PIXELS_PER_PACK = 4; // pixels per packed4 element
            static const uint BYTES_PER_PIXEL = 8; // shift per packed byte (bits, despite the name)
            static const uint PERCISION = 0xFF;    // 0xFF = 2^8 - 1
            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };
            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };
            struct packed4
            {
                uint p[PACKED_SIZE];
            };
            struct unpacked4
            {
                fixed4 p[PIXELS_PER_PACK];
            };
            StructuredBuffer<packed4> InputBuffer;
            uint ImgIdx;
            float2 Resolution;
            float2 TexelOffset;
            fixed unpackSingle(packed4 val, uint idx)
            {
                uint pid = idx / PIXELS_PER_PACK; // which uint within the element
                uint sid = idx % PIXELS_PER_PACK * BYTES_PER_PIXEL; // bit shift within that uint
                return ((val.p[pid] >> sid) & PERCISION) / (half)PERCISION;
            }
            unpacked4 unpack(packed4 packed)
            {
                unpacked4 unpacked;
                half r, g, b;
                uint idx = 0;
                [unroll(PIXELS_PER_PACK)] for (uint i = 0; i < PIXELS_PER_PACK; i++)
                {
                    fixed4 upx = fixed4(0, 0, 0, 1);
                    [unroll(PACKED_SIZE)] for (uint j = 0; j < PACKED_SIZE; j++)
                    {
                        upx[j] = unpackSingle(packed, idx++);
                    }
                    unpacked.p[i] = upx;
                }
                return unpacked;
            }
            fixed4 samplePackedBuffer(float2 uv)
            {
                int2 tc = float2(uv.x, 1 - uv.y) * Resolution;
                uint idx = tc.x + tc.y * Resolution.x; // image pixel index
                idx += Resolution.x * Resolution.y * ImgIdx;
                uint gid = floor(idx / PIXELS_PER_PACK); // packed global index
                uint lid = idx % PIXELS_PER_PACK; // packed local index
                packed4 ppx = InputBuffer[gid];
                unpacked4 upx = unpack(ppx);
                return upx.p[lid];
            }
            v2f vert(appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                o.uv = v.uv;
                return o;
            }
            fixed4 frag(v2f i) : SV_Target
            {
                fixed4 col = samplePackedBuffer(i.uv);
                return col;
            }
            ENDCG
        }
    }
}
You should check all other 3D APIs (D3D11, Vulkan, OpenGL,...).
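In particular, it is worth logging which API and GPU each machine actually ends up using, since the default can differ between them; a quick sketch:

using UnityEngine;

public class GraphicsApiProbe : MonoBehaviour
{
    void Start()
    {
        // Logs e.g. "Direct3D11 / <GPU name> / Direct3D 11.0 ..." so the
        // working and the broken machine can be compared directly.
        Debug.Log(SystemInfo.graphicsDeviceType + " / " +
                  SystemInfo.graphicsDeviceName + " / " +
                  SystemInfo.graphicsDeviceVersion);
    }
}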
I am looking for a glass shader for Unity that only refracts the objects behind it, or ideas for how to modify an existing glass shader to do that.
This screenshot shows what happens when I use FX/Glass/Stained BumpDistort on a curved plane mesh.
As you can see, the glass shader refracts both the sphere in front of the mesh and the ground behind it. I am looking for a shader that will only refract the objects behind it.
Here is the code for that shader, for reference:
// Per pixel bumped refraction.
// Uses a normal map to distort the image behind, and
// an additional texture to tint the color.
Shader "FX/Glass/Stained BumpDistort" {
    Properties {
        _BumpAmt ("Distortion", range (0,128)) = 10
        _MainTex ("Tint Color (RGB)", 2D) = "white" {}
        _BumpMap ("Normalmap", 2D) = "bump" {}
    }
    Category {
        // We must be transparent, so other objects are drawn before this one.
        Tags { "Queue"="Transparent" "RenderType"="Opaque" }
        SubShader {
            // This pass grabs the screen behind the object into a texture.
            // We can access the result in the next pass as _GrabTexture
            GrabPass {
                Name "BASE"
                Tags { "LightMode" = "Always" }
            }
            // Main pass: Take the texture grabbed above and use the bumpmap to perturb it
            // on to the screen
            Pass {
                Name "BASE"
                Tags { "LightMode" = "Always" }
                CGPROGRAM
                #pragma vertex vert
                #pragma fragment frag
                #pragma multi_compile_fog
                #include "UnityCG.cginc"
                struct appdata_t {
                    float4 vertex : POSITION;
                    float2 texcoord : TEXCOORD0;
                };
                struct v2f {
                    float4 vertex : SV_POSITION;
                    float4 uvgrab : TEXCOORD0;
                    float2 uvbump : TEXCOORD1;
                    float2 uvmain : TEXCOORD2;
                    UNITY_FOG_COORDS(3)
                };
                float _BumpAmt;
                float4 _BumpMap_ST;
                float4 _MainTex_ST;
                v2f vert (appdata_t v)
                {
                    v2f o;
                    o.vertex = mul(UNITY_MATRIX_MVP, v.vertex);
                    #if UNITY_UV_STARTS_AT_TOP
                    float scale = -1.0;
                    #else
                    float scale = 1.0;
                    #endif
                    o.uvgrab.xy = (float2(o.vertex.x, o.vertex.y*scale) + o.vertex.w) * 0.5;
                    o.uvgrab.zw = o.vertex.zw;
                    o.uvbump = TRANSFORM_TEX( v.texcoord, _BumpMap );
                    o.uvmain = TRANSFORM_TEX( v.texcoord, _MainTex );
                    UNITY_TRANSFER_FOG(o,o.vertex);
                    return o;
                }
                sampler2D _GrabTexture;
                float4 _GrabTexture_TexelSize;
                sampler2D _BumpMap;
                sampler2D _MainTex;
                half4 frag (v2f i) : SV_Target
                {
                    // calculate perturbed coordinates
                    half2 bump = UnpackNormal(tex2D( _BumpMap, i.uvbump )).rg; // we could optimize this by just reading the x & y without reconstructing the Z
                    float2 offset = bump * _BumpAmt * _GrabTexture_TexelSize.xy;
                    i.uvgrab.xy = offset * i.uvgrab.z + i.uvgrab.xy;
                    half4 col = tex2Dproj( _GrabTexture, UNITY_PROJ_COORD(i.uvgrab));
                    half4 tint = tex2D(_MainTex, i.uvmain);
                    col *= tint;
                    UNITY_APPLY_FOG(i.fogCoord, col);
                    return col;
                }
                ENDCG
            }
        }
        // ------------------------------------------------------------------
        // Fallback for older cards and Unity non-Pro
        SubShader {
            Blend DstColor Zero
            Pass {
                Name "BASE"
                SetTexture [_MainTex] { combine texture }
            }
        }
    }
}
My intuition is that it has to do with the way that _GrabTexture is captured, but I'm not entirely sure. I'd appreciate any advice. Thanks!
No simple answer for this.
You cannot think about refraction without thinking about the context in some way, so let's see:
Basically, it's not easy to define when an object is "behind" another one. There are different ways to even measure a point's distance to the camera, let alone account for the whole geometry. There are many strange situations where geometry intersects, and the centers and bounds could be anywhere.
Refraction is usually easy to think about in raytracing algorithms (you just march a ray and calculate how it bounces/refracts to get the colors). But in raster graphics (used for 99% of real-time graphics), objects are rendered as a whole, one after another.
What is going on in that image is that the background and the ball are rendered first, and the glass later. The glass doesn't "refract" anything; it just draws itself as a distortion of whatever was written in the render buffer before.
"Before" is key here. You don't get "behinds" in raster graphics; everything is done by being conscious of rendering order. Let's see how some refractions are created:
Manually set render queue tags for the shaders, so you know at what point in the pipeline they are drawn
Manually set each material's render queue
Create a script that constantly traverses the scene and every frame calculates what should be drawn before or after the glass according to position or any method you want, and sets up the render queues in the materials accordingly
Create a script that renders the scene filtering out (through various methods) the objects that shouldn't be refracted, and uses that as the texture to refract (depending on the complexity of the scene, this is sometimes necessary; a sketch follows this list)
These are just some options off the top of my head; everything depends on your scene.
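The last option could look roughly like this (a sketch under assumptions: the "NoRefract" layer name and the _RefractionTex property are made up, and the glass shader would have to sample that property instead of the GrabPass texture):

using UnityEngine;

public class RefractionSource : MonoBehaviour
{
    public Camera mainCam;
    public Material glassMat; // material using the distortion shader
    RenderTexture rt;

    void Start()
    {
        rt = new RenderTexture(Screen.width, Screen.height, 16);
        // Second camera that sees everything except the objects
        // that must not be refracted.
        var refractionCam = new GameObject("RefractionCam").AddComponent<Camera>();
        refractionCam.CopyFrom(mainCam);
        refractionCam.cullingMask &= ~(1 << LayerMask.NameToLayer("NoRefract"));
        refractionCam.targetTexture = rt;
        refractionCam.depth = mainCam.depth - 1; // render before the main camera
        glassMat.SetTexture("_RefractionTex", rt);
    }
}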
My advice:
Select the ball's material
Right-click on the Inspector window --> Tick on "Debug" mode
Set the Custom Render Queue to 2200 (after the regular geometry is drawn)
Select the glass' material
Set the Custom Render Queue to 2100 (after most geometry, but before the ball)
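The same queues can also be set from a script instead of the Debug inspector (a sketch; the values match the advice above):

using UnityEngine;

public class RefractionOrder : MonoBehaviour
{
    public Material ballMat;
    public Material glassMat;

    void Start()
    {
        glassMat.renderQueue = 2100; // glass (and its GrabPass) runs after most geometry...
        ballMat.renderQueue = 2200;  // ...but the ball is drawn after the grab, so it is not refracted
    }
}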
I'm attempting to build a website made in Unity to WebGL using the Unity 5 beta.
A custom shader I wrote (or, more accurately, edited from an existing one) no longer works in Unity 5.
Here's what the shader is supposed to do: create a metaball effect where the alpha ramps up in a circular curve.
The shader turns this...
into this... (via a render texture)
Here's the whole thing:
//Water Metaball Shader effect by Rodrigo Fernandez Diaz-2013
//Visit http://codeartist.info/ for more!!
Shader "Custom/Metaballs" {
    Properties {
        _MyColor ("Some Color", Color) = (1,1,1,1)
        _MainTex ("Texture", 2D) = "white" { }
        _botmcut ("bottom cutoff", Range(0,1)) = 0.1
        _topcut ("top cutoff", Range(0,4)) = 0.8
        _constant ("curvature constant", Range(0,5)) = 1
    }
    SubShader {
        Tags { "Queue" = "Transparent" }
        Pass {
            Blend SrcAlpha OneMinusSrcAlpha
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"
            float4 _MyColor;
            float4 _Color;
            sampler2D _MainTex;
            float _botmcut, _topcut, _constant;
            struct v2f {
                float4 pos : SV_POSITION;
                float2 uv : TEXCOORD0;
            };
            float4 _MainTex_ST;
            v2f vert (appdata_base v) {
                v2f o;
                o.pos = mul (UNITY_MATRIX_MVP, v.vertex);
                o.uv = TRANSFORM_TEX (v.texcoord, _MainTex);
                return o;
            }
            half4 frag (v2f i) : COLOR {
                half4 texcol, finalColor;
                texcol = tex2D (_MainTex, i.uv);
                //finalColor=_Color*texcol;
                finalColor = _MyColor;
                if (texcol.a < _botmcut)
                {
                    finalColor.a = 0;
                }
                else if (texcol.a > _topcut)
                {
                    finalColor.a = 0;
                }
                else
                {
                    float r = _topcut - _botmcut;
                    float xpos = _topcut - texcol.a;
                    finalColor.a = 1 - (_botmcut + sqrt((xpos*xpos) - (r*r))) / _constant;
                }
                return finalColor;
            }
            ENDCG
        }
    }
    Fallback "VertexLit"
}
The problem I am having in Unity 5 is that the resulting texture is blank, i.e. 0 alpha.
The bit that seems to be causing the problem is this one:
else
{
    float r = _topcut - _botmcut;
    float xpos = _topcut - texcol.a;
    finalColor.a = 1 - (_botmcut + sqrt((xpos*xpos) - (r*r))) / _constant;
}
If I comment out the last line ("finalColor.a = ...") then I see something.
This is the line that normally creates that circular alpha curve, but in Unity 5 it always seems to resolve to 0. Has there been some API change? The math should work out identically to how it worked in Unity 4.
PS: I don't know much about shaders!
A few things that I normally do when tracking down shader issues.
Option 1
Try using PIX or some other standard program to debug the shader. You just need to capture the frame, right-click on the pixel, and hit debug. I'd pay close attention to what each value is; make sure none are set to 0 that shouldn't be. Also verify in this tool that the right textures are being used.
Option 2
If you set finalColor.a to 0.5, does this do anything? If it does, you know the issue is one of your variables being 0. Should _constant even allow the range of 0? I think that should be from >0 to 5, honestly. Also verify you haven't overridden any of the constants or variables on the material; make sure they are all still set to the defaults. You might even want to just hard-set them in the shader to see if that fixes the problem.
Finally, solving shader problems is not easy, but the fact that it worked in Unity 4 and doesn't in 5 tells me that you are probably just resolving something to 0, so I would check that first.
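For example, a quick way to see what range the source alpha actually covers (a debug sketch against the shader above; temporarily replace the fragment function):

half4 frag (v2f i) : COLOR
{
    // Visualize texcol.a as grayscale to sanity-check it against _botmcut and _topcut.
    half4 texcol = tex2D(_MainTex, i.uv);
    return half4(texcol.a, texcol.a, texcol.a, 1);
}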
I have no idea why, but changing this line:
finalColor.a= 1-(_botmcut + sqrt((xpos*xpos)-(r*r)))/_constant;
to this:
finalColor.a= 1-(_botmcut + sqrt((r*r)-(xpos*xpos)))/_constant;
worked.
It doesn't make sense!
(A likely explanation: in that branch texcol.a lies between _botmcut and _topcut, so xpos = _topcut - texcol.a never exceeds r; xpos*xpos - r*r is therefore negative, and sqrt() of a negative number is undefined (typically NaN), which different compilers and drivers handle differently. The swapped operand r*r - xpos*xpos stays non-negative.)
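A defensive variant (a sketch reusing the names from the shader above) clamps the operand so sqrt() can never see a negative number, whatever the compiler does:

else
{
    float r = _topcut - _botmcut;
    float xpos = _topcut - texcol.a;
    // max() guards against a negative operand, and thus against NaN from sqrt()
    finalColor.a = 1 - (_botmcut + sqrt(max(0.0, (r*r) - (xpos*xpos)))) / _constant;
}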
I've written a shader, and it works fine when I add it to a plane located in front of the camera (in this case the camera does not have the shader). But when I add this shader to the camera, it does not show anything on the screen. Here is my code; could you let me know how I can change it to be compatible with the Camera.RenderWithShader method?
Shader "Custom/she1" {
Properties {
top("Top", Range(0,2)) = 1
bottom("Bottom", Range(0,2)) = 1
}
SubShader {
// Draw ourselves after all opaque geometry
Tags { "Queue" = "Transparent" }
// Grab the screen behind the object into _GrabTexture
GrabPass { }
// Render the object with the texture generated above
Pass {
CGPROGRAM
#pragma debug
#pragma vertex vert
#pragma fragment frag
#pragma target 3.0
sampler2D _GrabTexture : register(s0);
float top;
float bottom;
struct data {
float4 vertex : POSITION;
float3 normal : NORMAL;
};
struct v2f {
float4 position : POSITION;
float4 screenPos : TEXCOORD0;
};
v2f vert(data i){
v2f o;
o.position = mul(UNITY_MATRIX_MVP, i.vertex);
o.screenPos = o.position;
return o;
}
half4 frag( v2f i ) : COLOR
{
float2 screenPos = i.screenPos.xy / i.screenPos.w;
float _half = (top + bottom) * 0.5;
float _diff = (bottom - top) * 0.5;
screenPos.x = screenPos.x * (_half + _diff * screenPos.y);
screenPos.x = (screenPos.x + 1) * 0.5;
screenPos.y = 1-(screenPos.y + 1) * 0.5 ;
half4 sum = half4(0.0h,0.0h,0.0h,0.0h);
sum = tex2D( _GrabTexture, screenPos);
return sum;
}
ENDCG
}
}
Fallback Off
}
I think what you're asking for is a replacement shader that shades everything in the camera with your shader.
Am I correct?
If so, this should work:
Camera.main.SetReplacementShader(Shader.Find("Your Shader"), "RenderType");
Here is some more info:
http://docs.unity3d.com/Documentation/Components/SL-ShaderReplacement.html
Edit: Are you expecting the entire camera to warp like a lens effect? Because you're not going to get that using a shader like this by itself; as it stands, it will only apply to objects like your plane, not to the full camera view. That requires a post image effect. First, you need Unity Pro. If you do have it, import the Image Effects package and look at the fisheye script. See if you can duplicate the fisheye script with your own shader. When I attached the fisheye shader without its corresponding script, I was getting the exact same results as you are with your current shader code. If you don't have access to the Image Effects package, let me know and I'll send you the fisheye scripts and shaders.
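The post-effect route looks roughly like this (a minimal sketch, assuming Unity Pro; note the shader would then have to sample the source texture, e.g. _MainTex, instead of the GrabPass texture):

using UnityEngine;

[RequireComponent(typeof(Camera))]
public class TrapeziumEffect : MonoBehaviour
{
    public Material effectMat; // material built from the distortion shader

    // Called after the camera has finished rendering;
    // the shader processes the full camera image here.
    void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        Graphics.Blit(source, destination, effectMat);
    }
}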
I have tried several ways so far. The shader itself works very well when I add it to a plane located in front of the main camera. But when I add it to the main camera with the code below, nothing is visible on the screen (just a blank screen), without any error message. I assign the above shader to the repl variable.
using UnityEngine;
using System.Collections;

public class test2 : MonoBehaviour {

    public Shader repl = null;

    // Use this for initialization
    void Start () {
        Camera.main.SetReplacementShader(repl, "Opaque");
    }

    // Update is called once per frame
    void Update () {
    }
}
Just for your information, the above shader distorts the scene into a trapezium shape.
I am outputting depth in Cg in a branch, like so:
ZWrite On
...
void frag(v2f IN, out float4 color : COLOR, out float depth : DEPTH) {
    if (statement) {
        color = float4(1, 1, 1, 1);
    } else {
        color = float4(0, 0, 0, 0);
        depth = 0;
    }
}
However, as you can see, I omit writing the depth in the first branch. This results in undefined behaviour, but I believe the equivalent is common practice in GLSL (omitting the write to gl_FragDepth results in the original depth).
What should I do to get the original depth in the first branch in Cg, while still having a depth output?
YMMV with this script. The code, as I recall, needed to be targeted at old implementations of OpenGL or else you'd get an error like "shader registers cannot be masked", related to this D3D issue.
But I believe you can pull the depth from the camera depth texture and rewrite it out. You do need to calculate a projected position first using ComputeScreenPos in the vertex shader. Documentation is non-existent, AFAIK, for the functions Linear01Depth and LinearEyeDepth, so I can't tell you what the performance hit might be.
Shader "Depth Shader" { // defines the name of the shader
SubShader { // Unity chooses the subshader that fits the GPU best
Pass { // some shaders require multiple passes
ZWrite On
CGPROGRAM // here begins the part in Unity's Cg
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct v2f
{
float4 position : POSITION;
float4 projPos : TEXCOORD1;
};
v2f vert(float4 vertexPos : POSITION)
{
v2f OUT;
OUT.position = mul(UNITY_MATRIX_MVP, vertexPos);
OUT.projPos = ComputeScreenPos(OUT.position);
return OUT;
}
//camera depth texture here
uniform sampler2D _CameraDepthTexture; //Depth Texture
void frag(v2f IN, out float4 color:COLOR, out float depth:DEPTH) // fragment shader
{
color = float4(0);
// use eye depth for actual z...
depth = LinearEyeDepth (tex2Dproj(_CameraDepthTexture, UNITY_PROJ_COORD(IN.projPos)).r);
//or this for depth in between [0,1]
//depth = Linear01Depth (tex2Dproj(_CameraDepthTexture, UNITY_PROJ_COORD(IN.projPos)).r);
}
ENDCG // here ends the part in Cg
}
}
}
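One caveat (not shown above): _CameraDepthTexture is only populated if the camera actually renders a depth texture, so the script side needs something like this (a one-line sketch on the camera's GameObject):

using UnityEngine;

[RequireComponent(typeof(Camera))]
public class EnableDepthTexture : MonoBehaviour
{
    void Start()
    {
        // Ask Unity to render the depth texture that _CameraDepthTexture samples.
        GetComponent<Camera>().depthTextureMode |= DepthTextureMode.Depth;
    }
}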