I'm attempting to build a website made in Unity to WebGL using the Unity 5 beta.
A custom shader I wrote (or more accurately edited from an existing one) no longer works in Unity 5.
Here's what the shader is supposed to do: create a metaball effect where the alpha ramps up in a circular curve.
The shader turns this..
into this.. (via a render texture)
Here's the whole thing:
//Water Metaball Shader effect by Rodrigo Fernandez Diaz-2013
//Visit http://codeartist.info/ for more!!
Shader "Custom/Metaballs" {
Properties {
_MyColor ("Some Color", Color) = (1,1,1,1)
_MainTex ("Texture", 2D) = "white" { }
_botmcut ("bottom cutoff", Range(0,1)) = 0.1
_topcut ("top cutoff", Range(0,4)) = 0.8
_constant ("curvature constant", Range(0,5)) = 1
}
SubShader {
Tags {"Queue" = "Transparent" }
Pass {
Blend SrcAlpha OneMinusSrcAlpha
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
float4 _MyColor;
float4 _Color;
sampler2D _MainTex;
float _botmcut,_topcut,_constant;
struct v2f {
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
};
float4 _MainTex_ST;
v2f vert (appdata_base v){
v2f o;
o.pos = mul (UNITY_MATRIX_MVP, v.vertex);
o.uv = TRANSFORM_TEX (v.texcoord, _MainTex);
return o;
}
half4 frag (v2f i) : COLOR{
half4 texcol,finalColor;
texcol = tex2D (_MainTex, i.uv);
//finalColor=_Color*texcol;
finalColor=_MyColor;
if(texcol.a<_botmcut)
{
finalColor.a= 0;
}
else if((texcol.a>_topcut))
{
finalColor.a= 0;
}
else
{
float r = _topcut-_botmcut;
float xpos = _topcut - texcol.a;
finalColor.a= 1-(_botmcut + sqrt((xpos*xpos)-(r*r)))/_constant;
}
return finalColor;
}
ENDCG
}
}
Fallback "VertexLit"
}
The problem I am having in Unity 5 is that the resulting texture is blank, i.e. 0 alpha.
The bit that seems to be causing the problem is this one:
else
{
    float r = _topcut - _botmcut;
    float xpos = _topcut - texcol.a;
    finalColor.a = 1 - (_botmcut + sqrt((xpos*xpos) - (r*r))) / _constant;
}
If I comment out the last line of this ("finalColor.a = ...") then I see something.
This is the line that normally creates that circular alpha curve, but in Unity 5 it seems to always resolve to 0. Has there been some API change? Because the math should work out identically to how it did in Unity 4.
P.S. I don't know much about shaders!
A few things that I normally do when tracking down shader issues:
Option 1
Try using PIX or some other standard graphics debugger. You just need to capture the frame, right-click on the pixel, and hit debug. I'd pay close attention to what each value is and make sure none that shouldn't be are set to 0. Also verify in this tool that the right textures are being used.
Option 2
If you set finalColor.a to 0.5, does this do anything? If it does, you know the issue is one of your variables being 0. Should _constant even allow a range starting at 0? I think that should be from >0 to 5, honestly. Also verify you haven't overridden any of the constants or variables on the material; make sure they are all still set to the defaults. You might even want to just hard-set them in the shader to see if that fixes the problem.
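For example, a throwaway debugging version of frag() for the shader above (a sketch to isolate the problem, not a fix):

half4 frag (v2f i) : COLOR {
    half4 finalColor = _MyColor;
    finalColor.a = 0.5; // bypass the curve math entirely; if this renders, one of the curve inputs is to blame
    return finalColor;
}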
Finally, solving shader problems is not easy, but the fact that it worked in Unity 4 and doesn't in 5 tells me that you are probably just resolving something to 0, so I would check that first.
I have no idea why, but changing this line:
finalColor.a= 1-(_botmcut + sqrt((xpos*xpos)-(r*r)))/_constant;
to this:
finalColor.a= 1-(_botmcut + sqrt((r*r)-(xpos*xpos)))/_constant;
worked.
It doesn't make sense!
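A likely explanation: in that branch texcol.a lies between _botmcut and _topcut, so xpos lies between 0 and r. That makes (xpos*xpos)-(r*r) negative on every pixel, and sqrt() of a negative number is undefined and typically yields NaN. How a NaN alpha ends up on screen differs by platform and compiler, which would explain why the old Unity 4 (D3D9) path appeared to work while the Unity 5 WebGL (GL) path always resolves to 0. The swapped operands give the actual circle equation y = sqrt(r*r - x*x); a max() makes the intent explicit (a defensive sketch):

float r = _topcut - _botmcut;
float xpos = _topcut - texcol.a;
// r*r - xpos*xpos >= 0 whenever texcol.a is between the cutoffs, so sqrt() stays defined
finalColor.a = 1 - (_botmcut + sqrt(max(0.0, (r*r) - (xpos*xpos)))) / _constant;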
I have created the following gradient shader that takes an Image component's source image and applies a two-colour gradient to it. Using a toggle it can be switched between using the source image's alpha for the gradient alpha, or setting the alpha per gradient colour.
Properties
{
    [PerRendererData] _MainTex ("Texture", 2D) = "white" {}
    [Header(Colours)]
    _Color1("Color 1", Color) = (0,0,0,1)
    _Color2("Color 2", Color) = (1,1,1,1)
    [Toggle]_UseImageAlpha("Use Image alpha", float) = 0
    [Header(Cull mode)]
    [Enum(UnityEngine.Rendering.CullMode)] _CullMode("Cull mode", float) = 2
    [Header(ZTest)]
    [Enum(UnityEngine.Rendering.CompareFunction)] _ZTest("ZTest", float) = 4
    [Toggle(UNITY_UI_ALPHACLIP)] _UseUIAlphaClip("Use Alpha Clip", Float) = 1
}
SubShader
{
    Tags {"Queue" = "Transparent" "RenderType"="Transparent"}
    LOD 100
    Blend SrcAlpha OneMinusSrcAlpha
    ZTest [_ZTest]
    Cull [_CullMode]
    Pass
    {
        CGPROGRAM
        #pragma vertex vert
        #pragma fragment frag
        #pragma multi_compile_local _ UNITY_UI_ALPHACLIP
        #include "UnityCG.cginc"

        struct appdata
        {
            float4 vertex : POSITION;
            float2 uv : TEXCOORD0;
            fixed4 col : COLOR;
        };

        struct v2f
        {
            float2 uv : TEXCOORD0;
            float4 vertex : SV_POSITION;
            fixed4 col : COLOR;
        };

        sampler2D _MainTex;
        float4 _MainTex_ST;
        fixed4 _Color1;
        fixed4 _Color2;
        bool _UseImageAlpha;

        v2f vert (appdata v)
        {
            v2f o;
            o.vertex = UnityObjectToClipPos(v.vertex);
            o.uv = TRANSFORM_TEX(v.uv, _MainTex);
            o.col = v.col;
            return o;
        }

        fixed4 frag (v2f i) : SV_Target
        {
            if (_UseImageAlpha) {
                _Color1.a = i.col.a;
                _Color2.a = i.col.a;
            }
            fixed4 col = tex2D(_MainTex, i.uv);
            col *= lerp(_Color1, _Color2, i.uv.y);
            col.a = clamp(col.a, 0, 1);
            #ifdef UNITY_UI_ALPHACLIP
            clip(col.a - .001);
            #endif
            return col;
        }
        ENDCG
    }
}
This shader works fine and shows the gradient as expected. However, once I start adding multiple layers of Images (for example a blue square behind it and a green square in front of it) it starts z-fighting in the Scene view only, depending on the angle of the scene camera to the object that comes next in the hierarchy (in this example the green square). In the Game view and in builds the z-fighting doesn't occur.
I am using the default LessEqual ZTest option, with back culling and the render queue set to 3000 (the same as the render queue of the images in front of and behind it). As per Unity's documentation, having it set to LessEqual should make objects in front get drawn on top and objects behind get hidden:
How should depth testing be performed. Default is LEqual (draw objects in front of or at the distance as existing objects; hide objects behind them).
Setting the ZTest to any of the other options (Off, Always, GreaterEqual, etc.) doesn't yield a better result.
If I set the render queue higher (3001) it will always draw the gradient on top in the Scene view (no change in the Game view), whereas setting it to 2999 will still make it z-fight with the object in front of it (the green square), while making the blue square behind it transparent.
When I only have the green square in front of the gradient, it will z-fight in some places while cutting out the green square in other places, where the source image doesn't have any pixels.
Using the alpha of the source image, or the alpha of the two individual colours, does not make a difference either.
(gyazo) Example gif of the fighting changing depending on the camera angle.
What is causing this z-fighting, and why does it only occur in the Scene view?
Using Unity 2019.3.13f1; same results in 2019.2, 2019.1, 2018.4 LTS, and 2017 LTS on Windows.
Try adding ZWrite Off. With shaders it can be useful to start with (or at least look at) one of Unity's built-in shaders that is close to what you want; in your case that would be UI-Default.shader.
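For the gradient shader above that would look like this (only the ZWrite line is new):

SubShader
{
    Tags {"Queue" = "Transparent" "RenderType"="Transparent"}
    LOD 100
    Blend SrcAlpha OneMinusSrcAlpha
    ZWrite Off // transparent surfaces shouldn't write depth; this is what stops the scene-view z-fighting
    ZTest [_ZTest]
    Cull [_CullMode]
    ...
}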
I've only just started learning Unity, but because I come from a background of coding in C#, I've found the standard scripting to be very quick to learn. Unfortunately, I've now come across a problem for which I believe a custom shader is required and I'm completely lost when it comes to shaders.
Scenario:
I'm using a custom distance scaling process so that really big, far away objects are moved within a reasonable floating point precision range from the player. This works great and handles scaling of the objects based on their adjusted distance so they appear to actually be really far away. The problem occurs, though, when two of these objects pass close to each other in game space (they would still be millions of units apart at real scale), because they visibly collide.
Ex: https://www.youtube.com/watch?v=KFnuQg4R8NQ
Attempted Solution 1:
I've looked into flattening the objects along the player's view axis; this fixes the collision, but it affects shading and particle effects, so it wasn't a good option.
Attempted Solution 2:
I've tried changing the RenderOrder, but because sometimes one object is inside the mesh of another (though the centre of this object is still closer to the camera) it doesn't fix the issue and particle effects are problematic again.
Attempted Solution 3:
I've tried moving the colliding objects to their own layer, spawning a new camera with a higher depth at the same position as my main camera, and forcing each camera to only see the items on its respective layer. But this caused lighting issues, since some objects light others, and with only a limited number of layers this solution was quite limiting: it forced me to have only a low number of overlapping objects at a time. NOTE: this is probably the closest I was able to get to what I need, though.
Ex: https://www.youtube.com/watch?v=CyFDgimJ2-8
Attempted Solution 4:
I've tried updating the Standard shader code by downloading it from Unity's downloads page and creating my own custom shader that lets me modify the ZWrite and ZTest properties, but because I have no real understanding of how these work, I'm not getting anywhere.
Request:
I would greatly appreciate a shader script code example of how I can programmatically force one object, whose mesh is either colliding with or completely inside another mesh, to render in front of said mesh. I'm hoping I can then take that example and apply it to all the shaders I'm currently using (Standard, Particle Additive) to achieve the effect I'm looking for. Thanks in advance for your help.
In the gif below both objects are colliding, and according to the camera position the cube is in front of the sphere, but I can change their visibility with the render queue:
If that's what you want, you only have to add ZWrite Off in your SubShader before the CGPROGRAM starts. The following is the Standard Surface Shader including that line:
Shader "Custom/Shader" {
Properties {
_Color ("Color", Color) = (1,1,1,1)
_MainTex ("Albedo (RGB)", 2D) = "white" {}
_Glossiness ("Smoothness", Range(0,1)) = 0.5
_Metallic ("Metallic", Range(0,1)) = 0.0
}
SubShader {
Tags { "RenderType"="Opaque" }
LOD 200
ZWrite Off
CGPROGRAM
// Physically based Standard lighting model, and enable shadows on all light types
#pragma surface surf Standard fullforwardshadows
// Use shader model 3.0 target, to get nicer looking lighting
#pragma target 3.0
sampler2D _MainTex;
struct Input {
float2 uv_MainTex;
};
half _Glossiness;
half _Metallic;
fixed4 _Color;
// Add instancing support for this shader. You need to check 'Enable Instancing' on materials that use the shader.
// See https://docs.unity3d.com/Manual/GPUInstancing.html for more information about instancing.
// #pragma instancing_options assumeuniformscaling
UNITY_INSTANCING_BUFFER_START(Props)
// put more per-instance properties here
UNITY_INSTANCING_BUFFER_END(Props)
void surf (Input IN, inout SurfaceOutputStandard o) {
// Albedo comes from a texture tinted by color
fixed4 c = tex2D (_MainTex, IN.uv_MainTex) * _Color;
o.Albedo = c.rgb;
// Metallic and smoothness come from slider variables
o.Metallic = _Metallic;
o.Smoothness = _Glossiness;
o.Alpha = c.a;
}
ENDCG
}
FallBack "Diffuse"
}
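A note on the render-queue part: to flip which of the two objects draws on top, bump the queue of one material, either in the shader tags or via material.renderQueue from a script (standard Unity; the +1 below is just an example):

Tags { "RenderType"="Opaque" "Queue"="Geometry+1" } // renders after the default Geometry queue (2000)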
Now for sorting particles: look at the shadows, how they collide, and how we can change their visibility regardless of their position.
Here's the shader for particles. I'm using the Unity built-in shader; the only thing added is ZTest Always.
Shader "Particles/Alpha Blended Premultiply Custom" {
Properties {
_MainTex ("Particle Texture", 2D) = "white" {}
_InvFade ("Soft Particles Factor", Range(0.01,3.0)) = 1.0
}
Category {
Tags { "Queue"="Transparent" "IgnoreProjector"="True" "RenderType"="Transparent" "PreviewType"="Plane" }
ZTest Always
Blend SrcAlpha OneMinusSrcAlpha
ColorMask RGB
Cull Off Lighting Off ZWrite Off
SubShader {
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma target 2.0
#pragma multi_compile_particles
#pragma multi_compile_fog
#include "UnityCG.cginc"
sampler2D _MainTex;
fixed4 _TintColor;
struct appdata_t {
float4 vertex : POSITION;
fixed4 color : COLOR;
float2 texcoord : TEXCOORD0;
UNITY_VERTEX_INPUT_INSTANCE_ID
};
struct v2f {
float4 vertex : SV_POSITION;
fixed4 color : COLOR;
float2 texcoord : TEXCOORD0;
#ifdef SOFTPARTICLES_ON
float4 projPos : TEXCOORD1;
#endif
UNITY_VERTEX_OUTPUT_STEREO
};
float4 _MainTex_ST;
v2f vert (appdata_t v)
{
v2f o;
UNITY_SETUP_INSTANCE_ID(v);
UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(o);
o.vertex = UnityObjectToClipPos(v.vertex);
#ifdef SOFTPARTICLES_ON
o.projPos = ComputeScreenPos (o.vertex);
COMPUTE_EYEDEPTH(o.projPos.z);
#endif
o.color = v.color;
o.texcoord = TRANSFORM_TEX(v.texcoord,_MainTex);
return o;
}
UNITY_DECLARE_DEPTH_TEXTURE(_CameraDepthTexture);
float _InvFade;
fixed4 frag (v2f i) : SV_Target
{
#ifdef SOFTPARTICLES_ON
float sceneZ = LinearEyeDepth (SAMPLE_DEPTH_TEXTURE_PROJ(_CameraDepthTexture, UNITY_PROJ_COORD(i.projPos)));
float partZ = i.projPos.z;
float fade = saturate (_InvFade * (sceneZ-partZ));
i.color.a *= fade;
#endif
return i.color * tex2D(_MainTex, i.texcoord) * i.color.a;
}
ENDCG
}
}
}
}
I don't know much about shaders, so I am struggling to add transparency to a shader I already use.
So basically I use the shader below to display 360 videos on a sphere. It renders the inside of the sphere (via Cull Front) so the video is displayed on the interior.
However, I would like to add an alpha value to it so I can make the sphere (and therefore the video) as transparent as I need it to be. What should I change?
Shader "Custom/Equirectangular" {
Properties {
_Color ("Main Color", Color) = (1,1,1,1)
_MainTex ("Diffuse (RGB) Alpha (A)", 2D) = "gray" {}
}
SubShader{
Pass {
Tags {"LightMode" = "Always"}
Cull Front
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma fragmentoption ARB_precision_hint_fastest
#pragma glsl
#pragma target 3.0
#include "UnityCG.cginc"
struct appdata {
float4 vertex : POSITION;
float3 normal : NORMAL;
};
struct v2f
{
float4 pos : SV_POSITION;
float3 normal : TEXCOORD0;
};
v2f vert (appdata v)
{
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.normal = v.normal;
return o;
}
sampler2D _MainTex;
#define PI 3.141592653589793
inline float2 RadialCoords(float3 a_coords)
{
float3 a_coords_n = normalize(a_coords);
float lon = atan2(a_coords_n.z, a_coords_n.x);
float lat = acos(a_coords_n.y);
float2 sphereCoords = float2(lon, lat) * (1.0 / PI);
return float2(1 - (sphereCoords.x * 0.5 + 0.5), 1 - sphereCoords.y);
}
float4 frag(v2f IN) : COLOR
{
float2 equiUV = RadialCoords(IN.normal);
return tex2D(_MainTex, equiUV);
}
ENDCG
}
}
FallBack "VertexLit"
}
EDIT
I have also noticed that texture tiling and offset do not work with this shader. Any ideas how to make that work?
Short story: this is going to be really difficult.
Not the shader; the shader's easy. All you have to do is modify this line:
return tex2D(_MainTex, equiUV);
The Long story:
Or: what to modify this line to.
Video formats, due to their very nature, do not natively contain an alpha channel. You'll be hard-pressed to find one that does (I looked into this briefly back in 2015 when "interviewing" for a "job" where they needed something similar).
Once you figure out how you're going to encode the alpha, you can modify the shader to look for that data and convert it to an alpha value, and bam, you're done.
I think the place I was "interviewing" at did it by splitting the video into upper and lower sections: the upper half was just the alpha channel (black/white) and the lower half was the colour data. The player would split the video horizontally and treat the two halves differently. I didn't have to mess with it, they'd already done it, so I'm not sure how it was done programmatically; I can only speculate.
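As a rough sketch of that split-frame idea applied to the shader in the question (the layout, colour in the lower half and matte in the upper half, is an assumption; productions vary):

float4 frag(v2f IN) : COLOR
{
    float2 equiUV = RadialCoords(IN.normal);
    float2 colorUV = float2(equiUV.x, equiUV.y * 0.5);       // lower half: colour
    float2 alphaUV = float2(equiUV.x, equiUV.y * 0.5 + 0.5); // upper half: black/white matte
    float4 col = tex2D(_MainTex, colorUV);
    col.a = tex2D(_MainTex, alphaUV).r; // any channel works, the matte is greyscale
    return col;
}

(You'd still need alpha blending enabled in the pass, as the next answer points out.)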
You forgot blending for transparency. And it is better to provide the corresponding shader tags as well:
Tags { "LightMode"="Always" "Queue"="Transparent" "RenderType"="Transparent" }
Blend SrcAlpha OneMinusSrcAlpha
Cull Front
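Note that _Color ("Main Color") is declared in Properties but never used in the CG block, so blending alone changes nothing; the fragment shader also has to output an alpha below 1. A minimal sketch, which also wires up tiling/offset through the standard _MainTex_ST variable (addressing the EDIT in the question):

fixed4 _Color;      // the existing "Main Color" property
float4 _MainTex_ST; // filled by Unity with the material's tiling (xy) and offset (zw)

float4 frag(v2f IN) : COLOR
{
    float2 equiUV = RadialCoords(IN.normal);
    equiUV = equiUV * _MainTex_ST.xy + _MainTex_ST.zw; // the same maths TRANSFORM_TEX applies to mesh UVs
    return tex2D(_MainTex, equiUV) * _Color;           // _Color.a now drives the transparency
}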
I've used Unity Projectors and a custom shader to create the effect of a custom image shape coming from a projector. It works great, except that if the light from the projector comes into contact with the light from another copy of the projector, the light colours are combined. I don't want this to happen: if I specify green, for example, for both projectors and their light overlaps, the light should remain green for both. Here is a picture of what I mean:
I'm new to shaders and found this shader online. Any help on how I could modify the shader to accomplish my goal would be much appreciated, or another way to accomplish it would be great. I tried putting each projector into a layer and telling each to ignore that layer when projecting its light, but this had no effect. Thanks.
Shader "Custom/MyProjectorShader" {
Properties{
_Color("Tint Color", Color) = (1,1,1,1)
_Attenuation("Falloff", Range(0.0, 1.0)) = 1.0
_ShadowTex("Cookie", 2D) = "gray" {}
}
Subshader{
Tags{ "Queue" = "Transparent" }
Pass{
ZWrite Off
ColorMask RGB
Blend SrcAlpha One // Additive blending
Offset -1, -1
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct v2f {
float4 uvShadow : TEXCOORD0;
float4 pos : SV_POSITION;
};
float4x4 unity_Projector;
float4x4 unity_ProjectorClip;
v2f vert(float4 vertex : POSITION)
{
v2f o;
o.pos = UnityObjectToClipPos(vertex);
o.uvShadow = mul(unity_Projector, vertex);
return o;
}
sampler2D _ShadowTex;
fixed4 _Color;
float _Attenuation;
fixed4 frag(v2f i) : SV_Target
{
// Apply alpha mask
fixed4 texCookie = tex2Dproj(_ShadowTex, UNITY_PROJ_COORD(i.uvShadow));
fixed4 outColor = _Color * texCookie.a;
// Attenuation
float depth = i.uvShadow.z; // [-1 (near), 1 (far)]
return outColor * clamp(1.0 - abs(depth) + _Attenuation, 0.0, 1.0);
}
ENDCG
}
}
}
Blend SrcAlpha OneMinusSrcAlpha
Unlike what the comment says, Blend One One is the regular Additive (Linear Dodge) blend mode. Using this new blend mode you'll need to make sure your textures have an alpha channel too.
https://docs.unity3d.com/Manual/SL-Blend.html
Probably Blend One OneMinusSrcAlpha would work better in this case, because it would also avoid some remaining border cutout.
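Blend One OneMinusSrcAlpha expects premultiplied colour, so the fragment shader should multiply rgb by the mask before returning. A sketch of the matching changes (where two same-coloured projectors overlap, src.rgb + dst*(1-src.a) keeps the result at that colour instead of brightening it):

Blend One OneMinusSrcAlpha // premultiplied alpha

fixed4 frag(v2f i) : SV_Target
{
    fixed4 texCookie = tex2Dproj(_ShadowTex, UNITY_PROJ_COORD(i.uvShadow));
    fixed4 outColor = _Color * texCookie.a;
    float depth = i.uvShadow.z;
    outColor *= clamp(1.0 - abs(depth) + _Attenuation, 0.0, 1.0);
    outColor.rgb *= outColor.a; // premultiply
    return outColor;
}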
I want to draw a horizontal line on an object with shader code (HLSL).
The clipping shader simply takes the distance to a given Y coordinate in the surface shader and checks if it is higher than a given value.
If so, it discards. The result is a shader that simply clips away all pixels that are not on the line.
void surf (Input IN, inout SurfaceOutputStandard o) {
    // Albedo comes from a texture tinted by color
    fixed4 c = tex2D (_MainTex, IN.uv_MainTex) * _Color;
    o.Albedo = c.rgb; // write the tinted sample so it is actually used
    float d = abs(_YClip - IN.worldPos.y); // _YClip is in the Properties block and can be changed
    if (d > _LineThickness) {
        discard; // throw away every fragment farther than _LineThickness from the line
    }
}
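For reference, the snippet assumes declarations like these alongside the surface shader's usual ones (names from the question; the defaults are placeholders):

_YClip ("Y Clip (world units)", Float) = 0
_LineThickness ("Line Thickness", Float) = 0.05

and matching variables in the CG block:

float _YClip;
float _LineThickness;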
Can I somehow combine this shader with the standard Unity shader without changing the code?
I plan to have a gizmo shader that renders lines and all kinds of stuff. It would be very practical if I could just tell Unity to render this gizmo shader on top.
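For the render-on-top part specifically, the usual trick is the same one as the particle shader earlier on this page: a late queue plus ZTest Always. A minimal SubShader sketch, not a complete shader:

SubShader {
    Tags { "Queue" = "Overlay" } // drawn after everything else
    Pass {
        ZTest Always // ignore the depth buffer
        ZWrite Off
        ...
    }
}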
I believe you might be able to use or adapt this shader to your purpose.
Image showing before the y cutoff is reached.
Image showing during, where one half is above the cutoff y value and the other half is below. Note that the pattern it dissolves in depends on a texture pattern you supply yourself, so it should be possible to have a strict cutoff instead of a more odd and uneven pattern.
After the object has fully passed the cutoff y value. What I did in this case was to hide an object inside the start object that is slightly smaller than the first object you saw. But if you don't have anything inside, the object will just be invisible, or clipped.
Shader "Dissolve/Dissolve"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
_DissolveTexture("Dissolve Texture", 2D) = "white" {}
_DissolveY("Current Y of the dissolve effect", Float) = 0
_DissolveSize("Size of the effect", Float) = 2
_StartingY("Starting point of the effect", Float) = -1 //the number is supposedly in meters. Is compared to the Y coordinate in world space I believe.
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// make fog work
//#pragma multi_compile_fog
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
//UNITY_FOG_COORDS(1)
float4 vertex : SV_POSITION;
float3 worldPos : TEXCOORD1;
};
sampler2D _MainTex;
float4 _MainTex_ST;
sampler2D _DissolveTexture;
float _DissolveY;
float _DissolveSize;
float _StartingY;
v2f vert (appdata v) //"The vertex shader"
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
o.worldPos = mul(unity_ObjectToWorld, v.vertex).xyz;
//UNITY_TRANSFER_FOG(o,o.vertex);
return o;
}
fixed4 frag (v2f i) : SV_Target //"For drawing the pixel on top"
{
float transition = _DissolveY - i.worldPos.y; //Cutoff value where world position is taken into account.
clip(_StartingY + (transition + (tex2D(_DissolveTexture, i.uv)) * _DissolveSize)); //Clip = cutoff if above 0.
//My understanding: If StartingY for dissolve effect + transition value and uv mapping of the texture is taken into account, clip off using the _DissolveSize.
//This happens to each individual pixel.
// sample the texture
fixed4 col = tex2D(_MainTex, i.uv);
// apply fog
//UNITY_APPLY_FOG(i.fogCoord, col);
//clip(1 - i.vertex.x % 10); //"A pixel is NOT rendered if clip is below 0."
return col;
}
ENDCG
}
}
}
Here you see the inspector fields available.
I have a similar shader for the x axis.
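The x-axis version only needs the transition to read a different world-space component (a sketch; _DissolveX is the renamed property):

float transition = _DissolveX - i.worldPos.x;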