Unity CG Texture Layering using Passes

This is what I'm trying to accomplish:
- Texture the geometry with a primary shader.
- Layer a second texture over the first one, offset by a projected amount along the normal.
Here is my current shader code:
Shader "Custom/FurShader"
{
Properties
{
_MainTex( "Main Texture", 2D ) = "white" {}
_MaxHairLength( "Max Hair Length", Float ) = 0.5
_NoOfPasses( "Number of Passes", Float ) = 2.0
}
CGINCLUDE
//includes
#include "UnityCG.cginc"
//structures
struct vertexInput
{
float4 vertex : POSITION;
float4 normal : NORMAL;
float4 texcoord : TEXCOORD0;
};
struct fragmentInput
{
float4 pos : SV_POSITION;
half2 uv : TEXCOORD0;
};
//uniforms
uniform float _MaxHairLength;
uniform sampler2D _MainTex;
uniform float4 _MainTex_ST;
uniform sampler2D _SecondTex;
uniform float4 _SecondTex_ST;
uniform float _NoOfPasses;
//function
inline fragmentInput LevelFragmentShader( vertexInput i, int level )
{
fragmentInput o;
float movementDist = ( _MaxHairLength / _NoOfPasses ) * level;
float4 pos = ( i.vertex + ( i.normal * movementDist ) );
o.pos = mul( UNITY_MATRIX_MVP, pos );
o.uv = TRANSFORM_TEX( i.texcoord, _SecondTex );
return o;
}
half4 frag( fragmentInput i ) : COLOR
{
return tex2D( _SecondTex, i.uv );
}
ENDCG
SubShader {
Tags { "Queue" = "Transparent"}
Blend SrcAlpha OneMinusSrcAlpha
Pass
{
CGPROGRAM
// Upgrade NOTE: excluded shader from OpenGL ES 2.0 because it does not contain a surface program or both vertex and fragment programs.
#pragma exclude_renderers gles
#pragma vertex vert
#pragma fragment frag_unique
fragmentInput vert( vertexInput i )
{
fragmentInput o;
o.pos = mul( UNITY_MATRIX_MVP, i.vertex );
o.uv = TRANSFORM_TEX( i.texcoord, _MainTex );
return o;
}
half4 frag_unique( fragmentInput i ) : COLOR
{
return tex2D( _MainTex, i.uv );
}
ENDCG
}
Pass
{
CGPROGRAM
// Upgrade NOTE: excluded shader from OpenGL ES 2.0 because it does not contain a surface program or both vertex and fragment programs.
#pragma exclude_renderers gles
#pragma vertex vert
#pragma fragment frag
fragmentInput vert( vertexInput i )
{
fragmentInput o = LevelFragmentShader( i, 1 );
return o;
}
ENDCG
}
}
FallBack "Diffuse"
}
But as you can see, the second texture in the result is not projecting perpendicularly from edge to edge. Any suggestions would be great. I'm fairly sure my maths is correct: vertexPos + (normal * projectionDistance). Could it be something to do with how I'm using Unity's ModelViewProjection matrix?
Image showing result
http://i1265.photobucket.com/albums/jj508/maxfire1/Capture_zpsc8db2b1f.png
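A minimal sketch of the same offset maths, assuming the issue comes from adding the full float4 normal (w component included) to the position and from the normal not being normalized; note that _SecondTex would also need to be declared in the Properties block before a texture can be assigned to it. This is a guess at the cause, not a confirmed fix:

    //Hedged sketch: offset along the normalized object-space normal only, keeping w untouched
    inline fragmentInput LevelFragmentShader( vertexInput i, int level )
    {
        fragmentInput o;
        float movementDist = ( _MaxHairLength / _NoOfPasses ) * level;
        float3 offset = normalize( i.normal.xyz ) * movementDist; // direction * distance
        float4 pos = float4( i.vertex.xyz + offset, 1.0 );        // keep w = 1 for the MVP transform
        o.pos = mul( UNITY_MATRIX_MVP, pos );
        o.uv = TRANSFORM_TEX( i.texcoord, _SecondTex );
        return o;
    }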

Related

Unity Shader Error when Building to WebGL

When trying to build my game to WebGL I receive the following errors:
Shader error in 'Hidden/Universal/CoreBlit': invalid subscript 'positionCS' at /PathToProject/Library/PackageCache/com.unity.render-pipelines.core#12.1.2/Runtime/Utilities/Blit.hlsl(92) (on gles)
Shader error in 'Hidden/kMotion/CameraMotionVectors': SV_VertexID semantic is not supported on GLES 2.0 at line 11 (on gles)
The Shader I am using has the following code:
Shader "Skybox Gradient"
{
Properties
{
_Top("Top", Color) = (1,1,1,0)
_Bottom("Bottom", Color) = (0,0,0,0)
_mult("mult", Float) = 1
_pwer("pwer", Float) = 1
[Toggle(_SCREENSPACE_ON)] _Screenspace("Screen space", Float) = 0
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
CGINCLUDE
#pragma target 3.0
ENDCG
Blend Off
Cull Back
ColorMask RGBA
ZWrite On
ZTest LEqual
Offset 0 , 0
Pass
{
Name "Unlit"
Tags { "LightMode"="ForwardBase" }
CGPROGRAM
#ifndef UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX
//only defining to not throw compilation error over Unity 5.5
#define UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input)
#endif
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_instancing
#include "UnityCG.cginc"
#pragma shader_feature_local _SCREENSPACE_ON
struct appdata
{
float4 vertex : POSITION;
float4 color : COLOR;
UNITY_VERTEX_INPUT_INSTANCE_ID
};
struct v2f
{
float4 vertex : SV_POSITION;
#ifdef ASE_NEEDS_FRAG_WORLD_POSITION
float3 worldPos : TEXCOORD0;
#endif
UNITY_VERTEX_INPUT_INSTANCE_ID
UNITY_VERTEX_OUTPUT_STEREO
float4 ase_texcoord1 : TEXCOORD1;
float4 ase_texcoord2 : TEXCOORD2;
};
uniform float4 _Bottom;
uniform float4 _Top;
uniform float _mult;
uniform float _pwer;
v2f vert ( appdata v )
{
v2f o;
UNITY_SETUP_INSTANCE_ID(v);
UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(o);
UNITY_TRANSFER_INSTANCE_ID(v, o);
float4 ase_clipPos = UnityObjectToClipPos(v.vertex);
float4 screenPos = ComputeScreenPos(ase_clipPos);
o.ase_texcoord2 = screenPos;
o.ase_texcoord1 = v.vertex;
float3 vertexValue = float3(0, 0, 0);
#if ASE_ABSOLUTE_VERTEX_POS
vertexValue = v.vertex.xyz;
#endif
vertexValue = vertexValue;
#if ASE_ABSOLUTE_VERTEX_POS
v.vertex.xyz = vertexValue;
#else
v.vertex.xyz += vertexValue;
#endif
o.vertex = UnityObjectToClipPos(v.vertex);
#ifdef ASE_NEEDS_FRAG_WORLD_POSITION
o.worldPos = mul(unity_ObjectToWorld, v.vertex).xyz;
#endif
return o;
}
fixed4 frag (v2f i ) : SV_Target
{
UNITY_SETUP_INSTANCE_ID(i);
UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(i);
fixed4 finalColor;
#ifdef ASE_NEEDS_FRAG_WORLD_POSITION
float3 WorldPosition = i.worldPos;
#endif
float4 screenPos = i.ase_texcoord2;
float4 ase_screenPosNorm = screenPos / screenPos.w;
ase_screenPosNorm.z = ( UNITY_NEAR_CLIP_VALUE >= 0 ) ? ase_screenPosNorm.z : ase_screenPosNorm.z * 0.5 + 0.5;
#ifdef _SCREENSPACE_ON
float staticSwitch13 = ase_screenPosNorm.y;
#else
float staticSwitch13 = i.ase_texcoord1.xyz.y;
#endif
float4 lerpResult3 = lerp( _Bottom , _Top , pow( saturate( ( staticSwitch13 * _mult ) ) , _pwer ));
finalColor = lerpResult3;
return finalColor;
}
ENDCG
}
}
CustomEditor "ASEMaterialInspector"
}
The error only occurs every second or third time I build the game locally.
However, when building with Unity Cloud Build it always appears.
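Both errors are reported "(on gles)", i.e. for the OpenGL ES 2.0 compile target. A minimal shader-side sketch, assuming the project still lists OpenGL ES 2.0 among its Graphics APIs; this pragma only affects the shader it is placed in, while the failing shaders named above live in the URP/kMotion packages, so removing OpenGL ES 2.0 from the Graphics APIs list in Player Settings may still be required:

    CGPROGRAM
    //Hedged sketch: skip the OpenGL ES 2.0 compile target for this pass
    #pragma exclude_renderers gles
    #pragma vertex vert
    #pragma fragment frag
    // ...
    ENDCG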

Space curvature bending in Unity3D as Post Effect/Image Effect

I'm trying to achieve a cylindrical bending effect like this in Unity3D:
But every solution I've found uses a material-based shader; unfortunately I need a Post Process effect or an Image Effect, for these reasons:
- One map out of 30 needs this effect, and many materials are shared between the maps.
- Every solution is vertex-based. I've done some experiments, but my models have different polygon counts, which means the effect would create visual artifacts (though this could eventually be fixed by editing the 3D models).
- I'm at an advanced stage of development.
Do you think it's possible to create a simple effect (even a fake one) that moves the pixels downwards/upwards based on the distance to the camera? (I assume I need to use the depth map)
I've tried very hard but had no success; the effect either does nothing or simply won't compile :(
This is the best I could come up with. The grayscale in the Frag method is only there to check that the shader is working, but once I define the Vert function the grayscale disappears and the shader does nothing.
Shader "Hidden/Custom/WorldCurvature"
{
HLSLINCLUDE
#include "Packages/com.unity.postprocessing/PostProcessing/Shaders/StdLib.hlsl"
TEXTURE2D_SAMPLER2D(_MainTex, sampler_MainTex);
float _Bend;
struct Attributes
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
};
struct Varyings
{
float4 vertex : SV_POSITION;
float2 texcoord : TEXCOORD0;
};
float4 Frag(Varyings i) : SV_Target
{
float4 color = SAMPLE_TEXTURE2D(_MainTex, sampler_MainTex, i.texcoord);
float luminance = dot(color.rgb, float3(0.2126729, 0.7151522, 0.0721750));
color.rgb = lerp(color.rgb, luminance.xxx, _Bend.xxx);
return color;
}
Varyings Vert(Attributes v)
{
Varyings o;
float4 vv = mul(unity_ObjectToWorld, v.vertex );
vv.xyz -= _WorldSpaceCameraPos.xyz;
vv = float4( 0.0f, (vv.x * vv.x) * - _Bend, 0.0f, 0.0f );
v.vertex += mul(unity_WorldToCamera, vv);
o.vertex = mul(unity_WorldToCamera, vv);
o.texcoord = v.texcoord;
return o;
}
ENDHLSL
SubShader
{
Cull Off ZWrite Off ZTest Always
Pass
{
HLSLPROGRAM
#pragma vertex Vert
#pragma fragment Frag
ENDHLSL
}
}
}
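A plausible reason the image goes blank is that the custom Vert overwrites the clip-space position of the full-screen triangle, so the triangle no longer covers the screen. A minimal pass-through sketch, assuming the PostProcessing stack's StdLib.hlsl helpers (TransformTriangleVertexToUV, UNITY_UV_STARTS_AT_TOP) as used by its default vertex program; any bending would then happen in Frag instead:

    //Hedged sketch: keep the full-screen triangle intact and do the bending in the fragment shader
    Varyings Vert(Attributes v)
    {
        Varyings o;
        o.vertex = float4(v.vertex.xy, 0.0, 1.0);               // vertices already arrive as a clip-space triangle
        o.texcoord = TransformTriangleVertexToUV(v.vertex.xy);
    #if UNITY_UV_STARTS_AT_TOP
        o.texcoord = o.texcoord * float2(1.0, -1.0) + float2(0.0, 1.0);
    #endif
        return o;
    }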
I've done another experiment but I think it would only work in a 2D environment, here the image stops once I activate the image effect:
Shader "Hidden/Custom/CylinderImageEffect" {
Properties {
_MainTex ("Texture", 2D) = "white" {}
}
SubShader {
Cull Off ZWrite Off ZTest Always
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct v2f {
float4 vertex : SV_POSITION;
float2 uv : TEXCOORD0;
};
v2f vert( appdata_img v )
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.texcoord.xy;
return o;
}
sampler2D _MainTex;
fixed4 frag (v2f i) : SV_Target {
i.uv.x = 1 - acos(i.uv.x * 2 - 1) / 3.14159265;
return tex2D(_MainTex, i.uv);
}
ENDCG
}
}
}
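For the depth-based idea in the question, a minimal fragment-only sketch against the same PostProcessing setup, assuming the camera renders a depth texture and that _CameraDepthTexture, SAMPLE_DEPTH_TEXTURE and LinearEyeDepth are available via StdLib.hlsl as in other post-processing effects; the quadratic falloff constant is made up for illustration:

    //Hedged sketch: shift each pixel's sample coordinate by its eye depth to fake a downward curvature
    TEXTURE2D_SAMPLER2D(_CameraDepthTexture, sampler_CameraDepthTexture);

    float4 FragCurvature(Varyings i) : SV_Target
    {
        float rawDepth = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, sampler_CameraDepthTexture, i.texcoord);
        float eyeDepth = LinearEyeDepth(rawDepth);            // distance from the camera in world units
        float2 uv = i.texcoord;
        uv.y += eyeDepth * eyeDepth * _Bend * 0.0001;         // arbitrary quadratic falloff for illustration
        return SAMPLE_TEXTURE2D(_MainTex, sampler_MainTex, uv);
    }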

Can Unity render just 4 sides of a skybox?

In my game I don't need the top and bottom sides of skyboxes, so I removed them from the material. However, according to Frame Debugger, Unity still renders all 6 sides, even though they're never seen and should be occluded. Is there a way to ensure only four sides get rendered?
This is usually handled by the skybox shader itself, e.g. the default 6-sided Skybox shader.
As you can see there, it contains 6 passes for the 6 sides.
Something still has to be rendered at those points, so I don't think you can simply remove the passes, but you could create a 4-sided variant in which the top and bottom passes just return a plain color and skip the texture lookup, e.g.:
Shader "Custom/Skybox4"
{
Properties
{
_Tint ("Tint Color", Color) = (.5, .5, .5, .5)
[Gamma] _Exposure ("Exposure", Range(0, 8)) = 1.0
_Rotation ("Rotation", Range(0, 360)) = 0
[NoScaleOffset] _FrontTex ("Front [+Z] (HDR)", 2D) = "grey" {}
[NoScaleOffset] _BackTex ("Back [-Z] (HDR)", 2D) = "grey" {}
[NoScaleOffset] _LeftTex ("Left [+X] (HDR)", 2D) = "grey" {}
[NoScaleOffset] _RightTex ("Right [-X] (HDR)", 2D) = "grey" {}
_TopBottomColor ("Top Bottom Fill Color", Color) = (0, 0, 0, 0)
}
SubShader
{
Tags
{
"Queue"="Background" "RenderType"="Background" "PreviewType"="Skybox"
}
Cull Off ZWrite Off
CGINCLUDE
#include "UnityCG.cginc"
half4 _Tint;
half _Exposure;
float _Rotation;
float3 RotateAroundYInDegrees(float3 vertex, float degrees)
{
float alpha = degrees * UNITY_PI / 180.0;
float sina, cosa;
sincos(alpha, sina, cosa);
float2x2 m = float2x2(cosa, -sina, sina, cosa);
return float3(mul(m, vertex.xz), vertex.y).xzy;
}
struct appdata_t
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
UNITY_VERTEX_INPUT_INSTANCE_ID
};
struct v2f
{
float4 vertex : SV_POSITION;
float2 texcoord : TEXCOORD0;
UNITY_VERTEX_OUTPUT_STEREO
};
v2f vert(appdata_t v)
{
v2f o;
UNITY_SETUP_INSTANCE_ID(v);
UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(o);
float3 rotated = RotateAroundYInDegrees(v.vertex, _Rotation);
o.vertex = UnityObjectToClipPos(rotated);
o.texcoord = v.texcoord;
return o;
}
half4 skybox_frag(v2f i, sampler2D smp, half4 smpDecode)
{
half4 tex = tex2D(smp, i.texcoord);
half3 c = DecodeHDR(tex, smpDecode);
c = c * _Tint.rgb * unity_ColorSpaceDouble.rgb;
c *= _Exposure;
return half4(c, 1);
}
ENDCG
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma target 2.0
sampler2D _FrontTex;
half4 _FrontTex_HDR;
half4 frag(v2f i) : SV_Target { return skybox_frag(i, _FrontTex, _FrontTex_HDR); }
ENDCG
}
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma target 2.0
sampler2D _BackTex;
half4 _BackTex_HDR;
half4 frag(v2f i) : SV_Target { return skybox_frag(i, _BackTex, _BackTex_HDR); }
ENDCG
}
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma target 2.0
sampler2D _LeftTex;
half4 _LeftTex_HDR;
half4 frag(v2f i) : SV_Target { return skybox_frag(i, _LeftTex, _LeftTex_HDR); }
ENDCG
}
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma target 2.0
sampler2D _RightTex;
half4 _RightTex_HDR;
half4 frag(v2f i) : SV_Target { return skybox_frag(i, _RightTex, _RightTex_HDR); }
ENDCG
}
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
half4 _TopBottomColor;
half4 frag(v2f i) : SV_Target { return _TopBottomColor; }
ENDCG
}
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
half4 _TopBottomColor;
half4 frag(v2f i) : SV_Target { return _TopBottomColor; }
ENDCG
}
}
}
Which looks like this:
(Skybox source: Emil Persson, aka Humus)
This won't fully remove the rendering passes, but it should at least be faster.

Merging 2 shaders in Unity using CG programming and Shaderlab

I want to merge the functionality of these two shaders for Unity.
The first shader is the main one and adds a glitch hologram effect.
The second shader makes a surface texture flicker for added visual effect.
I am trying to merge my versions of the code, or alternatively to use separate passes that let me apply both features to the same material, if possible.
I have tried the shader pass function, but cannot make it work (see the UsePass sketch after the combined shader below).
Is it possible to do?
1st shadercode
Shader "Custom/Cool Hologram Original"
{
Properties
{
//This program takes an existing shader script and builds upon it by adding the following features:
//TintColor - allows manipulation of the colours
//Transparency - changes Opaque in the tags to Transparent so that the model becomes see-through
//Properties define public, tweakable variables that appear in the inspector
_MainTex ("AlbedoTexture", 2D) = "white" {} //the empty braces are a leftover that is no longer needed, according to a Unity developer
_TintColor("Tint Color", Color) = (1,1,1,1) //Public variable appearing in the inspector
_Transparency("Transparency", Range(0.0,0.5)) = 0.25
_CutoutThresh("Cutout Threshold", Range(0.0,1.0)) = 0.2 //must match the _CutoutThresh uniform declared below
_Distance("Distance", Float) = 1
_Amplitude("Amplitude", Float) = 1
_Speed("Speed", Float) = 1
_Amount("Amount", Range(0.0,1.0)) = 1
}
//The actual shader program
SubShader
{
Tags {"Queue"="Transparent" "RenderType"="Transparent" } //Added queue and transparent elements because order matters (see render order queu tag)
LOD 100 //LOD means level of details and used for when player is close or far away
ZWrite Off //This is related to culling and depth testing, controlling if something is written to depth buffer. Zwrite off is for transparent objects
Blend SrcAlpha OneMinusSrcAlpha //See blend factors in unity doc. It is about render order and how they should blend
Pass //A pass can be made for each type of situation, like Virtual Reality etc
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc" //Shaders does not use inheritance, it has got monobehaviour instead, so it must be defined as #included
struct appdata //Structs are objects with 2 objects each. appdata is passed in to v2f vert as argument
{
float4 vertex : POSITION; //Variable with 4 floating point numbers x,y,z,w. POSITION is a semantic binding telling where it is to be used
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION; //SV_Position corresponds to screen position in Unity
};
sampler2D _MainTex;
float4 _MainTex_ST;
float4 _TintColor;
float _Transparency;
float _CutoutThresh;
float _Distance;
float _Amplitude;
float _Speed;
float _Amount;
v2f vert (appdata v)
{
v2f o;
v.vertex.x += sin(_Time.y * _Speed + v.vertex.y * _Amplitude) * _Distance * _Amount; //Applies sinusoidal movement to the vertices in object space before the transform to clip space
o.vertex = UnityObjectToClipPos(v.vertex); //See learnopengl.com to see 5 spaces: local space, world space, view space, clip space, screen space
o.uv = TRANSFORM_TEX(v.uv, _MainTex); //Taking uv data from model and data from maintexture (see the shader texture in inspector)
return o; //Returns struct after it has been build in coordinate system, it will then be passed to fragment function
}
fixed4 frag (v2f i) : SV_Target //Takes in v2f struct calling it i and then bind it to render target, which is frame buffer for screen
{
// sample the texture
fixed4 col = tex2D(_MainTex, i.uv) + _TintColor; //fixed4 col is the colour: three RGB channels plus an alpha channel. tex2D samples _MainTex at the uv coordinates from i
col.a = _Transparency; //Override the alpha channel so the whole surface uses the chosen transparency
clip(col.r - _CutoutThresh); //Discard pixels whose red channel falls below the cutout threshold
return col;
}
ENDCG
}
}
}
2nd shadercode
Shader "Custom/Offset 1"
{
Properties {
_MainTex ("Texture", 2D) = "white" {}
_ScrollSpeeds ("Scroll Speeds", vector) = (0, -3, 0, 0)
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// make fog work
#pragma multi_compile_fog
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
UNITY_FOG_COORDS(1)
float4 vertex : SV_POSITION;
};
sampler2D _MainTex;
float4 _MainTex_ST;
// Declare our new parameter here so it's visible to the CG shader
float4 _ScrollSpeeds;
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
// Shift the uvs over time.
o.uv += _ScrollSpeeds * _Time.x;
UNITY_TRANSFER_FOG(o,o.vertex);
return o;
}
fixed4 frag (v2f i) : SV_Target
{
// sample the texture
fixed4 col = tex2D(_MainTex, i.uv);
// apply fog
UNITY_APPLY_FOG(i.fogCoord, col);
return col;
}
ENDCG
}
}
}
3rd shadercode
Shader "Custom/Combined"
{
Properties
{
_MainTex ("AlbedoTexture", 2D) = "white" {}
_HoloColor("Hologram Color", Color) = (1,1,1,1)
_Transparency("Transparency", Range(0.0,0.5)) = 0.25
_CutoutThresh("Cutout Threshold", Range(0.0,1.0)) = 0.2
_Distance("Distance", Float) = 1
_Amplitude("Amplitude", Float) = 1
_Speed("Speed", Float) = 1
_ScrollSpeeds("Scroll Speeds", vector) = (0,-3,0,0) //added from offset shader
_Amount("Amount", Range(0.0,1.0)) = 1
}
SubShader {
Tags {"Queue"="Transparent" "RenderType"="Transparent" }
LOD 100
ZWrite Off
Blend SrcAlpha OneMinusSrcAlpha
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct appdata {
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f {
float2 uv : TEXCOORD0;
UNITY_FOG_COORDS(1) //added from offset shader
float4 vertex : SV_POSITION;
};
sampler2D _MainTex;
float4 _MainTex_ST;
float4 _HoloColor;
float4 _ScrollSpeeds;
float _Transparency;
float _CutoutThresh;
float _Distance;
float _Amplitude;
float _Speed;
float _Amount;
v2f vert (appdata v)
{
v2f o;
v.vertex.x += sin(_Time.y * _Speed + v.vertex.y * _Amplitude) * _Distance * _Amount;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
o.uv += _ScrollSpeeds * _Time.x; //Added from offset shader
UNITY_TRANSFER_FOG(o,o.vertex); //added from offset shader
return o;
}
fixed4 frag (v2f i) : SV_Target
{
fixed4 col = tex2D(_MainTex, i.uv) + _HoloColor;
col.a = _Transparency;
clip(col.r - _CutoutThresh);
UNITY_APPLY_FOG(i.fogCoord, col); //Added from offset shader
return col;
}
ENDCG
}
}
Fallback "Diffuse"
}
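For the multi-pass alternative mentioned at the top of the question, a minimal sketch of the ShaderLab UsePass route, assuming the pass inside "Custom/Offset 1" is first given a Name (ShaderLab upper-cases pass names internally, so they are referenced in capitals). Since both effects here modify the same vertices and UVs, the single merged pass above is usually the cleaner option:

    //Hedged sketch: pull a named pass from another shader with UsePass
    Shader "Custom/Hologram Plus Scroll"
    {
        SubShader
        {
            Tags { "Queue"="Transparent" "RenderType"="Transparent" }
            //requires  Name "SCROLL"  inside the pass of "Custom/Offset 1"
            UsePass "Custom/Offset 1/SCROLL"
            //...followed by the hologram pass from "Custom/Cool Hologram Original"
        }
    }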

Unity shader pixel manipulation

I'm studying shaders in Unity and I'm trying to achieve the image below, but I can't seem to make my code produce the target output. My end goal is to divide the whole image into equal bands based on the size of my array. Any idea how to correct my code?
Target output:
Code:
Shader "Custom/PassArray"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
}
SubShader
{
Tags {"Queue"="Transparent"}
Blend SrcAlpha OneMinusSrcAlpha
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
uniform float4 _Test [3];
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
v2f vert (appdata v)
{
v2f o;
o.vertex = mul(UNITY_MATRIX_MVP, v.vertex);
o.uv = v.uv;
return o;
}
sampler2D _MainTex;
fixed4 frag (v2f i) : SV_Target
{
fixed4 col;
_Test[0] = fixed4(0,0,1,1);
_Test[1] = fixed4(1,0,0,1);
_Test[2] = fixed4(0,1,0,1);
//Test1: This code block produces output1 image.
int index = round(i.uv.x*_Test.Length);
col = _Test[index];
//Test2: This codeblocks outputs pure green.
//if(i.uv.x < 1/3){
// col = _Test[0];
//}
//else if(i.uv.x < 2/3){
// col = _Test[1];
//}
//else if(i.uv.x < 3/3){
// col = _Test[2];
//}
return col;
}
ENDCG
}
}
}
Actual output:
If I understood it correctly, it's giving me black pixels because the maximum i.uv.x is 1, which makes the index 3 (based on the computation i.uv.x * _Test.Length). But I don't have an index 3 in my array.
Change round to floor. Because you're rounding, values greater than 2.5 get rounded to 3. As you probably know, an array of 3 elements has no index 3, hence the black bar.
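A minimal sketch of that fix against the fragment shader above; the min() clamp is an extra guard for the i.uv.x == 1.0 edge case and is an addition, not part of the original suggestion:

    //Hedged sketch: floor() maps uv.x in [0,1) onto indices 0..2 in equal thirds
    int index = floor(i.uv.x * 3);   // 3 == number of elements in _Test
    index = min(index, 2);           // i.uv.x == 1.0 would otherwise land on index 3
    col = _Test[index];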
I've figured it out. Here's the updated code for anyone who may find it useful. I changed the formula from int index = round(i.uv.x * _Test.Length); to int index = ceil(i.uv.x / 1 * _Test.Length) - 1;
Shader "Custom/PassArray"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
}
SubShader
{
Tags {"Queue"="Transparent"}
Blend SrcAlpha OneMinusSrcAlpha
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
uniform float4 _Test [3];
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
float4 color : COLOR;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
float4 color : COLOR;
};
v2f vert (appdata v)
{
v2f o;
o.vertex = mul(UNITY_MATRIX_MVP, v.vertex);
o.uv = v.uv;
return o;
}
sampler2D _MainTex;
fixed4 frag (v2f i) : SV_Target
{
fixed4 col;
_Test[0] = fixed4(0,0,1,1);
_Test[1] = fixed4(1,0,0,1);
_Test[2] = fixed4(0,1,0,1);
//Test1:
int index = ceil(i.uv.x/1*_Test.Length)-1;
col = _Test[index];
return col;
}
ENDCG
}
}
}