I'm trying to write a shader that curves the world the way Subway Surfers does.
I found a GitHub repo where someone published an approximation that works nicely.
This is the code:
Shader "Custom/Curved" {
Properties {
_MainTex ("Base (RGB)", 2D) = "white" {}
_QOffset ("Offset", Vector) = (0,0,0,0)
_Dist ("Distance", Float) = 100.0
}
SubShader {
Tags { "RenderType"="Opaque" }
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
sampler2D _MainTex;
float4 _QOffset;
float _Dist;
struct v2f {
float4 pos : SV_POSITION;
float4 uv : TEXCOORD0;
};
v2f vert (appdata_base v)
{
v2f o;
float4 vPos = mul (UNITY_MATRIX_MV, v.vertex); // object -> view space
float zOff = vPos.z/_Dist; // view-space depth, normalized by _Dist
vPos += _QOffset*zOff*zOff; // quadratic offset: the bend grows with depth squared
o.pos = mul (UNITY_MATRIX_P, vPos); // project the displaced position to clip space
o.uv = v.texcoord;
return o;
}
half4 frag (v2f i) : COLOR
{
half4 col = tex2D(_MainTex, i.uv.xy);
return col;
}
ENDCG
}
}
FallBack "Diffuse"
}
The point is that now I want to feed those new vertex positions into a surface shader, so I can get lighting and the other built-in features.
I have read that I have to delete the fragment shader, but I still have the problem that I cannot get the new positions into the surface shader.
This is my code:
Shader "Custom/Curve" {
Properties{
_Color("Color", Color) = (1,1,1,1)
_MainTex("Albedo (RGB)", 2D) = "white" {}
_Glossiness("Smoothness", Range(0,1)) = 0.5
_Metallic("Metallic", Range(0,1)) = 0.0
_QOffset("Offset", Vector) = (0,0,0,0)
_Dist("Distance", Float) = 100.0
}
SubShader{
Tags{ "RenderType" = "Opaque" }
LOD 200
CGPROGRAM
#pragma surface surf Standard fullforwardshadows vertex:vert addshadow
#pragma target 3.0
sampler2D _MainTex;
struct Input {
float2 uv_MainTex;
};
half _Glossiness;
half _Metallic;
fixed4 _Color;
float4 _QOffset;
float _Dist;
void vert(inout appdata_full v)
{
v.position.x += 10;
}
void surf(Input IN, inout SurfaceOutputStandard o) {
// Albedo comes from a texture tinted by color
fixed4 c = tex2D(_MainTex, IN.uv_MainTex) * _Color;
o.Albedo = c.rgb;
// Metallic and smoothness come from slider variables
o.Metallic = _Metallic;
o.Smoothness = _Glossiness;
o.Alpha = c.a;
}
ENDCG
}
FallBack "Diffuse"
}
As you can see, in the vertex shader I'm just trying to add a few units to the X position of each vertex, because I think that once I get that working, applying the "curved" change will be trivial. If you think it is not going to be that easy, I would appreciate a warning.
Edited: here's an example for Unity 2018.1:
https://gist.github.com/bricevdm/caaace3cce9a87e081602ffd08dee1ad
float4 worldPosition = mul(unity_ObjectToWorld, v.vertex);
// get world space position of vertex
half2 wpToCam = _WorldSpaceCameraPos.xz - worldPosition.xz;
// get vector to camera and dismiss vertical component
half distance = dot(wpToCam, wpToCam);
// distance squared from vertex to the camera, this power gives the curvature
worldPosition.y -= distance * _Curvature;
// offset vertical position by factor and square of distance.
// the default 0.01 would lower the position by 1cm at 1m distance, 1m at 10m and 100m at 100m
v.vertex = mul(unity_WorldToObject, worldPosition);
// reproject position into object space
You are mixing up regular CG shaders and surface shaders. Your sample from GitHub is the former. v.vertex in a surface shader's vertex function is expected to be in object space, unlike float4 pos : SV_POSITION in the CG shader, which is expected to hold the final clip/screen-space position.
The solution is to invert the transformation back to object space. In your case you'd need to expose the camera's inverse view (camera-to-world) matrix: https://docs.unity3d.com/ScriptReference/Camera-cameraToWorldMatrix.html
So instead of v.vertex = mul (UNITY_MATRIX_P, vPos); you undo what you already did: since you transformed the vertex from object to camera space with UNITY_MATRIX_MV, you go back to world space first, then to object space using unity_WorldToObject (or, better, combine both matrices on the CPU).
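A minimal sketch of that literal fix, assuming a _CamToWorld uniform that you update yourself from a script every frame with camera.cameraToWorldMatrix (in the regular camera passes UNITY_MATRIX_V is the camera's world-to-camera matrix, so this undoes it):
float4x4 _CamToWorld; // assumed: set from script with camera.cameraToWorldMatrix
void vert (inout appdata_full v)
{
    float4 vPos = mul(UNITY_MATRIX_MV, v.vertex); // object -> view space, as in the original shader
    float zOff = vPos.z / _Dist;
    vPos += _QOffset * zOff * zOff;               // curve in view space
    float4 wPos = mul(_CamToWorld, vPos);         // view -> world space
    v.vertex = mul(unity_WorldToObject, wPos);    // world -> object space, which is what a surface shader expects
}
As noted above, _CamToWorld and unity_WorldToObject could be combined into a single matrix on the CPU to save a multiply per vertex.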
BUT it is actually much easier to compute the curvature in world space with the matrices already provided:
https://docs.unity3d.com/Manual/SL-UnityShaderVariables.html
unity_ObjectToWorld - current model (object-to-world) matrix.
unity_WorldToObject - inverse of the current model matrix (world-to-object).
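For example, here is a sketch of the same curvature done entirely in world space, reusing the _QOffset/_Dist properties from the question (like the gist above, it measures the distance to the camera in the horizontal XZ plane):
void vert (inout appdata_full v)
{
    float4 wPos = mul(unity_ObjectToWorld, v.vertex);                // object -> world space
    float zOff = distance(wPos.xz, _WorldSpaceCameraPos.xz) / _Dist; // horizontal distance to the camera
    wPos.xyz += _QOffset.xyz * zOff * zOff;                          // quadratic offset, as in the original shader
    v.vertex = mul(unity_WorldToObject, wPos);                       // back to object space for the surface shader
}
Nothing extra has to be passed in from a script, and with addshadow the generated shadow pass uses the same displaced vertices.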
That's because there is no member called position in the appdata_full struct:
struct appdata_full {
float4 vertex : POSITION;
float4 tangent : TANGENT;
float3 normal : NORMAL;
float4 texcoord : TEXCOORD0;
float4 texcoord1 : TEXCOORD1;
fixed4 color : COLOR;
#if defined(SHADER_API_XBOX360)
half4 texcoord2 : TEXCOORD2;
half4 texcoord3 : TEXCOORD3;
half4 texcoord4 : TEXCOORD4;
half4 texcoord5 : TEXCOORD5;
#endif
};
Instead of v.position use v.vertex like this:
void vert (inout appdata_full v, out Input o){
UNITY_INITIALIZE_OUTPUT(Input,o);
v.vertex.x += 10; // offset only the x component, as you tried to do with v.position.x
}
And here is a surface shader version of your curve shader:
Shader "Custom/Curve" {
Properties{
_Color("Color", Color) = (1,1,1,1)
_MainTex("Albedo (RGB)", 2D) = "white" {}
_Glossiness("Smoothness", Range(0,1)) = 0.5
_Metallic("Metallic", Range(0,1)) = 0.0
_QOffset("Offset", Vector) = (0,0,0,0)
_Dist("Distance", Float) = 100.0
}
SubShader{
Tags{ "RenderType" = "Opaque" }
LOD 200
CGPROGRAM
#pragma surface surf Standard fullforwardshadows vertex:vert addshadow
#pragma target 3.0
sampler2D _MainTex;
struct Input {
float2 uv_MainTex;
};
half _Glossiness;
half _Metallic;
fixed4 _Color;
float4 _QOffset;
float _Dist;
void vert (inout appdata_full v, out Input o){
UNITY_INITIALIZE_OUTPUT(Input,o);
float4 vPos = mul (UNITY_MATRIX_MV, v.vertex);
float zOff = vPos.z/_Dist;
vPos += _QOffset*zOff*zOff;
v.vertex = mul (UNITY_MATRIX_P, vPos);
v.texcoord = v.texcoord;
}
void surf(Input IN, inout SurfaceOutputStandard o) {
// Albedo comes from a texture tinted by color
fixed4 c = tex2D(_MainTex, IN.uv_MainTex) * _Color;
o.Albedo = c.rgb;
// Metallic and smoothness come from slider variables
o.Metallic = _Metallic;
o.Smoothness = _Glossiness;
o.Alpha = c.a;
}
ENDCG
}
FallBack "Diffuse"
}
I've been trying to make the shader example from Unity's website compile in Unity 2019.
Shader "Custom/test" {
Properties {
_Color ("Color", Color) = (1,1,1,1)
_MainTex ("Albedo (RGB)", 2D) = "white" {}
_Glossiness ("Smoothness", Range(0,1)) = 0.5
_Metallic ("Metallic", Range(0,1)) = 0.0
}
SubShader {
Tags {"Queue"="Transparent" "RenderType"="Transparent"}
Blend SrcAlpha OneMinusSrcAlpha
ZWrite off
LOD 200
CGPROGRAM
// Physically based Standard lighting model, and enable shadows on all light types
#pragma surface surf Standard alpha vertex:vert
// Use shader model 3.0 target, to get nicer looking lighting
#pragma target 3.0
sampler2D _MainTex;
struct appdata_particles {
float4 vertex : POSITION;
float3 normal : NORMAL;
float4 color : COLOR;
float4 texcoords : TEXCOORD0;
float texcoordBlend : TEXCOORD1;
};
struct Input {
float2 uv_MainTex;
float2 texcoord1;
float blend;
float4 color;
};
void vert(inout appdata_particles v, out Input o) {
UNITY_INITIALIZE_OUTPUT(Input,o);
o.uv_MainTex = v.texcoords.xy;
o.texcoord1 = v.texcoords.zw;
o.blend = v.texcoordBlend;
o.color = v.color;
}
half _Glossiness;
half _Metallic;
fixed4 _Color;
void surf (Input IN, inout SurfaceOutputStandard o) {
fixed4 colA = tex2D(_MainTex, IN.uv_MainTex);
fixed4 colB = tex2D(_MainTex, IN.texcoord1);
fixed4 c = 2.0f * IN.color * lerp(colA, colB, IN.blend) * _Color;
o.Albedo = c.rgb;
// Metallic and smoothness come from slider variables
o.Metallic = _Metallic;
o.Smoothness = _Glossiness;
o.Alpha = c.a;
}
ENDCG
}
FallBack "Diffuse"
}
But the compilation gives an error:
Shader error in 'Custom/test': invalid subscript 'texcoord' at line 157 (on d3d11)
I suspect there is a difference between Unity 2017, for which the example was made, and Unity 2019, but I can't figure out what the problem is.
Managed to figure it out:
Shader "Custom/StandardTransparentQueue" {
Properties {
_Color ("Color", Color) = (1,1,1,1)
_MainTex ("Albedo (RGB)", 2D) = "white" {}
_Glossiness ("Smoothness", Range(0,1)) = 0.5
_Metallic ("Metallic", Range(0,1)) = 0.0
}
SubShader {
Tags {"Queue"="Transparent" "RenderType"="Transparent"}
Blend SrcAlpha OneMinusSrcAlpha
ZWrite off
LOD 200
CGPROGRAM
// Physically based Standard lighting model, and enable shadows on all light types
#pragma surface surf Standard alpha vertex:vert
// Use shader model 3.0 target, to get nicer looking lighting
#pragma target 3.0
sampler2D _MainTex;
struct appdata_particles {
float4 vertex : POSITION;
float3 normal : NORMAL;
float4 color : COLOR;
float4 texcoords : TEXCOORD0;
float texcoordBlend : TEXCOORD1;
};
struct Input {
float2 texcoord;
float2 texcoord1;
float blend;
float4 color;
};
void vert(inout appdata_particles v, out Input o) {
UNITY_INITIALIZE_OUTPUT(Input,o);
o.texcoord = v.texcoords.xy;
o.texcoord1 = v.texcoords.zw;
o.blend = v.texcoordBlend;
o.color = v.color;
}
half _Glossiness;
half _Metallic;
fixed4 _Color;
void surf (Input IN, inout SurfaceOutputStandard o) {
fixed4 colA = tex2D(_MainTex, IN.texcoord);
fixed4 colB = tex2D(_MainTex, IN.texcoord1);
fixed4 c = 2.0f * IN.color * lerp(colA, colB, IN.blend) * _Color;
o.Albedo = c.rgb;
// Metallic and smoothness come from slider variables
o.Metallic = _Metallic;
o.Smoothness = _Glossiness;
o.Alpha = c.a;
}
ENDCG
}
FallBack "Diffuse"
}
In Unity 2019 it expects texcoord to be a member of the Input structure instead of uv_MainTex.
Hope it helps others :)
Unity project: I want to combine these two shaders into one shader to get both of their functionality. One shader handles the lighting, the other makes the sprite render better. How do I combine them?
Shader "Transparent/Cutout/Lit3dSprite" {
Properties{
_MainCol("Main Tint", Color) = (1,1,1,1)
_MainTex("Main Texture", 2D) = "white" {}
_Cutoff("Alpha cutoff", Range(0,1)) = 0.5
}
SubShader{
Tags {"Queue" = "AlphaTest" "IgnoreProjector" = "True" "RenderType" = "TransparentCutout" "PreviewType" = "Plane"}
Cull Off
LOD 200
CGPROGRAM
#pragma surface surf SimpleLambert alphatest:_Cutoff addshadow fullforwardshadows
#pragma target 3.0
sampler2D _MainTex;
fixed4 _MainCol;
half4 LightingSimpleLambert(SurfaceOutput s, half3 lightDir, half atten)
{
half4 c;
c.rgb = s.Albedo * _MainCol.rgb * (atten)* _LightColor0.rgb;
c.a = s.Alpha;
return c;
}
struct Input {
float2 uv_MainTex;
};
void surf(Input IN, inout SurfaceOutput o) {
fixed4 c = tex2D(_MainTex, IN.uv_MainTex) * _MainCol;
o.Albedo = lerp(c.rgb, c.rgb, c.a);
o.Alpha = c.a;
}
ENDCG
}
Fallback "Transparent/Cutout/VertexLit"
}
Shader 2:
// Upgrade NOTE: replaced 'mul(UNITY_MATRIX_MVP,*)' with 'UnityObjectToClipPos(*)'
Shader "RetroAA/Sprite"
{
Properties
{
[PerRendererData] _MainTex ("Sprite Texture", 2D) = "white" {}
_Color ("Tint", Color) = (1,0,0,0)
}
SubShader
{
Tags {
"Queue"="Transparent"
"IgnoreProjector"="True"
"RenderType"="Transparent"
"PreviewType"="Plane"
"CanUseSpriteAtlas"="True"
}
LOD 100
Cull Off
Lighting Off
ZWrite Off
Blend SrcAlpha OneMinusSrcAlpha
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_fog
#include "RetroAA.cginc"
struct appdata {
float4 vertex : POSITION;
float4 color : COLOR;
float2 uv : TEXCOORD0;
};
struct v2f {
float4 vertex : SV_POSITION;
fixed4 color : COLOR;
float2 uv : TEXCOORD0;
};
sampler2D _MainTex;
float4 _MainTex_ST;
float4 _MainTex_TexelSize;
float4 _Color;
v2f vert(appdata v){
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
o.color = v.color * _Color;
return o;
}
fixed4 frag(v2f i) : SV_Target {
fixed4 color = RetroAA(_MainTex, i.uv, _MainTex_TexelSize);
return i.color*color*color.a;
}
ENDCG
}
}
}
The second shader isn't too complicated, and it can be merged into the first one by using the second shader's code to change how the first shader's surf function calculates fixed4 c.
You'll also need to include the definition of RetroAA and add the texel size of the main texture to the shader variables.
However, the first shader assumes there is no partial transparency, while the second shader requires it, so you have to accommodate that: use alpha blending, change the RenderType and Queue to Transparent, and set ZWrite Off.
Here is what that might all look like together:
Shader "Transparent/Cutout/Lit3dSprite" {
Properties{
_MainCol("Main Tint", Color) = (1,1,1,1)
_MainTex("Main Texture", 2D) = "white" {}
_Cutoff("Alpha cutoff", Range(0,1)) = 0.5
}
SubShader{
// change RenderType and Queue to Transparent
Tags {"Queue" = "Transparent" "IgnoreProjector" = "True" "RenderType" = "Transparent" "PreviewType" = "Plane"}
Cull Off
ZWrite Off // Add this
LOD 200
// Enable Alpha blending here
Blend SrcAlpha OneMinusSrcAlpha
CGPROGRAM
// Enable Alpha blending here also
#pragma surface surf SimpleLambert alphatest:_Cutoff addshadow fullforwardshadows alpha:blend
#pragma target 3.0
sampler2D _MainTex;
float4 _MainTex_TexelSize; // Add this
fixed4 _MainCol;
// include this
fixed4 RetroAA(sampler2D tex, float2 uv, float4 texelSize)
{
float2 texelCoord = uv * texelSize.zw; // uv in texel units (zw holds the texture width/height)
float2 hfw = 0.5*fwidth(texelCoord); // half the screen-space footprint of one texel
float2 fl = floor(texelCoord - 0.5) + 0.5; // center of the nearest texel
float2 uvaa = (fl + smoothstep(0.5 - hfw, 0.5 + hfw, texelCoord - fl))*texelSize.xy; // only blend across texel edges, keeping texel interiors sharp
return tex2D(tex, uvaa);
}
half4 LightingSimpleLambert(SurfaceOutput s, half3 lightDir, half atten)
{
half4 c;
// Fix the lambert lighting implementation here
half NdotL = dot(s.Normal, lightDir);
// We set the surface rgba in surf, so don't need to do it again here.
c.rgb = s.Albedo * (NdotL * atten) * _LightColor0.rgb;
c.a = s.Alpha;
return c;
}
struct Input {
float2 uv_MainTex;
float4 color: Color; // Add this to use SpriteRenderer color
};
void surf(Input IN, inout SurfaceOutput o) {
// replace this line:
// fixed4 c = tex2D(_MainTex, IN.uv_MainTex) * _MainCol;
// with this
fixed4 c = RetroAA(_MainTex, IN.uv_MainTex, _MainTex_TexelSize);
// factor in MainCol and SpriteRenderer color/tints
o.Albedo = c.rgb * _MainCol.rgb * IN.color.rgb;
o.Alpha = c.a * _MainCol.a * IN.color.a;
}
ENDCG
}
Fallback "Transparent/Cutout/VertexLit"
}
You might need to turn down the Alpha cutoff to zero or some other low number in order to make the AA work nicely.
I want to merge the functionality of these two shaders for Unity.
The first shader is the main one; it adds a glitchy hologram effect.
The second shader makes a surface texture shimmer, for added visual effect.
I am trying to merge my versions of the code, or alternatively to use separate passes that let me apply both features to the same material, if that is possible.
I have tried the shader pass approach, but cannot make it work.
Is it possible to do?
1st shader code:
Shader "Custom/Cool Hologram Original"
{
Properties
{
//This program takes an existing shader script and builds on it by adding the following features:
//Tintcolor - allowing for manipulation of colors
//Transparency - Changing opaque in tags to transparency so that the model becomes transparent
//Properties allow defining public manipulative variables
_MainTex ("AlbedoTexture", 2D) = "white" {} //braces are useless leftover braces no longer needed according to Unity developer
_TintColor("Tint Color", Color) = (1,1,1,1) //Public variable appearing in inspector
_Transparency("Transparency", Range(0.0,0.5)) = 0.25
_CutoutTresh("Cutout Threshold", Range(0.0,1.0)) = 0.2
_Distance("Distance", Float) = 1
_Amplitude("Amplitude", Float) = 1
_Speed("Speed", Float) = 1
_Amount("Amount", Range(0.0,1.0)) = 1
}
//The actual shader program
SubShader
{
Tags {"Queue"="Transparent" "RenderType"="Transparent" } //Added queue and transparent elements because order matters (see render order queu tag)
LOD 100 //LOD means level of detail and is used for when the player is close or far away
ZWrite Off //This is related to culling and depth testing, controlling if something is written to depth buffer. Zwrite off is for transparent objects
Blend SrcAlpha OneMinusSrcAlpha //See blend factors in unity doc. It is about render order and how they should blend
Pass //A pass can be made for each type of situation, like Virtual Reality etc
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc" //Shaders does not use inheritance, it has got monobehaviour instead, so it must be defined as #included
struct appdata //Structs group the per-vertex data; this appdata struct is passed into the vert function as its argument
{
float4 vertex : POSITION; //Variable with 4 floating point numbers x,y,z,w. POSITION is a semantic binding telling where it is to be used
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION; //SV_Position corresponds to screen position in Unity
};
sampler2D _MainTex;
float4 _MainTex_ST;
float4 _TintColor;
float _Transparency;
float _CutoutThresh;
float _Distance;
float _Amplitude;
float _Speed;
float _Amount;
v2f vert (appdata v)
{
v2f o;
v.vertex.x += sin(_Time.y * _Speed + v.vertex.y * _Amplitude) * _Distance * _Amount; //Applies sinusoidal movement to the vertices in object space before the transform to clip space
o.vertex = UnityObjectToClipPos(v.vertex); //See learnopengl.com to see 5 spaces: local space, world space, view space, clip space, screen space
o.uv = TRANSFORM_TEX(v.uv, _MainTex); //Taking uv data from model and data from maintexture (see the shader texture in inspector)
return o; //Returns struct after it has been build in coordinate system, it will then be passed to fragment function
}
fixed4 frag (v2f i) : SV_Target //Takes in v2f struct calling it i and then bind it to render target, which is frame buffer for screen
{
// sample the texture
fixed4 col = tex2D(_MainTex, i.uv) + _TintColor; //fixed4 col is the color: RGB plus an alpha channel. tex2D samples the main texture at the uv coming from i
col.a = _Transparency; //Override the alpha channel with the transparency slider
clip(col.r - _CutoutThresh); //Discard pixels whose red channel is below the cutout threshold
return col;
}
ENDCG
}
}
}
2nd shader code:
Shader "Custom/Offset 1"
{
Properties {
_MainTex ("Texture", 2D) = "white" {}
_ScrollSpeeds ("Scroll Speeds", vector) = (0, -3, 0, 0)
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// make fog work
#pragma multi_compile_fog
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
UNITY_FOG_COORDS(1)
float4 vertex : SV_POSITION;
};
sampler2D _MainTex;
float4 _MainTex_ST;
// Declare our new parameter here so it's visible to the CG shader
float4 _ScrollSpeeds;
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
// Shift the uvs over time.
o.uv += _ScrollSpeeds * _Time.x;
UNITY_TRANSFER_FOG(o,o.vertex);
return o;
}
fixed4 frag (v2f i) : SV_Target
{
// sample the texture
fixed4 col = tex2D(_MainTex, i.uv);
// apply fog
UNITY_APPLY_FOG(i.fogCoord, col);
return col;
}
ENDCG
}
}
}
3rd shader code (my attempt at combining them):
Shader "Custom/Combined"
{
Properties
{
_MainTex ("AlbedoTexture", 2D) = "white" {}
_HoloColor("Hologram Color", Color) = (1,1,1,1)
_Transparency("Transparency", Range(0.0,0.5)) = 0.25
_CutoutTresh("Cutout Threshold", Range(0.0,1.0)) = 0.2
_Distance("Distance", Float) = 1
_Amplitude("Amplitude", Float) = 1
_Speed("Speed", Float) = 1
_ScrollSpeeds("Scroll Speeds", vector) = (0,-3,0,0) //added from offset shader
_Amount("Amount", Range(0.0,1.0)) = 1
}
SubShader {
Tags {"Queue"="Transparent" "RenderType"="Transparent" }
LOD 100
ZWrite Off
Blend SrcAlpha OneMinusSrcAlpha
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct appdata {
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f {
float2 uv : TEXCOORD0;
UNITY_FOG_COORDS(1) //added from offset shader
float4 vertex : SV_POSITION;
};
sampler2D _MainTex;
float4 _MainTex_ST;
float4 _HoloColor;
float4 _ScrollSpeeds;
float _Transparency;
float _CutoutThresh;
float _Distance;
float _Amplitude;
float _Speed;
float _Amount;
v2f vert (appdata v)
{
v2f o;
v.vertex.x += sin(_Time.y * _Speed + v.vertex.y * _Amplitude) * _Distance * _Amount;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
o.uv += _ScrollSpeeds * _Time.x; //Added from offset shader
UNITY_TRANSFER_FOG(o,o.vertex); //added from offset shader
return o;
}
fixed4 frag (v2f i) : SV_Target
{
fixed4 col = tex2D(_MainTex, i.uv) + _HoloColor;
col.a = _Transparency;
clip(col.r - _CutoutThresh);
UNITY_APPLY_FOG(i.fogCoord, col); //Added from offset shader
return col;
}
ENDCG
}
}
Fallback "Diffuse"
}
I am making a 360 viewer in Unity. To view a 360 photo I used to have a cubemap attached to a skybox, and it worked great, but the size of the cubemaps forced me to switch to textures.
All of the 360 viewer tutorials say to just put a sphere with a shader on it and place the camera inside. When I do this it doesn't work very well, because when I look at the top or bottom I see the image warped like so (the chairs are supposed to look normal):
It did not happen when I used a skybox.
Does anyone know why this is happening?
Thank you very much!
The shader you chose does not handle equirectangular distortion very well. At the poles (top and bottom) of the sphere a lot of image information has to be mapped onto a very small area, which leads to the artifacts you are seeing.
You can write a specialized shader to improve the coordinate mapping from your equirectangular image onto the sphere. One such shader has been posted on the Unity forums:
Shader "Custom/Equirectangular" {
Properties {
_Color ("Main Color", Color) = (1,1,1,1)
_MainTex ("Diffuse (RGB) Alpha (A)", 2D) = "gray" {}
}
SubShader{
Pass {
Tags {"LightMode" = "Always"}
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma fragmentoption ARB_precision_hint_fastest
#pragma glsl
#pragma target 3.0
#include "UnityCG.cginc"
struct appdata {
float4 vertex : POSITION;
float3 normal : NORMAL;
};
struct v2f
{
float4 pos : SV_POSITION;
float3 normal : TEXCOORD0;
};
v2f vert (appdata v)
{
v2f o;
o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
o.normal = v.normal;
return o;
}
sampler2D _MainTex;
#define PI 3.141592653589793
inline float2 RadialCoords(float3 a_coords)
{
float3 a_coords_n = normalize(a_coords);
float lon = atan2(a_coords_n.z, a_coords_n.x); // longitude around the Y axis, -PI..PI
float lat = acos(a_coords_n.y); // polar angle from the top pole, 0..PI
float2 sphereCoords = float2(lon, lat) * (1.0 / PI);
return float2(sphereCoords.x * 0.5 + 0.5, 1 - sphereCoords.y); // remap to 0..1 equirectangular UVs
}
float4 frag(v2f IN) : COLOR
{
float2 equiUV = RadialCoords(IN.normal);
return tex2D(_MainTex, equiUV);
}
ENDCG
}
}
FallBack "VertexLit"
}
Again, it's not my own code, but I tested it on Android devices and as a standalone PC build, and it results in very smooth poles.
Please note: this shader does not flip the normals of your sphere, so if you want your camera to sit inside the sphere you have to invert its normals, either in a 3D program or in the shader. Try adding Cull Front after line 9 above and the shader will apply its texture to the "wrong" side of the model.
I'm a beginner and I had to do a lot just to understand this thread. This is what worked for me: I just combined the answers and put everything into one script. I'm pretty sure I will forget this in a few weeks, so I'm putting it here for posterity.
Shader "Custom/Equirectangular" {
Properties {
_Color ("Main Color", Color) = (1,1,1,1)
_MainTex ("Diffuse (RGB) Alpha (A)", 2D) = "gray" {}
}
SubShader{
Pass {
Tags {"LightMode" = "Always"}
Cull Front
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma fragmentoption ARB_precision_hint_fastest
#pragma glsl
#pragma target 3.0
#include "UnityCG.cginc"
struct appdata {
float4 vertex : POSITION;
float3 normal : NORMAL;
};
struct v2f
{
float4 pos : SV_POSITION;
float3 normal : TEXCOORD0;
};
v2f vert (appdata v)
{
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.normal = v.normal;
return o;
}
sampler2D _MainTex;
#define PI 3.141592653589793
inline float2 RadialCoords(float3 a_coords)
{
float3 a_coords_n = normalize(a_coords);
float lon = atan2(a_coords_n.z, a_coords_n.x);
float lat = acos(a_coords_n.y);
float2 sphereCoords = float2(lon, lat) * (1.0 / PI);
return float2(1 - (sphereCoords.x * 0.5 + 0.5), 1 - sphereCoords.y);
}
float4 frag(v2f IN) : COLOR
{
float2 equiUV = RadialCoords(IN.normal);
return tex2D(_MainTex, equiUV);
}
ENDCG
}
}
FallBack "VertexLit"
}
Here is another shader, which flips the sphere's normals:
Shader "Flip Normals" {
Properties {
_MainTex ("Base (RGB)", 2D) = "white" {}
}
SubShader {
Tags { "RenderType" = "Opaque" }
Cull Front
CGPROGRAM
#pragma surface surf Lambert vertex:vert
sampler2D _MainTex;
struct Input {
float2 uv_MainTex;
float4 color : COLOR;
};
void vert(inout appdata_full v)
{
v.normal.xyz = v.normal * -1;
}
void surf (Input IN, inout SurfaceOutput o) {
fixed3 result = tex2D(_MainTex, IN.uv_MainTex);
o.Albedo = result.rgb;
o.Alpha = 1;
}
ENDCG
}
Fallback "Diffuse"
}
I need help applying specular shading to a 3D model after I have deformed it in a CG vertex shader. I don't know how to do this in a Unity shader. I have already searched Google and there are no hits for what I am looking for.
Below is my current solution. The 3D object is rendered twice.
Shader "myShader/DeformSpecular" {
Properties {
_Color ("Main Color", Color) = (1,1,1,0.5)
_Shininess ("Shininess", Range (0.01, 1)) = 0.7
_MainTex ("Base (RGB)", 2D) = "white" { }
_BumpMap ("Normalmap", 2D) = "bump" {}
_cubeSize("Cube Size", Range (1, 10)) = 1
}
SubShader {
Tags { "RenderType"="Opaque" }
LOD 250
Pass{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
#include "myShaderFuncs.cginc"
sampler2D _MainTex;
sampler2D _BumpMap;
half _Shininess;
float4 _Color;
float4x4 _localOrient; //local orientation
float4x4 _parentWOrient; //world orientation of parent node
float _deformParam;
struct v2f {
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
};
float4 _MainTex_ST;
v2f vert (appdata_base v)
{
v2f o;
o.pos = mul(_localOrient, v.vertex); //apply local transformation
o.pos = deformShape(_deformParam); //apply deform
o.pos = mul(_parentWOrient, o.pos); //apply parents world orientation
o.pos = mul (UNITY_MATRIX_VP, o.pos);
o.uv = TRANSFORM_TEX (v.texcoord, _MainTex);
return o;
}
half4 frag (v2f i) : COLOR
{
half4 texcol = tex2D (_MainTex, i.uv);
return texcol * _Color;
}
ENDCG
}//Pass
CGPROGRAM
#pragma surface surf MobileBlinnPhong exclude_path:prepass nolightmap noforwardadd halfasview
inline fixed4 LightingMobileBlinnPhong (SurfaceOutput s, fixed3 lightDir, fixed3 halfDir, fixed atten)
{
fixed diff = max (0, dot (s.Normal, lightDir));
fixed nh = max (0, dot (s.Normal, halfDir));
fixed spec = pow (nh, s.Specular*128) * s.Gloss;
fixed4 c;
c.rgb = (s.Albedo * _LightColor0.rgb * diff + _LightColor0.rgb * spec) * (atten*2);
c.a = 0.0;
return c;
}
sampler2D _MainTex;
sampler2D _BumpMap;
half _Shininess;
struct Input {
float2 uv_MainTex;
};
void surf (Input IN, inout SurfaceOutput o) {
fixed4 tex = tex2D(_MainTex, IN.uv_MainTex);
o.Albedo = tex.rgb;
o.Gloss = tex.a;
o.Alpha = tex.a;
o.Specular = _Shininess;
o.Normal = UnpackNormal (tex2D(_BumpMap, IN.uv_MainTex));
}
ENDCG
}//SubShader
FallBack "Mobile/VertexLit"
}
If I understood you correctly, you want to do the specular lighting calculations inside your CG shader?
Unity gives you lighting info that you can access to do your own calculations if you wish:
http://docs.unity3d.com/Documentation/Components/SL-BuiltinValues.html
However, the doc seems outdated and some variables are wrong, as is pointed out here:
http://answers.unity3d.com/questions/411114/shaderlab-builtin-lighting-properties-are-not-corr.html
Once you use these variables you can calculate the lighting however you want.
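For example, here is a minimal sketch of a Blinn-Phong specular term computed per pixel in your first (deforming) pass, using the built-in _LightColor0 and _WorldSpaceLightPos0. The worldNormal and worldPos members are assumed additions to your v2f struct, and the normal is not corrected for your deformation here:
#include "UnityLightingCommon.cginc" // declares _LightColor0

struct v2f {
    float4 pos : SV_POSITION;
    float2 uv : TEXCOORD0;
    float3 worldNormal : TEXCOORD1; // assumed: set in vert with UnityObjectToWorldNormal(v.normal)
    float3 worldPos : TEXCOORD2;    // assumed: the deformed world position, before the UNITY_MATRIX_VP multiply
};

half4 frag (v2f i) : COLOR
{
    half3 n = normalize(i.worldNormal);
    half3 l = normalize(_WorldSpaceLightPos0.xyz);                // main directional light direction
    half3 viewDir = normalize(_WorldSpaceCameraPos - i.worldPos);
    half3 h = normalize(l + viewDir);                             // Blinn-Phong half vector
    half diff = max(0, dot(n, l));
    half spec = pow(max(0, dot(n, h)), _Shininess * 128);         // same _Shininess property as above
    half4 texcol = tex2D(_MainTex, i.uv);
    half3 col = texcol.rgb * _Color.rgb * _LightColor0.rgb * diff
                + _LightColor0.rgb * spec;
    return half4(col, texcol.a);
}
For this to pick up the main light, the pass should be tagged "LightMode" = "ForwardBase"; and for a fully correct result the normals would also need to be transformed by your deformation, which is the harder part of lighting a vertex-deformed mesh.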