I'm using the following library:
https://github.com/tengbao/vanta/blob/master/src/vanta.halo.js
A demo can be found here:
https://www.vantajs.com/?effect=halo
If I'm using a bright (or even white) background color, the effect is not visible anymore.
With my limited WebGL knowledge, my guess is that this is because of the subtraction of the background color (mixedColor = texture2D(...) - backgroundColor), but I could be wrong.
void main() {
vec2 res2 = iResolution.xy * iDpr;
vec2 uv = gl_FragCoord.xy / res2; // 0 to 1
vec4 oldImage = texture2D(iBuffer, uv);
vec3 mixedColor = oldImage.rgb - backgroundColor;
float cropDist = 0.01;
float cropXOffset = 0.2;
float cropYOffset = 0.2;
vec2 offset = uv + vec2((mixedColor.g - cropXOffset) * cropDist, (mixedColor.r - cropYOffset) * cropDist);
float spinDist = 0.001;
float spinSpeed = 0.2 + 0.15 * cos(iTime * 0.5);
float timeFrac = mod(iTime, 6.5);
vec2 offset2 = uvBig + vec2(cos(timeFrac * spinSpeed) * spinDist, sin(timeFrac * spinSpeed) * spinDist);
mixedColor = texture2D(iBuffer, offset).rgb * 0.4
+ texture2D(iBuffer, offset2).rgb * 0.6
- backgroundColor;
float fadeAmt = 0.0015; // fade this amount each frame // 0.002
mixedColor = (mixedColor - fadeAmt) * .995;
vec4 spectrum = abs( abs( .95*atan(uv.x, uv.y) -vec4(0,2,4,0) ) -3. )-1.;
float angle = atan(pixel.x, pixel.y);
float dist = length(pixel - mouse2*0.15) * 8. + sin(iTime) * .01;
float flowerPeaks = .05 * amplitudeFactor * size;
float flowerPetals = 7.;
float edge = abs((dist + sin(angle * flowerPetals + iTime * 0.5) * sin(iTime * 1.5) * flowerPeaks) * 0.65 / size);
float colorChangeSpeed = 0.75 + 0.05 * sin(iTime) * 1.5;
float rainbowInput = timeFrac * colorChangeSpeed;
float brightness = 0.7;
vec4 rainbow = sqrt(j2hue(cos(rainbowInput))) + vec4(baseColor,0) - 1.0 + brightness;
float factor = smoothstep(1., .9, edge) * pow(edge, 2.);
vec3 color = rainbow.rgb * smoothstep(1., .9, edge) * pow(edge, 20.);
vec4 ring = vec4(
backgroundColor + clamp( mixedColor + color, 0., 1.)
, 1.0);
gl_FragColor = ring;
}
However, I'm not able to figure out how to adapt the behavior so that I can use a bright background.
If I remove the subtraction (and also the addition of backgroundColor at the end, in vec4 ring = vec4(clamp(...))), I get the correct effect, but with a black background.
Does anyone have an idea how to adapt the shader?
The problem is likely that backgroundColor is being added to the clamped color when the ring value is calculated. With a bright background this blows out the final color: for a white background, backgroundColor + clamp(mixedColor + color, 0., 1.) is already at least 1.0 in every channel, so the whole quad renders white and the effect disappears.
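One way around this (a minimal sketch, not the library's actual code) is to accumulate the effect against black, as you already tried, and then composite it over the background with a screen blend instead of a plain addition. A screen blend can never exceed 1.0, so even a white background doesn't blow out:
vec3 effect = clamp(mixedColor + color, 0., 1.); // effect only, no background
// screen blend: 1 - (1 - bg) * (1 - effect), always stays within [0, 1]
vec3 blended = 1.0 - (1.0 - backgroundColor) * (1.0 - effect);
gl_FragColor = vec4(blended, 1.0);
Note that if this same pass also feeds iBuffer for the next frame, it's the effect-only value (not the blended one) that needs to end up in the buffer, e.g. via a separate presentation pass.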
I followed a tutorial online that basically draws a shape on a plane. I want to make only the shape visible and the background of the plane invisible, so that I can put it onto another plane instead.
I'm extremely new to Three.js and GLSL shaders, so I'm completely lost on this; any help is welcome, thank you.
My Fragment Shader:
uniform float time;
uniform float progress;
uniform vec2 mouse;
uniform sampler2D matcap;
uniform vec4 resolution;
varying vec2 vUv;
varying vec3 vPosition;
float PI = 3.141592653589793238;
mat4 rotationMatrix(vec3 axis, float angle) {
axis = normalize(axis);
float s = sin(angle);
float c = cos(angle);
float oc = 1.0 - c;
return mat4(oc * axis.x * axis.x + c, oc * axis.x * axis.y - axis.z * s, oc * axis.z * axis.x + axis.y * s, 0.0,
oc * axis.x * axis.y + axis.z * s, oc * axis.y * axis.y + c, oc * axis.y * axis.z - axis.x * s, 0.0,
oc * axis.z * axis.x - axis.y * s, oc * axis.y * axis.z + axis.x * s, oc * axis.z * axis.z + c, 0.0,
0.0, 0.0, 0.0, 1.0);
}
vec2 getmatcap(vec3 eye, vec3 normal) {
vec3 reflected = reflect(eye, normal);
float m = 2.8284271247461903 * sqrt( reflected.z+1.0 );
return reflected.xy / m + 0.5;
}
vec3 rotate(vec3 v, vec3 axis, float angle) {
mat4 m = rotationMatrix(axis, angle);
return (m * vec4(v, 1.0)).xyz;
}
float smin( float a, float b, float k )
{
float h = clamp( 0.5+0.5*(b-a)/k, 0.0, 1.0 );
return mix( b, a, h ) - k*h*(1.0-h);
}
float sdSphere( vec3 p, float r ){
return length(p)-r;
}
float sdBox( vec3 p, vec3 b ){
vec3 q = abs(p) - b;
return length(max(q,0.0)) + min(max(q.x,max(q.y,q.z)),0.);
}
float rand(vec2 co){
return fract(sin(dot(co.xy, vec2(12.9898, 78.233))) * 43758.5453);
}
float sdf(vec3 p){
vec3 p1 = rotate(p,vec3(1.),time/5.);
float box = smin(sdBox(p1,vec3(0.2)),sdSphere(p,0.2), 0.3);
float realsphere = sdSphere(p1, 0.1);
float final = mix(box,realsphere,progress);
for(float i = 0.; i < 10.; i++) {
float randOffset = rand(vec2(i,0.));
float progr = 1. - fract(time/3. + randOffset*3.);
vec3 pos = vec3(sin(randOffset*2.*PI),cos(randOffset*2.*PI),0.);
float gotoCenter = sdSphere(p-pos*progr, 0.1);
final = smin(final,gotoCenter,0.1);
}
float mouseSphere = sdSphere(p - vec3(mouse*2.,0.),0.1);
return smin(final, mouseSphere, 0.1);
}
vec3 calcNormal( in vec3 p )
{
const float eps = 0.0001;
const vec2 h = vec2(eps,0);
return normalize( vec3(sdf(p+h.xyy) - sdf(p-h.xyy),
sdf(p+h.yxy) - sdf(p-h.yxy),
sdf(p+h.yyx) - sdf(p-h.yyx) ) );
}
void main() {
float dist = length(vUv - vec2(0.5));
vec3 bg = mix(vec3(0.3),vec3(0.0), dist);
vec3 camPos = vec3(0.,0.,2.);
vec3 ray = normalize(vec3(vUv - vec2(0.5),-1));
vec3 rayPos = camPos;
float t = 0.;
float tMax = 5.;
for(int i = 0; i < 256; i++) {
vec3 pos = camPos + t*ray;
float h = sdf(pos);
if(h<0.0001 || t>tMax) break;
t+=h;
}
vec3 color = bg;
if(t<tMax){
vec3 pos = camPos + t*ray;
color = vec3(1.);
vec3 normal = calcNormal(pos);
color = normal;
float diff = dot(vec3(1.),normal);
vec2 matcapUV = getmatcap(ray,normal);
color = vec3(diff);
color = texture2D(matcap,matcapUV).rgb;
float fresnel = pow(1. + dot(ray,normal),3.);
color = mix(color,bg,fresnel);
}
gl_FragColor = vec4(color,1.);
}
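Regarding the transparency question: one common approach (a minimal sketch, reusing the t and tMax variables from the shader above) is to output alpha 0 whenever the ray misses the shape, and to set transparent: true on the THREE.ShaderMaterial so the alpha channel is actually used:
float alpha = 1.0;
if (t >= tMax) {
alpha = 0.0; // ray missed the SDF: make the plane's background invisible
// alternatively: discard;
}
gl_FragColor = vec4(color, alpha);
With discard the material setting doesn't matter, since the fragment is dropped entirely.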
The depth fragment shader is A.frag:
#version 430 core
uniform float pointSize;
uniform mat4 projectMatrix;
in vec3 eyeSpacePos;
void main(){
vec3 normal;
// reconstruct a sphere normal from the point-sprite coordinate
normal.xy = gl_PointCoord.xy * vec2(2.0, -2.0) + vec2(-1.0,1.0);
float mag = dot(normal.xy, normal.xy);
if(mag > 1.0) discard; // outside the unit circle: not part of the sphere
normal.z = sqrt(1.0 - mag);
// offset the fragment onto the sphere surface, then reproject to get its depth
vec4 pixelEyePos = vec4(eyeSpacePos + normal * pointSize, 1.0f);
vec4 pixelClipPos = projectMatrix * pixelEyePos;
float ndcZ = pixelClipPos.z / pixelClipPos.w;
gl_FragDepth = ndcZ;
}
The shader that accepts the depth map is B.frag:
uniform sampler2D u_DepthTex; // the depth map written by A.frag
in vec2 Texcoord;
void main(){
float pixelDepth = texture(u_DepthTex, Texcoord).r;
gl_FragDepth = outDepth; // outDepth is what I don't know how to compute
}
How can I convert pixelDepth into camera space in B.frag? I have tried many times without success.
The NDC coordinate is in the range [-1.0, 1.0]; the depth has to be in the depth range, which is by default [0.0, 1.0]:
gl_FragDepth = ndcZ * 0.5 + 0.5;
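To get back to camera space in B.frag, invert that mapping and then undo the projection. A minimal sketch, assuming the depth map stores ndcZ * 0.5 + 0.5 and that projectMatrix is the same standard perspective projection used in A.frag:
uniform sampler2D u_DepthTex;
uniform mat4 projectMatrix; // same projection as in A.frag
in vec2 Texcoord;
void main(){
float pixelDepth = texture(u_DepthTex, Texcoord).r; // in [0.0, 1.0]
float ndcZ = pixelDepth * 2.0 - 1.0; // back to [-1.0, 1.0]
// for a perspective projection P (column-major in GLSL),
// ndcZ = (P[2][2] * eyeZ + P[3][2]) / -eyeZ, so solving for eyeZ:
float eyeZ = -projectMatrix[3][2] / (ndcZ + projectMatrix[2][2]);
// eyeZ is the camera-space Z (negative in front of the camera)
}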
I'm trying to implement shadows in the beam material from the UE4 DMX Plugin, so I can use it with a flashlight and stop worrying about light trails caused by volumetric fog. The problem is that I really don't know how; actually, I don't know anything about HLSL.
So, if any tech artist wizard can help me and explain like I'm five or give me some material to study, it would be awesome.
Lightbeam code:
float traversalDepth = FDepth - NDepth ;
uint numSteps = floor(traversalDepth / StepSize) ;
float3 posOffset = normalize(FSlice-NSlice) * StepSize ;
float Adj = AdjOpp.x;
float Opp = AdjOpp.y + ConeRadius;
float3 cumul = 0;
for(uint i=0; i<numSteps; i++){
///Position & depth at rayHit
float3 pos = NSlice + posOffset * i ;
float depth = NDepth + StepSize * i ;
float dist = length(pos);
float falloff = 1.0f-(length(pos)/MaxDistance);
///Domain Transform
pos.z = -pos.z;
pos /= float3(Opp*2,Opp*2,Adj);
float div = ConeRadius / Opp;
div = (pos.z*(1-div))+div;
pos.xy /= div;
//Falloff old
//float falloff = 1.0-saturate(length(pos));
//Center domain
pos.z -= 0.5 ;
///Clip domain edges.
float maskX = (1-abs(pos.x)) > 0.5 ;
float maskY = (1-abs(pos.y)) > 0.5 ;
float maskZ = (1-abs(pos.z)) > 0.5 ;
if( (maskX*maskY*maskZ) - 0.5 < 0 ) continue ;
///Soft clipping with scene depth.
float dClip = saturate((ScDepth-depth)/SoftClipSize);
// UVs from pos
pos.xy = saturate(pos.xy+0.5);
float2 GoboUV = pos.xy;
float2 ColorUV = pos.xy;
// Gobo scale offset
GoboUV.x = GoboUV.x / NumGobos;
GoboUV.x = GoboUV.x + (GoboIndex/NumGobos) ;
// Gobo scrolling
GoboUV.x = GoboUV.x + (Time*GoboScrollingSpeed);
// Sample Gobo
float GoboSample = TXTpGobo.SampleLevel(TXTpGoboSampler,GoboUV.xy,0) ;
// Color Wheel scale offset
ColorUV.x = ColorUV.x / NumColors;
ColorUV.x = ColorUV.x + (ColorIndex/NumColors) ;
// Color scrolling
ColorUV.x = ColorUV.x + ((Time-CurrentTime) * ColorScrollingSpeed);
Material:
https://blueprintue.com/blueprint/zf-3xwb_/
Beam Material
I'm currently trying to port a shadertoy.com shader (Atmospheric Scattering Sample, interactive demo with code) to Unity. The shader is written in GLSL and I have to start the editor with C:\Program Files\Unity\Editor>Unity.exe -force-opengl to make it render the shader (otherwise a "This shader cannot be run on this GPU" error comes up), but that's not a problem right now. The problem is with porting that shader to Unity.
The functions for the scattering etc. are all identical and "runnable" in my ported shader; the only thing is that the mainImage() function manages the camera, light directions and ray direction itself. This of course has to be changed so that Unity's camera position, view direction, light sources and directions are used.
The main function of the original looks like this:
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
// default ray dir
vec3 dir = ray_dir( 45.0, iResolution.xy, fragCoord.xy );
// default ray origin
vec3 eye = vec3( 0.0, 0.0, 2.4 );
// rotate camera
mat3 rot = rot3xy( vec2( 0.0, iGlobalTime * 0.5 ) );
dir = rot * dir;
eye = rot * eye;
// sun light dir
vec3 l = vec3( 0, 0, 1 );
vec2 e = ray_vs_sphere( eye, dir, R );
if ( e.x > e.y ) {
discard;
}
vec2 f = ray_vs_sphere( eye, dir, R_INNER );
e.y = min( e.y, f.x );
vec3 I = in_scatter( eye, dir, e, l );
fragColor = vec4( I, 1.0 );
}
I've read through the documentation of that function and how it's supposed to work at https://www.shadertoy.com/howto .
Image shaders implement the mainImage() function in order to generate
the procedural images by computing a color for each pixel. This
function is expected to be called once per pixel, and it is
responsibility of the host application to provide the right inputs to
it and get the output color from it and assign it to the screen pixel.
The prototype is:
void mainImage( out vec4 fragColor, in vec2 fragCoord );
where fragCoord contains the pixel coordinates for which the shader
needs to compute a color. The coordinates are in pixel units, ranging
from 0.5 to resolution-0.5, over the rendering surface, where the
resolution is passed to the shader through the iResolution uniform
(see below).
The resulting color is gathered in fragColor as a four component
vector, the last of which is ignored by the client. The result is
gathered as an "out" variable in prevision of future addition of
multiple render targets.
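For reference, the host effectively wraps the function like this (this is essentially what Shadertoy does internally):
void main() {
mainImage(gl_FragColor, gl_FragCoord.xy);
}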
So in that function there are references to iGlobalTime to make the camera rotate with time, and references to iResolution for the resolution. I've embedded the shader in a Unity shader and tried to fix and wire up dir, eye and l so that it works with Unity, but I'm completely stuck. I get some sort of picture which looks "related" to the original shader (top is the original, bottom the current Unity state).
I'm not a shader professional; I only know some basics of OpenGL. For the most part I write game logic in C#, so all I could really do was look at other shader examples and at how I could get the data about the camera, light sources etc. into this code, but as you can see, nothing really works out.
I've copied the skeleton code for the shader from https://en.wikibooks.org/wiki/GLSL_Programming/Unity/Specular_Highlights and some vectors from http://forum.unity3d.com/threads/glsl-shader.39629/ .
I hope someone can point me in some direction on how to fix this shader / correctly port it to Unity. Below is the current shader code; all you have to do to reproduce it is create a new shader in a blank project, copy the code inside, make a new material, assign the shader to that material, add a sphere with that material on it, and add a directional light.
Shader "Unlit/AtmoFragShader" {
Properties{
_MainTex("Base (RGB)", 2D) = "white" {}
_LC("LC", Color) = (1,0,0,0) /* stuff from the testing shader, now really used */
_LP("LP", Vector) = (1,1,1,1)
}
SubShader{
Tags{ "Queue" = "Geometry" } //Is this even the right queue?
Pass{
//Tags{ "LightMode" = "ForwardBase" }
GLSLPROGRAM
/* begin port by copying in the constants */
// math const
const float PI = 3.14159265359;
const float DEG_TO_RAD = PI / 180.0;
const float MAX = 10000.0;
// scatter const
const float K_R = 0.166;
const float K_M = 0.0025;
const float E = 14.3; // light intensity
const vec3 C_R = vec3(0.3, 0.7, 1.0); // 1 / wavelength ^ 4
const float G_M = -0.85; // Mie g
const float R = 1.0; /* this is the radius of the sphere? this should be set from the geometry or something.. */
const float R_INNER = 0.7;
const float SCALE_H = 4.0 / (R - R_INNER);
const float SCALE_L = 1.0 / (R - R_INNER);
const int NUM_OUT_SCATTER = 10;
const float FNUM_OUT_SCATTER = 10.0;
const int NUM_IN_SCATTER = 10;
const float FNUM_IN_SCATTER = 10.0;
/* begin functions. These are out of the defines because they should be accesible to anyone. */
// angle : pitch, yaw
mat3 rot3xy(vec2 angle) {
vec2 c = cos(angle);
vec2 s = sin(angle);
return mat3(
c.y, 0.0, -s.y,
s.y * s.x, c.x, c.y * s.x,
s.y * c.x, -s.x, c.y * c.x
);
}
// ray direction
vec3 ray_dir(float fov, vec2 size, vec2 pos) {
vec2 xy = pos - size * 0.5;
float cot_half_fov = tan((90.0 - fov * 0.5) * DEG_TO_RAD);
float z = size.y * 0.5 * cot_half_fov;
return normalize(vec3(xy, -z));
}
// ray intersects sphere
// e = -b +/- sqrt( b^2 - c )
vec2 ray_vs_sphere(vec3 p, vec3 dir, float r) {
float b = dot(p, dir);
float c = dot(p, p) - r * r;
float d = b * b - c;
if (d < 0.0) {
return vec2(MAX, -MAX);
}
d = sqrt(d);
return vec2(-b - d, -b + d);
}
// Mie
// g : ( -0.75, -0.999 )
// 3 * ( 1 - g^2 ) 1 + c^2
// F = ----------------- * -------------------------------
// 2 * ( 2 + g^2 ) ( 1 + g^2 - 2 * g * c )^(3/2)
float phase_mie(float g, float c, float cc) {
float gg = g * g;
float a = (1.0 - gg) * (1.0 + cc);
float b = 1.0 + gg - 2.0 * g * c;
b *= sqrt(b);
b *= 2.0 + gg;
return 1.5 * a / b;
}
// Reyleigh
// g : 0
// F = 3/4 * ( 1 + c^2 )
float phase_reyleigh(float cc) {
return 0.75 * (1.0 + cc);
}
float density(vec3 p) {
return exp(-(length(p) - R_INNER) * SCALE_H);
}
float optic(vec3 p, vec3 q) {
vec3 step = (q - p) / FNUM_OUT_SCATTER;
vec3 v = p + step * 0.5;
float sum = 0.0;
for (int i = 0; i < NUM_OUT_SCATTER; i++) {
sum += density(v);
v += step;
}
sum *= length(step) * SCALE_L;
return sum;
}
vec3 in_scatter(vec3 o, vec3 dir, vec2 e, vec3 l) {
float len = (e.y - e.x) / FNUM_IN_SCATTER;
vec3 step = dir * len;
vec3 p = o + dir * e.x;
vec3 v = p + dir * (len * 0.5);
vec3 sum = vec3(0.0);
for (int i = 0; i < NUM_IN_SCATTER; i++) {
vec2 f = ray_vs_sphere(v, l, R);
vec3 u = v + l * f.y;
float n = (optic(p, v) + optic(v, u)) * (PI * 4.0);
sum += density(v) * exp(-n * (K_R * C_R + K_M));
v += step;
}
sum *= len * SCALE_L;
float c = dot(dir, -l);
float cc = c * c;
return sum * (K_R * C_R * phase_reyleigh(cc) + K_M * phase_mie(G_M, c, cc)) * E;
}
/* end functions */
/* vertex shader begins here*/
#ifdef VERTEX
const float SpecularContribution = 0.3;
const float DiffuseContribution = 1.0 - SpecularContribution;
uniform vec4 _LP;
varying vec2 TextureCoordinate;
varying float LightIntensity;
varying vec4 someOutput;
/* transient stuff */
varying vec3 eyeOutput;
varying vec3 dirOutput;
varying vec3 lOutput;
varying vec2 eOutput;
/* lighting stuff */
// i.e. one could #include "UnityCG.glslinc"
uniform vec3 _WorldSpaceCameraPos;
// camera position in world space
uniform mat4 _Object2World; // model matrix
uniform mat4 _World2Object; // inverse model matrix
uniform vec4 _WorldSpaceLightPos0;
// direction to or position of light source
uniform vec4 _LightColor0;
// color of light source (from "Lighting.cginc")
void main()
{
/* code from that example shader */
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
vec3 ecPosition = vec3(gl_ModelViewMatrix * gl_Vertex);
vec3 tnorm = normalize(gl_NormalMatrix * gl_Normal);
vec3 lightVec = normalize(_LP.xyz - ecPosition);
vec3 reflectVec = reflect(-lightVec, tnorm);
vec3 viewVec = normalize(-ecPosition);
/* copied from https://en.wikibooks.org/wiki/GLSL_Programming/Unity/Specular_Highlights for testing stuff */
//I have no idea what I'm doing, but hopefully this computes some vectors which I need
mat4 modelMatrix = _Object2World;
mat4 modelMatrixInverse = _World2Object; // unity_Scale.w
// is unnecessary because we normalize vectors
vec3 normalDirection = normalize(vec3(
vec4(gl_Normal, 0.0) * modelMatrixInverse));
vec3 viewDirection = normalize(vec3(
vec4(_WorldSpaceCameraPos, 1.0)
- modelMatrix * gl_Vertex));
vec3 lightDirection;
float attenuation;
if (0.0 == _WorldSpaceLightPos0.w) // directional light?
{
attenuation = 1.0; // no attenuation
lightDirection = normalize(vec3(_WorldSpaceLightPos0));
}
else // point or spot light
{
vec3 vertexToLightSource = vec3(_WorldSpaceLightPos0
- modelMatrix * gl_Vertex);
float distance = length(vertexToLightSource);
attenuation = 1.0 / distance; // linear attenuation
lightDirection = normalize(vertexToLightSource);
}
/* test port */
// default ray dir
//That's the direction of the camera here?
vec3 dir = viewDirection; //normalDirection;//viewDirection;// tnorm;//lightVec;//lightDirection;//normalDirection; //lightVec;//tnorm;//ray_dir(45.0, iResolution.xy, fragCoord.xy);
// default ray origin
//I think they mean the position of the camera here?
vec3 eye = vec3(_WorldSpaceCameraPos); //vec3(_WorldSpaceLightPos0); //// vec3(0.0, 0.0, 0.0); //_WorldSpaceCameraPos;//ecPosition; //vec3(0.0, 0.0, 2.4);
// rotate camera not needed, remove it
// sun light dir
//I think they mean the direciton of our directional light?
vec3 l = lightDirection;//_LightColor0.xyz; //lightDirection; //normalDirection;//normalize(vec3(_WorldSpaceLightPos0));//lightVec;// vec3(0, 0, 1);
/* this computes the intersection of the ray and the sphere.. is this really needed?*/
vec2 e = ray_vs_sphere(eye, dir, R);
/* copy stuff so that we can use it in the fragment shader; "discard" is only allowed in the fragment shader,
so the rest has to be computed there */
eOutput = e;
eyeOutput = eye;
dirOutput = dir;
lOutput = l;
}
#endif
#ifdef FRAGMENT
uniform sampler2D _MainTex;
varying vec2 TextureCoordinate;
uniform vec4 _LC;
varying float LightIntensity;
/* transient port */
varying vec3 eyeOutput;
varying vec3 dirOutput;
varying vec3 lOutput;
varying vec2 eOutput;
void main()
{
/* real fragment */
if (eOutput.x > eOutput.y) {
//discard;
}
vec2 f = ray_vs_sphere(eyeOutput, dirOutput, R_INNER);
vec2 e = eOutput;
e.y = min(e.y, f.x);
vec3 I = in_scatter(eyeOutput, dirOutput, e, lOutput);
gl_FragColor = vec4(I, 1.0);
/*vec4 c2;
c2.x = 1.0;
c2.y = 1.0;
c2.z = 0.0;
c2.w = 1.0f;
gl_FragColor = c2;*/
//gl_FragColor = c;
}
#endif
ENDGLSL
}
}
}
Any help is appreciated, sorry for the long post and explanations.
Edit: I just found out that the radius of the sphere does have an influence; a sphere with scale 2.0 in every direction gives a much better result. However, the picture is still completely independent of the viewing angle of the camera and of any lights; this is nowhere near the Shadertoy version.
It looks like you are trying to render a 2D effect over a sphere, which calls for a different approach. For what you are trying to do, I would apply the shader to a plane crossing the sphere.
For general purposes, have a look at this article showing how to convert Shadertoy shaders to Unity3D.
Some of the steps are included here:
Replace iGlobalTime shader input (“shader playback time in seconds”) with _Time.y
Replace iResolution.xy (“viewport resolution in pixels”) with _ScreenParams.xy
Replace vec2 types with float2, mat2 with float2x2 etc. (see the example after this list)
Replace vec3(1) shortcut constructors in which all elements have same value with explicit float3(1,1,1)
Replace texture2D with tex2D
Replace atan(x,y) with atan2(y,x) <- Note parameter ordering!
Replace mix() with lerp()
Replace matrix multiplication (*=) with mul()
Remove the third (bias) parameter from texture2D lookups
mainImage(out vec4 fragColor, in vec2 fragCoord) is the fragment shader function, equivalent to float4 mainImage(float2 fragCoord : SV_POSITION) : SV_Target
UV coordinates in GLSL have 0 at the top and increase downwards, in HLSL 0 is at the bottom and increases upwards, so you may need to use uv.y = 1 - uv.y at some point.
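For example, applying the vec-to-float, mix-to-lerp and mul() steps side by side (a sketch with made-up variable names, not taken from the shader above):
// GLSL (Shadertoy)
vec3 col = mix(colA, colB, t);
dir = rot * dir; // mat3 * vec3

// HLSL (Unity CG)
float3 col = lerp(colA, colB, t);
dir = mul(rot, dir); // float3x3 multiplication via mul()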
About this question:
Tags{ "Queue" = "Geometry" } //Is this even the right queue?
The Queue tag defines the order in which objects are rendered; Geometry is one of the first. If you want your shader to render on top of everything, you could use Overlay, for example. This topic is covered here.
Background - this render queue is rendered before any others. It is used for skyboxes and the like.
Geometry (default) - this is used for most objects. Opaque geometry uses this queue.
AlphaTest - alpha tested geometry uses this queue. It's a separate queue from the Geometry one since it's more efficient to render alpha-tested objects after all solid ones are drawn.
Transparent - this render queue is rendered after Geometry and AlphaTest, in back-to-front order. Anything alpha-blended (i.e. shaders that don’t write to depth buffer) should go here (glass, particle effects).
Overlay - this render queue is meant for overlay effects. Anything rendered last should go here (e.g. lens flares).
I am following the quaternion tutorial: http://www.raywenderlich.com/12667/how-to-rotate-a-3d-object-using-touches-with-opengl and am trying to rotate a globe to some XYZ location. I have an initial quaternion and generate a random XYZ location on the surface of the globe. I pass that XYZ location into the following function. The idea was to generate a lookAt vector with GLKMatrix4MakeLookAt and define the end Quaternion for the slerp step from the lookAt matrix.
- (void)rotateToLocationX:(float)x andY:(float)y andZ:(float)z {
// Turn on the interpolation for smooth rotation
_slerping = YES; // Begin auto rotating to this location
_slerpCur = 0;
_slerpMax = 1.0;
_slerpStart = _quat;
// The eye location is defined by the look at location multiplied by this modifier
float modifier = 1.0;
// Create a look at vector for which we will create a GLK4Matrix from
float xEye = x;
float yEye = y;
float zEye = z;
//NSLog(@"%f %f %f %f %f %f",xEye, yEye, zEye, x, y, z);
_currentSatelliteLocation = GLKMatrix4MakeLookAt(xEye, yEye, zEye, 0, 0, 0, 0, 1, 0);
_currentSatelliteLocation = GLKMatrix4Multiply(_currentSatelliteLocation,self.effect.transform.modelviewMatrix);
// Turn our 4x4 matrix into a quat and use it to mark the end point of our interpolation
//_currentSatelliteLocation = GLKMatrix4Translate(_currentSatelliteLocation, 0.0f, 0.0f, GLOBAL_EARTH_Z_LOCATION);
_slerpEnd = GLKQuaternionMakeWithMatrix4(_currentSatelliteLocation);
// Print info on the quat
GLKVector3 vec = GLKQuaternionAxis(_slerpEnd);
float angle = GLKQuaternionAngle(_slerpEnd);
//NSLog(@"%f %f %f %f",vec.x,vec.y,vec.z,angle);
NSLog(@"Quat end:");
[self printMatrix:_currentSatelliteLocation];
//[self printMatrix:self.effect.transform.modelviewMatrix];
}
The interpolation works, I get a smooth rotation, however the ending location is never the XYZ I input - I know this because my globe is a sphere and I am calculating XYZ from Lat Lon. I want to look directly down the 'lookAt' vector toward the center of the earth from that lat/lon location on the surface of the globe after the rotation. I think it may have something to do with the up vector but I've tried everything that made sense.
What am I doing wrong - How can I define a final quaternion that when I finish rotating, looks down a vector to the XYZ on the surface of the globe? Thanks!
Is the following what you mean?
Your globe center is (0, 0, 0), the radius is R, the start position is (0, 0, R) and the final position is (0, R, 0), so you rotate the globe 90 degrees around the X-axis?
If so, just set the lookAt function's eye position to your final position and its look-at target to the globe center.
m_target.x = 0.0f;
m_target.y = 0.0f;
m_target.z = 1.0f;
m_right.x = 1.0f;
m_right.y = 0.0f;
m_right.z = 0.0f;
m_up.x = 0.0f;
m_up.y = 1.0f;
m_up.z = 0.0f;
void CCamera::RotateX( float amount )
{
Point3D target = m_target;
Point3D up = m_up;
amount = amount / 180 * PI;
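// note: cos(PI / 2 - amount) == sin(amount) and cos(PI / 2 + amount) == -sin(amount),
// so the lines below are a standard 2D rotation of target and up in the plane they span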
m_target.x = (cos(PI / 2 - amount) * up.x) + (cos(amount) * target.x);
m_target.y = (cos(PI / 2 - amount) * up.y) + (cos(amount) * target.y);
m_target.z = (cos(PI / 2 - amount) * up.z) + (cos(amount) * target.z);
m_up.x = (cos(amount) * up.x) + (cos(PI / 2 + amount) * target.x);
m_up.y = (cos(amount) * up.y) + (cos(PI / 2 + amount) * target.y);
m_up.z = (cos(amount) * up.z) + (cos(PI / 2 + amount) * target.z);
Normalize(m_target);
Normalize(m_up);
}
void CCamera::RotateY( float amount )
{
Point3D target = m_target;
Point3D right = m_right;
amount = amount / 180 * PI;
m_target.x = (cos(PI / 2 + amount) * right.x) + (cos(amount) * target.x);
m_target.y = (cos(PI / 2 + amount) * right.y) + (cos(amount) * target.y);
m_target.z = (cos(PI / 2 + amount) * right.z) + (cos(amount) * target.z);
m_right.x = (cos(amount) * right.x) + (cos(PI / 2 - amount) * target.x);
m_right.y = (cos(amount) * right.y) + (cos(PI / 2 - amount) * target.y);
m_right.z = (cos(amount) * right.z) + (cos(PI / 2 - amount) * target.z);
Normalize(m_target);
Normalize(m_right);
}
void CCamera::RotateZ( float amount )
{
Point3D right = m_right;
Point3D up = m_up;
amount = amount / 180 * PI;
m_up.x = (cos(amount) * up.x) + (cos(PI / 2 - amount) * right.x);
m_up.y = (cos(amount) * up.y) + (cos(PI / 2 - amount) * right.y);
m_up.z = (cos(amount) * up.z) + (cos(PI / 2 - amount) * right.z);
m_right.x = (cos(PI / 2 + amount) * up.x) + (cos(amount) * right.x);
m_right.y = (cos(PI / 2 + amount) * up.y) + (cos(amount) * right.y);
m_right.z = (cos(PI / 2 + amount) * up.z) + (cos(amount) * right.z);
Normalize(m_right);
Normalize(m_up);
}
void CCamera::Normalize( Point3D &p )
{
float length = sqrt(p.x * p.x + p.y * p.y + p.z * p.z);
if (1 == length || 0 == length)
{
return;
}
float scaleFactor = 1.0 / length;
p.x *= scaleFactor;
p.y *= scaleFactor;
p.z *= scaleFactor;
}
The answer to this question is a combination of the following rotateTo function and a change to the code from Ray's tutorial at http://www.raywenderlich.com/12667/how-to-rotate-a-3d-object-using-touches-with-opengl . As one of the comments on that article points out, there is an arbitrary factor of 2.0 being multiplied in GLKQuaternion Q_rot = GLKQuaternionMakeWithAngleAndVector3Axis(angle * 2.0, axis);. Remove that "2", and use the following function to create _slerpEnd; after that, the globe will rotate smoothly to the specified XYZ.
// Rotate the globe using Slerp interpolation to an XYZ coordinate
- (void)rotateToLocationX:(float)x andY:(float)y andZ:(float)z {
// Turn on the interpolation for smooth rotation
_slerping = YES; // Begin auto rotating to this location
_slerpCur = 0;
_slerpMax = 1.0;
_slerpStart = _quat;
// Create a look at vector for which we will create a GLK4Matrix from
float xEye = x;
float yEye = y;
float zEye = z;
_currentSatelliteLocation = GLKMatrix4MakeLookAt(xEye, yEye, zEye, 0, 0, 0, 0, 1, 0);
// Turn our 4x4 matrix into a quat and use it to mark the end point of our interpolation
_slerpEnd = GLKQuaternionMakeWithMatrix4(_currentSatelliteLocation);
}