Normal estimation of signed distance function just won't work - unity3d

I'm trying to create a compute shader in Unity that performs raymarching. So far I have an SDF of a box and a ray generation / marching function, all of which seem to work fine:
struct Ray {
float3 position;
float3 direction;
};
Ray CreateCameraRay(float2 uv) {
Ray ray;
// Camera origin: the camera-space origin transformed into world space
ray.position = mul(CameraToWorld, float4(0, 0, 0, 1)).xyz;
// Un-project the NDC coordinate into a camera-space direction, then
// rotate it into world space (w = 0, so no translation is applied)
ray.direction = mul(CameraInverseProjection, float4(uv, 0, 1)).xyz;
ray.direction = mul(CameraToWorld, float4(ray.direction, 0)).xyz;
ray.direction = normalize(ray.direction);
return ray;
}
float sdf_box (float3 p)
{
float3 c = float3(0.0f, 0.0f, 0.0f); // box centre
float3 s = float3(1.0f, 1.0f, 1.0f); // box size
// Per-axis distance to the nearer face plane. Note these are plain
// scalars; the original mixed a float3 into each max(), which HLSL
// implicitly truncated back to its .x component anyway.
float x = max(p.x - c.x - s.x / 2., c.x - p.x - s.x / 2.);
float y = max(p.y - c.y - s.y / 2., c.y - p.y - s.y / 2.);
float z = max(p.z - c.z - s.z / 2., c.z - p.z - s.z / 2.);
float d = x;
d = max(d, y);
d = max(d, z);
return d;
}
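Note that this max-of-axes construction matches the true distance only on the faces; near edges and corners it underestimates it, which is still safe for sphere tracing but converges more slowly. For reference, a minimal sketch of the standard exact box SDF (Inigo Quilez's sdBox, adapted to the same centre c and size s):
float sdf_box_exact(float3 p)
{
float3 c = float3(0.0f, 0.0f, 0.0f);
float3 s = float3(1.0f, 1.0f, 1.0f);
float3 q = abs(p - c) - s * 0.5; // signed distance to each face plane
return length(max(q, 0.0)) + min(max(q.x, max(q.y, q.z)), 0.0);
}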
[numthreads(32,32,1)]
void CSMain (uint3 id : SV_DispatchThreadID)
{
uint width, height;
Result.GetDimensions(width, height);
float2 uv = id.xy / float2(width, height) * 2.0 - 1.0; // pixel -> [-1, 1] NDC
Ray ray = CreateCameraRay(uv);
float4 colour = float4(0, 0, 0, 0);
bool cont = true;
int step = 0;
while (cont) {
step++;
float dist = sdf_box(ray.position);
ray.position += ray.direction * dist;
if (dist < 0.01f) {
colour = float4(ray.position, 0.0f);
}
cont = (dist > 0.01f) && (step < 500);
}
Result[id.xy] = colour;
}
The above code generates the following image:
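One caveat worth guarding against (an assumption about the dispatch, which isn't shown here): with [numthreads(32,32,1)], any dispatch that rounds the group count up will spawn threads past the edge of the texture, so a bounds check right after GetDimensions is a cheap safety net:
if (id.x >= width || id.y >= height)
return; // this thread falls outside the target texture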
The next step is to add a bit of lighting. To do this I need a surface normal function, which I've defined as:
float EPSILON = 0.001f;
float3 normal(float3 p) {
float2 e = float2(EPSILON, 0);
return normalize(float3(
sdf_box(p + e.xyy) - sdf_box(p + e.xyy),
sdf_box(p + e.yxy) - sdf_box(p + e.yxy),
sdf_box(p + e.yyx) - sdf_box(p + e.yyx))
);
}
Now, to check that the normal function is working properly I decided to draw the normal of each contact point on the box (by replacing colour = float4(ray.position, 0.0f); with colour = float4(normal(ray.position), 0.0f);). What I'm expecting is for 3 of the 6 faces of the cube to be coloured red, green and blue (i.e. the top face should be green, as it has a normal of (0, 1, 0)). However, what I get instead is a totally black cube:
This seems wrong. The normalize() should mean that there is at least some colour whenever the generated normal is non-zero, which suggests to me that the normal at every point is just (0, 0, 0). But this can't be right, because nowhere in or on the cube has a surface normal of 0 (other than the centre point).
I've now tried 4 different normal evaluation functions which I've found online, NONE of which work. I'm seriously questioning my sanity at this point.

Nevermind, I am stupid:
float EPSILON = 0.001f;
float3 normal(float3 p) {
float2 e = float2(EPSILON, 0);
return normalize(float3(
sdf_box(p + e.xyy) - sdf_box(p + e.xyy),
sdf_box(p + e.yxy) - sdf_box(p + e.yxy),
sdf_box(p + e.yyx) - sdf_box(p + e.yyx))
);
}
Should clearly be:
float EPSILON = 0.001f;
float3 normal(float3 p) {
float2 e = float2(EPSILON, 0);
return normalize(float3(
sdf_box(p + e.xyy) - sdf_box(p - e.xyy),
sdf_box(p + e.yxy) - sdf_box(p - e.yxy),
sdf_box(p + e.yyx) - sdf_box(p - e.yyx))
);
}
In my defense, I wrote 3 different normal functions before this one which also didn't work.
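With a working normal, a minimal Lambert-style lighting sketch could replace the colour assignment in CSMain (LightDir here is a hypothetical uniform, e.g. normalize(float3(1, -1, 1)) set from C#; it is not part of the original code):
float3 n = normal(ray.position);
float ndotl = saturate(dot(n, -LightDir)); // diffuse term, clamped to [0, 1]
colour = float4(ndotl.xxx, 1.0f);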
EDIT:
Strangely enough, after messing around a little bit more, the normal is always (0, 0, 0) when the above code is run, but not when the below code is run?
float3 normal(float3 p) {
float2 e = float2(0.001f, 0);
return normalize(float3(
sdf_box(p + e.xyy) - sdf_box(p - e.xyy),
sdf_box(p + e.yxy) - sdf_box(p - e.yxy),
sdf_box(p + e.yyx) - sdf_box(p - e.yyx))
);
}
No idea why.
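A likely explanation (worth verifying): in HLSL, a file-scope variable declared without static is a uniform shader constant, and its initializer is ignored; unless the value is set from the C# side it reads as 0. With EPSILON = 0, both samples in each finite difference coincide and every normal comes out as (0, 0, 0). Inlining the literal, as in the second version, sidesteps this; so does making the constant explicit:
// 'static const' keeps EPSILON a compile-time constant instead of an unset uniform
static const float EPSILON = 0.001f;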

Related

Generating a normal map from a height map in compute shader?

The problem is that when I tried converting the height map to a normal map, the results were wrong. For some reason there appear to be three light sources, emitting from the top (green), right (red), and left (blue) of the texture.
This is the GeoMath.hlsl code that I am using:
static const float PI = 3.141592653589793238462643383279;
float2 longitudeLatitudeToUV(float2 longLat) {
float longitude = longLat[0];
float latitude = longLat[1];
float u = longitude / (2 * PI) + 0.5;
float v = latitude / PI + 0.5;
return float2(u,v);
}
float3 longitudeLatitudeToPoint(float2 longLat) {
float longitude = longLat[0];
float latitude = longLat[1];
float x;
float y;
float z;
y = sin(latitude);
float r = cos(latitude);
x = sin(longitude) * r;
z = -cos(longitude) * r;
return float3(x, y, z);
}
float2 uvToLongitudeLatitude(float2 uv) {
float longitude = (uv.x - 0.5) * (2 * PI);
float latitude = (uv.y - 0.5) * PI;
return float2(longitude, latitude);
}
float2 pointToLongitudeLatitude(float3 p) {
float longitude = atan2(p.x, p.z);
float latitude = asin(p.y);
return float2(longitude, latitude);
}
float2 pointToUV(float3 p) {
p = normalize(p);
return longitudeLatitudeToUV(pointToLongitudeLatitude(p));
}
This is the compute shader I am using to convert height map into normal map.
#pragma kernel CSMain
#include "GeoMath.hlsl"
Texture2D<float> _HeightMap;
RWTexture2D<float4> _NormalMap;
int _TextureSize_Width;
int _TextureSize_Height;
float _WorldRadius;
float _HeightMultiplier;
float3 CalculateWorldPoint(uint2 texCoord)
{
float2 uv = texCoord / float2(_TextureSize_Width - 1, _TextureSize_Height - 1);
float2 longLat = uvToLongitudeLatitude(uv);
float3 spherePoint = longitudeLatitudeToPoint(longLat);
float height01 = _HeightMap[texCoord].r + 1.0;
float worldHeight = _WorldRadius + height01 * _HeightMultiplier;
return spherePoint * worldHeight;
}
uint2 WrapIndex(uint2 texCoord)
{
texCoord.x = (texCoord.x + _TextureSize_Width) % _TextureSize_Width;
texCoord.y = max(min(_TextureSize_Height - 1, texCoord.y), 0);
return texCoord;
}
[numthreads(8,8,1)]
void CSMain (uint3 id : SV_DispatchThreadID)
{
float3 normalVector;
float3 posNorth = CalculateWorldPoint(WrapIndex(id.xy + uint2(0, 1)));
float3 posSouth = CalculateWorldPoint(WrapIndex(id.xy + uint2(0, -1)));
float3 posEast = CalculateWorldPoint(WrapIndex(id.xy + uint2(1, 0)));
float3 posWest = CalculateWorldPoint(WrapIndex(id.xy + uint2(-1, 0)));
float3 dirNorth = normalize(posNorth - posSouth);
float3 dirEast = normalize(posEast - posWest);
normalVector = normalize(cross(dirNorth, dirEast));
_NormalMap[id.xy] = float4(normalVector, 1.0);
}
And this is the result I am getting, shown below: height map (top), normal map generated by the code above (bottom).
I believe that you are trying to get object-space normals, but there is a tiny detail missing: each axis of a normalized vector3 ranges over -1..1, while pixel values range over 0..1, so you just need to remap the range. This line roughly fixes the problem:
_NormalMap[id.xy] = float4(normalVector / 2 + float3(0.5, 0.5, 0.5), 1.0);
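Whatever later samples this map then needs the inverse remap to recover the vector; a sketch, assuming the map is read back in another shader:
// undo the encode step: [0, 1] pixel back to a [-1, 1] normal
float3 n = normalize(_NormalMap[texCoord].xyz * 2.0 - 1.0);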
Result

Shader works just fine in Unity Editor, but turns black in a WebGL build

I am working on a project which encodes sensor values at different positions into a 3D heatmap of a building. I use a shader for this purpose, and it works just fine in the Editor, but after I built the scene for WebGL it turned out black.
I have tried using constant loop indices, always including this shader in the Project Settings, etc., but none of these works. Here is some of the code:
v2f vert(appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.worldPos = mul(unity_ObjectToWorld, v.vertex);
o.screenPos = ComputeScreenPos(o.vertex);
UNITY_TRANSFER_FOG(o,o.vertex);
return o;
}
//...
float2 boxIntersection(in float3 ro, in float3 rd, in float3 rad)
{
float3 m = 1.0 / rd;
float3 n = m * ro;
float3 k = abs(m) * rad;
float3 t1 = -n - k;
float3 t2 = -n + k;
float tN = max(max(t1.x, t1.y), t1.z);
float tF = min(min(t2.x, t2.y), t2.z);
if (tN > tF || tF < 0.0) return float2(-1.0, -1.0); // no intersection
return float2(tN, tF);
}
//p in object space
float SampleValue(float3 p) {
float totalValue = 0.0;
float denom = 0.0;
for (int i = 0; i < 34; ++i) { // _DataSize
float4 sd = _SensorData[i];
float dist = length(p - sd.xyz);
totalValue += sd.w / (dist * dist);
denom += 1.0 / (dist * dist);
}
if (denom == 0.0) {
return 0.0;
}
return totalValue / denom;
}
float4 transferFunction(float value) {
float tv = (value - _DataScale.x) / (_DataScale.y - _DataScale.x); // _DataScale.x, _DataScale.y
float4 col = tex2D(_TransferTexture, float2(0.5, tv));
col.w *= _Strength; // _Strength
return float4(col.xyz * col.w, col.w);
}
float4 rayMarch(float3 ro, float3 rd, float dp) {
float3 ro1 = mul(unity_WorldToObject, float4(ro, 1.0));
float3 rd1 = mul(unity_WorldToObject, rd);
float2 t = boxIntersection(ro1, rd1, float3(1, 1, 1) * 0.5);
t.x = length(mul(unity_ObjectToWorld, float4(ro1 + rd1 * max(t.x, 0.0), 1.0)) - ro);
t.y = length(mul(unity_ObjectToWorld, float4(ro1 + rd1 * t.y, 1.0)) - ro);
t.y = min(t.y, dp);
float4 acc = float4(0.0, 0.0, 0.0, 1.0);
float totalDelta = (t.y - t.x);
float delta = totalDelta / float(_RM_Samples - 1.0);
float3 p = ro + t.x * rd;
for (int i = 0; i < 34; ++i) { // _RM_Samples
float v = SampleValue(p);
float4 tf = transferFunction(v);
float tr = exp(-tf.w * delta);
acc.xyz += tf.xyz * acc.w * delta;
acc.w *= tr;
p += delta * rd;
}
return float4(acc.xyz, (1.0 - acc.w) * step(t.x, t.y));
}
fixed4 frag(v2f i) : SV_Target
{
float2 tc = i.screenPos.xy / i.screenPos.w;
float depth = UNITY_SAMPLE_DEPTH(tex2D(_CameraDepthTexture, tc));
float eD = LinearEyeDepth(depth);
float3 ro = _WorldSpaceCameraPos;
float3 rd = normalize(i.worldPos - ro);
float4 col = rayMarch(ro, rd, eD);
//if (col.w < 1) col = float4(1, 0, 0, 1);
//else col = float4(0, 1, 0, 1);
if (wingCullPlaneValue(i.worldPos.xyz) == 0 || cullPlaneValue(i.worldPos.xyz) == 0) {
discard;
}
UNITY_APPLY_FOG(i.fogCoord, col);
return col;
}
Since this works fine in the Editor, I don't think there is any error in the boxIntersection or rayMarch functions. I wonder if there is anything special about how WebGL processes pixels, and whether I have to tweak the code accordingly. I am new to WebGL and shaders, and would appreciate any help or advice. Thanks in advance.
That's because of the step function. Approximate it with your own sigmoid function (might be expensive):
float emulated_step(float a, float x){
return 1.0/(1+pow(1000, -(x-a)*8192));
}
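That is, the last line of rayMarch would become (a sketch of the substitution, not tested in WebGL):
return float4(acc.xyz, (1.0 - acc.w) * emulated_step(t.x, t.y));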

How to create Offscreen Enemy indicator in Unity 3D?

I am working on creating an offscreen enemy indicator using the tutorial at the link below. I can get the indicator to rotate so it points at the enemy, but the indicator does not move from edge to edge of the screen.
http://gamedevelopment.tutsplus.com/tutorials/positioning-on-screen-indicators-to-point-to-off-screen-targets--gamedev-6644
This is the desired outcome:
This is what I have managed to figure out so far. Please help.
var screenCenter:Vector3 = new Vector3(0.5, 0.5, 0f);
//Note coordinates are translated
//Make (0,0) the centre of the screen instead of bottom left
screenpos -= screenCenter;
//find angle from center of screen instead of bottom left
var angle:float = Mathf.Atan2(screenpos.y, screenpos.x);
angle -= 90 * Mathf.Deg2Rad;
var cos:float = Mathf.Cos(angle);
var sin:float = -Mathf.Cos(angle);
screenpos = screenCenter + new Vector3(sin*150, cos*150, 0);
//y=mx + b format
var m:float = cos/sin;
var ScreenBounds:Vector3 = screenCenter;// * 0.9f;
//Check up and down first
if(cos > 0){
screenpos = new Vector3(ScreenBounds.y/m, ScreenBounds.y, 0);
}else{//down
screenpos = new Vector3(-ScreenBounds.y/m, -ScreenBounds.y, 0);
}
//If out of bound then get point on appropriate side
if(screenpos.x > ScreenBounds.x){//Out of bound must be on right
screenpos = new Vector3(ScreenBounds.x, ScreenBounds.y*m, 0);
}else if(screenpos.x < ScreenBounds.x){//Out of bound must be on left
screenpos = new Vector3(-ScreenBounds.x, -ScreenBounds.y*m, 0);
}
//Remove the co ordinate translation
screenpos += screenCenter;
var DistanceIndicatorRectT = DistanceIndicator.GetComponent(RectTransform);
DistanceIndicatorRectT.localPosition = new Vector3(screenpos.x * scrWidth/2, screenpos.y * scrHeight/2, DistanceIndicatorRectT.localPosition.z * screenpos.z);
DistanceIndicator.transform.rotation = Quaternion.Euler(0, 0, angle*Mathf.Rad2Deg);
I took a bit of a different approach than you: what Carlos suggested, but without using physics.
If "t" is your target, this is how you can get its position on screen in pixels (if it's off screen, it just goes to negative values or to values higher than the screen width/height):
Vector3 targetPosOnScreen = Camera.main.WorldToScreenPoint (t.position);
And this function returns a bool indicating whether the Vector3 (in pixels) is on screen:
bool onScreen(Vector2 input){
return !(input.x > Screen.width || input.x < 0 || input.y > Screen.height || input.y < 0);
}
First we check whether the target is on screen; if it is, we hide the indicator and return, otherwise we proceed with the rest of the code:
if (onScreen (targetPosOnScreen)) {
//Some code to destroy indicator or make it invisible
return;
}
Then comes a simple calculation of the angle between the center of the screen and the target:
Vector3 center = new Vector3 (Screen.width / 2f, Screen.height / 2f, 0);
float angle = Mathf.Atan2(targetPosOnScreen.y-center.y, targetPosOnScreen.x-center.x) * Mathf.Rad2Deg;
The next part of the code determines which edge of the screen the target lies beyond, based on the angle we just calculated and the screen's aspect ratio:
float coef;
if (Screen.width > Screen.height)
coef = Screen.width / Screen.height;
else
coef = Screen.height / Screen.width;
float degreeRange = 360f / (coef + 1);
if(angle < 0) angle = angle + 360;
int edgeLine;
if(angle < degreeRange / 4f) edgeLine = 0;
else if (angle < 180 - degreeRange / 4f) edgeLine = 1;
else if (angle < 180 + degreeRange / 4f) edgeLine = 2;
else if (angle < 360 - degreeRange / 4f) edgeLine = 3;
else edgeLine = 0;
http://s23.postimg.org/ytpm82ad7/Untitled_1.png
The image shows which value "edgeLine" will take based on the target's position (red represents the camera's view; the black lines divide the space).
And then we have this code, which sets the Transform "t2" (the indicator) to the correct position and angle:
t2.position = Camera.main.ScreenToWorldPoint(intersect(edgeLine, center, targetPosOnScreen)+new Vector3(0,0,10));
t2.eulerAngles = new Vector3 (0, 0, angle);
Below is the "intersect" function itself:
Vector3 intersect(int edgeLine, Vector3 line2point1, Vector3 line2point2){
float[] A1 = {-Screen.height, 0, Screen.height, 0};
float[] B1 = {0, -Screen.width, 0, Screen.width};
float[] C1 = {-Screen.width * Screen.height,-Screen.width * Screen.height,0, 0};
float A2 = line2point2.y - line2point1.y;
float B2 = line2point1.x - line2point2.x;
float C2 = A2 * line2point1.x + B2 * line2point1.y;
float det = A1[edgeLine] * B2 - A2 * B1[edgeLine];
return new Vector3 ((B2 * C1[edgeLine] - B1[edgeLine] * C2) / det, (A1[edgeLine] * C2 - A2 * C1[edgeLine]) / det, 0);
}
We pass this function the index of the edge of the camera's view rectangle we need to intersect with, and it constructs a line between the center of the screen and the target position.
For better explanation of this function look here : https://www.topcoder.com/community/data-science/data-science-tutorials/geometry-concepts-line-intersection-and-its-applications/
I just modified the values of A1, B1 and C1; each is now an array of 4, and each entry holds the line coefficients for one edge of the camera's view rectangle.
If you want to implement margins, just change the pivot of the indicator (put the actual sprite renderer as a child and move it in local space as you like). The next step would be making this work for an array of targets and putting all those targets in the given array. Hope this helps, and don't be too hard on me, it's my first time posting here :)
Create a rectangle box collider delimiting the borders of the screen and use Physics2D.Raycast in the direction of the enemy.
The point of collision will tell you where the green arrow needs to be drawn.
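A minimal sketch of that idea (every name here is hypothetical; it assumes the screen borders carry EdgeCollider2D components on their own layer, since a 2D ray starting inside a single box collider would not report the far edge):
// Cast from the screen centre towards the enemy and place the indicator
// where the ray crosses the screen-border colliders.
Vector2 center = Camera.main.ViewportToWorldPoint(new Vector3(0.5f, 0.5f, 0f));
Vector2 dir = ((Vector2)enemy.position - center).normalized;
RaycastHit2D hit = Physics2D.Raycast(center, dir, Mathf.Infinity, borderLayerMask);
if (hit.collider != null)
indicator.position = hit.point;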
In the example above, there is an error in how the angular range for each edge of the (non-square) screen rectangle is defined; computing the corner angle directly fixes it.
private void SetIndicatorPosition(Indicator obj)
{
var target = obj.Target;
var indicator = obj.PointToTarget;
if (target == null)
{
indicator.SetActive(false);
return;
}
Vector3 targetPosOnScreen = cam.WorldToScreenPoint(target.transform.position);
if (onScreen(targetPosOnScreen))
{
indicator.SetActive(false);
return;
}
indicator.SetActive(true);
Vector3 center = new Vector3(Screen.width / 2f, Screen.height / 2f, 0);
float angle = Mathf.Atan2(targetPosOnScreen.y - center.y, targetPosOnScreen.x - center.x) * Mathf.Rad2Deg;
float scale;
if (Screen.width > Screen.height)
scale = Screen.width / Screen.height;
else
scale = Screen.height / Screen.width;
float degreeRange = 360f / (scale + 1);
float angle2 = Mathf.Atan2(Screen.height - center.y, Screen.width - center.x) * Mathf.Rad2Deg;
if (angle < 0) angle = angle + 360;
int edgeLine;
if (angle < angle2) edgeLine = 0;
else if (angle < 180 - angle2) edgeLine = 1;
else if (angle < 180 + angle2) edgeLine = 2;
else if (angle < 360 - angle2) edgeLine = 3;
else edgeLine = 0;
indicator.transform.position = Camera.main.ScreenToWorldPoint(Intersect(edgeLine, center, targetPosOnScreen));
indicator.transform.eulerAngles = new Vector3(0, 0, angle);
}
Vector3 Intersect(int edgeLine, Vector3 line2point1, Vector3 line2point2)
{
float[] A1 = { -Screen.height, 0, Screen.height, 0 };
float[] B1 = { 0, -Screen.width, 0, Screen.width };
float[] C1 = { -Screen.width * Screen.height, -Screen.width * Screen.height, 0, 0 };
float A2 = line2point2.y - line2point1.y;
float B2 = line2point1.x - line2point2.x;
float C2 = A2 * line2point1.x + B2 * line2point1.y;
float det = A1[edgeLine] * B2 - A2 * B1[edgeLine];
var x = (B2 * C1[edgeLine] - B1[edgeLine] * C2) / det;
var y = (A1[edgeLine] * C2 - A2 * C1[edgeLine]) / det;
return new Vector3(x, y, 0);
}
bool onScreen(Vector2 input)
{
return !(input.x > Screen.width || input.x < 0 || input.y > Screen.height || input.y < 0);
}
public class Indicator
{
public GameObject Target { get; private set; }
public GameObject PointToTarget { get; private set; }
public Indicator(GameObject target, GameObject pointToTarget, ObjectTypeEnum type)
{
Target = target;
PointToTarget = pointToTarget;
var texture = pointToTarget.GetComponentInChildren<UITexture>();
if (texture != null)
{
texture.color = Helper.GetHintColor(type);
}
}
}
You can then call it in Update():
foreach (var obj in listIndicator)
{
SetIndicatorPosition(obj);
}

iOS OpenGL ES 2.0 Quaternion Rotation Slerp to XYZ Position

I am following the quaternion tutorial: http://www.raywenderlich.com/12667/how-to-rotate-a-3d-object-using-touches-with-opengl and am trying to rotate a globe to some XYZ location. I have an initial quaternion and generate a random XYZ location on the surface of the globe. I pass that XYZ location into the following function. The idea was to generate a look-at matrix with GLKMatrix4MakeLookAt and derive the end quaternion for the slerp step from it.
- (void)rotateToLocationX:(float)x andY:(float)y andZ:(float)z {
// Turn on the interpolation for smooth rotation
_slerping = YES; // Begin auto rotating to this location
_slerpCur = 0;
_slerpMax = 1.0;
_slerpStart = _quat;
// The eye location is defined by the look at location multiplied by this modifier
float modifier = 1.0;
// Create a look-at vector from which we will create a GLKMatrix4
float xEye = x;
float yEye = y;
float zEye = z;
//NSLog(@"%f %f %f %f %f %f", xEye, yEye, zEye, x, y, z);
_currentSatelliteLocation = GLKMatrix4MakeLookAt(xEye, yEye, zEye, 0, 0, 0, 0, 1, 0);
_currentSatelliteLocation = GLKMatrix4Multiply(_currentSatelliteLocation,self.effect.transform.modelviewMatrix);
// Turn our 4x4 matrix into a quat and use it to mark the end point of our interpolation
//_currentSatelliteLocation = GLKMatrix4Translate(_currentSatelliteLocation, 0.0f, 0.0f, GLOBAL_EARTH_Z_LOCATION);
_slerpEnd = GLKQuaternionMakeWithMatrix4(_currentSatelliteLocation);
// Print info on the quat
GLKVector3 vec = GLKQuaternionAxis(_slerpEnd);
float angle = GLKQuaternionAngle(_slerpEnd);
//NSLog(@"%f %f %f %f", vec.x, vec.y, vec.z, angle);
NSLog(@"Quat end:");
[self printMatrix:_currentSatelliteLocation];
//[self printMatrix:self.effect.transform.modelviewMatrix];
}
The interpolation works and I get a smooth rotation; however, the ending location is never the XYZ I input. I know this because my globe is a sphere and I am calculating the XYZ from a lat/lon pair. I want to look straight down the lookAt vector toward the centre of the earth from that lat/lon location on the surface of the globe after the rotation. I think it may have something to do with the up vector, but I've tried everything that made sense.
What am I doing wrong - How can I define a final quaternion that when I finish rotating, looks down a vector to the XYZ on the surface of the globe? Thanks!
Is the following what you mean: your globe center is (0, 0, 0), the radius is R, the start position is (0, 0, R) and your final position is (0, R, 0), so you rotate the globe 90 degrees around the X-axis?
If so, just set the lookAt function's eye position to your final position, and its look-at target to the globe center.
m_target.x = 0.0f;
m_target.y = 0.0f;
m_target.z = 1.0f;
m_right.x = 1.0f;
m_right.y = 0.0f;
m_right.z = 0.0f;
m_up.x = 0.0f;
m_up.y = 1.0f;
m_up.z = 0.0f;
void CCamera::RotateX( float amount )
{
Point3D target = m_target;
Point3D up = m_up;
amount = amount / 180 * PI;
m_target.x = (cos(PI / 2 - amount) * up.x) + (cos(amount) * target.x);
m_target.y = (cos(PI / 2 - amount) * up.y) + (cos(amount) * target.y);
m_target.z = (cos(PI / 2 - amount) * up.z) + (cos(amount) * target.z);
m_up.x = (cos(amount) * up.x) + (cos(PI / 2 + amount) * target.x);
m_up.y = (cos(amount) * up.y) + (cos(PI / 2 + amount) * target.y);
m_up.z = (cos(amount) * up.z) + (cos(PI / 2 + amount) * target.z);
Normalize(m_target);
Normalize(m_up);
}
void CCamera::RotateY( float amount )
{
Point3D target = m_target;
Point3D right = m_right;
amount = amount / 180 * PI;
m_target.x = (cos(PI / 2 + amount) * right.x) + (cos(amount) * target.x);
m_target.y = (cos(PI / 2 + amount) * right.y) + (cos(amount) * target.y);
m_target.z = (cos(PI / 2 + amount) * right.z) + (cos(amount) * target.z);
m_right.x = (cos(amount) * right.x) + (cos(PI / 2 - amount) * target.x);
m_right.y = (cos(amount) * right.y) + (cos(PI / 2 - amount) * target.y);
m_right.z = (cos(amount) * right.z) + (cos(PI / 2 - amount) * target.z);
Normalize(m_target);
Normalize(m_right);
}
void CCamera::RotateZ( float amount )
{
Point3D right = m_right;
Point3D up = m_up;
amount = amount / 180 * PI;
m_up.x = (cos(amount) * up.x) + (cos(PI / 2 - amount) * right.x);
m_up.y = (cos(amount) * up.y) + (cos(PI / 2 - amount) * right.y);
m_up.z = (cos(amount) * up.z) + (cos(PI / 2 - amount) * right.z);
m_right.x = (cos(PI / 2 + amount) * up.x) + (cos(amount) * right.x);
m_right.y = (cos(PI / 2 + amount) * up.y) + (cos(amount) * right.y);
m_right.z = (cos(PI / 2 + amount) * up.z) + (cos(amount) * right.z);
Normalize(m_right);
Normalize(m_up);
}
void CCamera::Normalize( Point3D &p )
{
float length = sqrt(p.x * p.x + p.y * p.y + p.z * p.z);
if (1 == length || 0 == length)
{
return;
}
float scaleFactor = 1.0 / length;
p.x *= scaleFactor;
p.y *= scaleFactor;
p.z *= scaleFactor;
}
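Since cos(PI / 2 - a) = sin(a) and cos(PI / 2 + a) = -sin(a), each of these functions is just a plain 2D rotation of one pair of basis vectors. As a sketch, RotateX is equivalent to:
void CCamera::RotateX( float amount )
{
Point3D target = m_target;
Point3D up = m_up;
float a = amount / 180 * PI;
m_target.x = (cos(a) * target.x) + (sin(a) * up.x);
m_target.y = (cos(a) * target.y) + (sin(a) * up.y);
m_target.z = (cos(a) * target.z) + (sin(a) * up.z);
m_up.x = (cos(a) * up.x) - (sin(a) * target.x);
m_up.y = (cos(a) * up.y) - (sin(a) * target.y);
m_up.z = (cos(a) * up.z) - (sin(a) * target.z);
Normalize(m_target);
Normalize(m_up);
}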
The answer to this question is a combination of the following rotateTo function and a change to the code from Ray's tutorial ( http://www.raywenderlich.com/12667/how-to-rotate-a-3d-object-using-touches-with-opengl ). As one of the comments on that article says, there is an arbitrary factor of 2.0 being multiplied in GLKQuaternion Q_rot = GLKQuaternionMakeWithAngleAndVector3Axis(angle * 2.0, axis);. Remove that "2.0" and use the following function to create the _slerpEnd; after that, the globe will rotate smoothly to the XYZ specified.
// Rotate the globe using Slerp interpolation to an XYZ coordinate
- (void)rotateToLocationX:(float)x andY:(float)y andZ:(float)z {
// Turn on the interpolation for smooth rotation
_slerping = YES; // Begin auto rotating to this location
_slerpCur = 0;
_slerpMax = 1.0;
_slerpStart = _quat;
// Create a look-at vector from which we will create a GLKMatrix4
float xEye = x;
float yEye = y;
float zEye = z;
_currentSatelliteLocation = GLKMatrix4MakeLookAt(xEye, yEye, zEye, 0, 0, 0, 0, 1, 0);
// Turn our 4x4 matrix into a quat and use it to mark the end point of our interpolation
_slerpEnd = GLKQuaternionMakeWithMatrix4(_currentSatelliteLocation);
}
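For reference, the tutorial line mentioned above then becomes (with the factor of 2.0 removed):
GLKQuaternion Q_rot = GLKQuaternionMakeWithAngleAndVector3Axis(angle, axis);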

OpenGL Projection matrix won't allow displaying anything

I'm trying to get some basic OpenGL ES 2.0 with shaders to run on the iPhone, based on some examples.
For some reason my projection matrix refuses to result in anything on the screen. It feels like a clipping plane is set very near, but that contradicts the values I supply. If I render the same scene with an orthogonal projection matrix I see my object, just with no perspective, obviously.
Here's the code that generates the projection matrix:
esPerspective(&proj, 45.f, 768.0/1024.0, 1.f, 10000.f);
void esPerspective(ESMatrix *result, float fovy, float aspect, float nearZ, float farZ)
{
float frustumW, frustumH;
frustumH = tanf( fovy / 360.0f * PI ) * nearZ;
frustumW = frustumH * aspect;
esFrustum( result, -frustumW, frustumW, -frustumH, frustumH, nearZ, farZ );
}
void esFrustum(ESMatrix *result, float left, float right, float bottom, float top, float nearZ, float farZ)
{
float deltaX = right - left;
float deltaY = top - bottom;
float deltaZ = farZ - nearZ;
ESMatrix frust;
if ( (nearZ <= 0.0f) || (farZ <= 0.0f) ||
(deltaX <= 0.0f) || (deltaY <= 0.0f) || (deltaZ <= 0.0f) )
return;
frust.m[0][0] = 2.0f * nearZ / deltaX;
frust.m[0][1] = frust.m[0][2] = frust.m[0][3] = 0.0f;
frust.m[1][1] = 2.0f * nearZ / deltaY;
frust.m[1][0] = frust.m[1][2] = frust.m[1][3] = 0.0f;
frust.m[2][0] = (right + left) / deltaX;
frust.m[2][1] = (top + bottom) / deltaY;
frust.m[2][2] = -(nearZ + farZ) / deltaZ;
frust.m[2][3] = -1.0f;
frust.m[3][2] = -2.0f * nearZ * farZ / deltaZ;
frust.m[3][0] = frust.m[3][1] = frust.m[3][3] = 0.0f;
esMatrixMultiply(result, &frust, result);
}
My projection matrix comes out as:
[3.21, 0, 0, 0]
[0, 2.41, 0, 0]
[0, 0, -1, -1]
[0, 0, -2, 0]
Even if I manually set the [3][3] cell to 1 I still don't see anything.
Any ideas?
Swap your rows and columns over (i.e. transpose).
Well, your projection matrix is transposed either way. You have a row-major projection matrix ...
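A minimal sketch of that fix, assuming the ESMatrix layout used above (a float m[4][4] member) and a hypothetical helper name; it matters because OpenGL ES 2.0 requires the transpose argument of glUniformMatrix4fv to be GL_FALSE, so any transposing has to happen on the CPU:
void esTranspose(ESMatrix *result, const ESMatrix *src)
{
// swap rows and columns before uploading the matrix
for (int r = 0; r < 4; ++r)
for (int c = 0; c < 4; ++c)
result->m[r][c] = src->m[c][r];
}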