OpenGL ES 2.0 rendering (specifically on the iPhone) is slightly off. Best guess is that it's a projection matrix problem.

So I bought O'Reilly's iPhone 3D Programming and found what I believe to be a bug in their code. However, I can't figure out what the problem is, and until I do I can't move forward with my own code.
I will paste what I consider to be the relevant code into this post, but luckily all the code is available online at:
http://examples.oreilly.com/9780596804831/HelloCone/
The problem I am having is with their OpenGL ES 2.0 renderer; it does not show up in their ES 1.1 renderer.
What I have noticed is that the cone does not render in exactly the correct position. To test this, I changed the model-view matrix so that the cone renders exactly on the frustum's near plane; the cone should then appear cut completely in two. With the ES 1.1 renderer this is the case, but when I do the same in OpenGL ES 2.0 it is not. The cone is for the most part there, but slightly shaved off, meaning it is not landing exactly on the frustum's near face.
Here is the initialization code where the projection matrix is created and set up:
void RenderingEngine2::Initialize(int width, int height)
{
    const float coneRadius = 0.5f;
    const float coneHeight = 1.0f;
    const int coneSlices = 40;

    {
        // Allocate space for the cone vertices.
        m_cone.resize((coneSlices + 1) * 2);

        // Initialize the vertices of the triangle strip.
        vector<Vertex>::iterator vertex = m_cone.begin();
        const float dtheta = TwoPi / coneSlices;
        for (float theta = 0; vertex != m_cone.end(); theta += dtheta) {
            // Grayscale gradient
            float brightness = abs(sin(theta));
            vec4 color(brightness, brightness, brightness, 1);

            // Apex vertex
            vertex->Position = vec3(0, 1, 0);
            vertex->Color = color;
            vertex++;

            // Rim vertex
            vertex->Position.x = coneRadius * cos(theta);
            vertex->Position.y = 1 - coneHeight;
            vertex->Position.z = coneRadius * sin(theta);
            vertex->Color = color;
            vertex++;
        }
    }

    {
        // Allocate space for the disk vertices.
        m_disk.resize(coneSlices + 2);

        // Initialize the center vertex of the triangle fan.
        vector<Vertex>::iterator vertex = m_disk.begin();
        vertex->Color = vec4(0.75, 0.75, 0.75, 1);
        vertex->Position.x = 0;
        vertex->Position.y = 1 - coneHeight;
        vertex->Position.z = 0;
        vertex++;

        // Initialize the rim vertices of the triangle fan.
        const float dtheta = TwoPi / coneSlices;
        for (float theta = 0; vertex != m_disk.end(); theta += dtheta) {
            vertex->Color = vec4(0.75, 0.75, 0.75, 1);
            vertex->Position.x = coneRadius * cos(theta);
            vertex->Position.y = 1 - coneHeight;
            vertex->Position.z = coneRadius * sin(theta);
            vertex++;
        }
    }

    // Create the depth buffer.
    glGenRenderbuffers(1, &m_depthRenderbuffer);
    glBindRenderbuffer(GL_RENDERBUFFER, m_depthRenderbuffer);
    glRenderbufferStorage(GL_RENDERBUFFER,
                          GL_DEPTH_COMPONENT16,
                          width,
                          height);

    // Create the framebuffer object; attach the depth and color buffers.
    glGenFramebuffers(1, &m_framebuffer);
    glBindFramebuffer(GL_FRAMEBUFFER, m_framebuffer);
    glFramebufferRenderbuffer(GL_FRAMEBUFFER,
                              GL_COLOR_ATTACHMENT0,
                              GL_RENDERBUFFER,
                              m_colorRenderbuffer);
    glFramebufferRenderbuffer(GL_FRAMEBUFFER,
                              GL_DEPTH_ATTACHMENT,
                              GL_RENDERBUFFER,
                              m_depthRenderbuffer);

    // Bind the color buffer for rendering.
    glBindRenderbuffer(GL_RENDERBUFFER, m_colorRenderbuffer);

    // Set up some GL state.
    glViewport(0, 0, width, height);
    glEnable(GL_DEPTH_TEST);

    // Build the GLSL program.
    m_simpleProgram = BuildProgram(SimpleVertexShader, SimpleFragmentShader);
    glUseProgram(m_simpleProgram);

    // Set the projection matrix.
    GLint projectionUniform = glGetUniformLocation(m_simpleProgram, "Projection");
    mat4 projectionMatrix = mat4::Frustum(-1.6f, 1.6, -2.4, 2.4, 5, 10);
    glUniformMatrix4fv(projectionUniform, 1, 0, projectionMatrix.Pointer());
}
And here is the Render code. As you can see, I have changed the ModelViewMatrix to place the cone on the bottom-left corner of the frustum's near face.
void RenderingEngine2::Render() const
{
    GLuint positionSlot = glGetAttribLocation(m_simpleProgram, "Position");
    GLuint colorSlot = glGetAttribLocation(m_simpleProgram, "SourceColor");

    glClearColor(0.5f, 0.5f, 0.5f, 1);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    glEnableVertexAttribArray(positionSlot);
    glEnableVertexAttribArray(colorSlot);

    mat4 rotation(m_animation.Current.ToMatrix());
    mat4 translation = mat4::Translate(-1.6, -2.4, -5);

    // Set the model-view matrix.
    GLint modelviewUniform = glGetUniformLocation(m_simpleProgram, "Modelview");
    mat4 modelviewMatrix = rotation * translation;
    glUniformMatrix4fv(modelviewUniform, 1, 0, modelviewMatrix.Pointer());

    // Draw the cone.
    {
        GLsizei stride = sizeof(Vertex);
        const GLvoid* pCoords = &m_cone[0].Position.x;
        const GLvoid* pColors = &m_cone[0].Color.x;
        glVertexAttribPointer(positionSlot, 3, GL_FLOAT, GL_FALSE, stride, pCoords);
        glVertexAttribPointer(colorSlot, 4, GL_FLOAT, GL_FALSE, stride, pColors);
        glDrawArrays(GL_TRIANGLE_STRIP, 0, m_cone.size());
    }

    // Draw the disk that caps off the base of the cone.
    {
        GLsizei stride = sizeof(Vertex);
        const GLvoid* pCoords = &m_disk[0].Position.x;
        const GLvoid* pColors = &m_disk[0].Color.x;
        glVertexAttribPointer(positionSlot, 3, GL_FLOAT, GL_FALSE, stride, pCoords);
        glVertexAttribPointer(colorSlot, 4, GL_FLOAT, GL_FALSE, stride, pColors);
        glDrawArrays(GL_TRIANGLE_FAN, 0, m_disk.size());
    }

    glDisableVertexAttribArray(positionSlot);
    glDisableVertexAttribArray(colorSlot);
}

Looks like I found the answer to my own question.
The projection matrix in the O'Reilly code is being calculated incorrectly.
In their code they have:
T a = 2 * near / (right - left);
T b = 2 * near / (top - bottom);
T c = (right + left) / (right - left);
T d = (top + bottom) / (top - bottom);
T e = - (far + near) / (far - near);
T f = -2 * far * near / (far - near);
Matrix4 m;
m.x.x = a; m.x.y = 0; m.x.z = 0; m.x.w = 0;
m.y.x = 0; m.y.y = b; m.y.z = 0; m.y.w = 0;
m.z.x = c; m.z.y = d; m.z.z = e; m.z.w = -1;
m.w.x = 0; m.w.y = 0; m.w.z = f; m.w.w = 1;
return m;
However, this is not the correct perspective projection matrix: m.w.w should be 0, not 1.
Matrix4 m;
m.x.x = a; m.x.y = 0; m.x.z = 0; m.x.w = 0;
m.y.x = 0; m.y.y = b; m.y.z = 0; m.y.w = 0;
m.z.x = c; m.z.y = d; m.z.z = e; m.z.w = -1;
m.w.x = 0; m.w.y = 0; m.w.z = f; m.w.w = 0;
return m;
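For reference, the corrected code matches the standard OpenGL glFrustum perspective matrix, which (with n and f the near and far planes) is:

$$
P =
\begin{pmatrix}
\frac{2n}{r-l} & 0 & \frac{r+l}{r-l} & 0 \\
0 & \frac{2n}{t-b} & \frac{t+b}{t-b} & 0 \\
0 & 0 & -\frac{f+n}{f-n} & -\frac{2fn}{f-n} \\
0 & 0 & -1 & 0
\end{pmatrix}
$$

Because m.x, m.y, m.z and m.w are the columns of the matrix in this code, the -1 lands in m.z.w and the bottom-right entry is m.w.w, which must be 0. With the erroneous 1, clip-space w comes out as -z_eye + 1 instead of -z_eye, so the perspective divide is slightly wrong and geometry ends up a little off where it should be.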

Related

OpenGL: Translating textures along the z-coordinate is not producing any effect

I have drawn a 10x10 grid of noise textures. I would like to be able to control the depth of each texture. As a test, I tried translating half of the textured objects with a z coordinate of -0.5, but it seems to have no effect. What am I missing?
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glViewport(0, 0, RectWidth(winRect), RectHeight(winRect));
glMatrixMode(GL.PROJECTION);
glLoadIdentity;
glMatrixMode(GL.MODELVIEW);
glLoadIdentity;
glEnable(GL.TEXTURE_2D);
wall_texid = glGenTextures(1);
halfTextureSize = 128; % in px for meshgrid
%plaidData = makePlaidData(halfTextureSize);
%normalizedPlaidData = (plaidData - min(plaidData)) / ( max(plaidData) - min(plaidData) );
[x,y] = meshgrid(-halfTextureSize+1:halfTextureSize,-halfTextureSize+1:halfTextureSize);
noysSlope = 1.0; %1.5;
noys = 255.*oneoverf(noysSlope, size(x,1), size(x,2)); % oneoverf -> [0:1]
noys=repmat(noys,[ 1 1 3 ]);
noys=permute(uint8(noys),[ 3 2 1 ]);
xoffset = -10:1:10;
yoffset = -10:1:10;
rmin_bg = 45.6874;% pixels
rmax_bg = 350.7631;% pixels
rstrip = 11.6268;% this cuts a strip into the fixation disk that has a height the size of the paddle height
% this code pokes out the transparent aperture
opaque = ones(size(x'));
% for i = 1:length(xoffset)
% opaque = min(opaque, ((sqrt((x'+xoffset(i)).^2+(y'+yoffset(i)).^2) > rmax_bg) |( sqrt((x'+xoffset(i)).^2+(y'+yoffset(i)).^2) < rmin_bg)));
% end
% noys(4,:,:) = shiftdim(255 .* opaque, -1);
glBindTexture(GL_TEXTURE_2D, wall_texid);
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, halfTextureSize*2, halfTextureSize*2, 0, GL_RGB, GL_UNSIGNED_BYTE, noys);
glTexEnvfv(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
%glClear(GL.COLOR_BUFFER_BIT);
corners = [0 0;
1 0;
1 1;
0 1
];
halfHeight = winRect(4)/2;
halfWidth = winRect(3)/2;
canvas = .09; % pick the larger of the two dimensions, since they are not equal
depth = 0.0;
v=[-canvas -canvas depth;...
canvas -canvas depth;...
canvas canvas depth;...
-canvas canvas depth]';
surroundTexture = glGenLists(1);
glNewList(surroundTexture, GL.COMPILE);
glBegin(GL.POLYGON);
for i = 1:4
glTexCoord2dv(corners(i,:));
glVertex3dv(v(:,i));
end
glEnd;
glEndList;
translations = struct('x',{},'y',{},'z',{});
index = 1;
offset = 0.1;
for y = -10:2:9
for x = -10:2:9
translation.x = x / 10.0 + offset;
translation.y = y / 10.0 + offset;
translations(index) = translation;
index = index+1;
end
end
%%%Draw texture grid%%%%
for i = 1:100
glPushMatrix;
if i < 51
glTranslated(translations(i).x, translations(i).y, -.5);
else
glTranslated(translations(i).x, translations(i).y, 0.0);
end
glCallList(surroundTexture);
glPopMatrix;
end
NB: I am using PsychToolBox, which gives access to OpenGL in MATLAB.

iOS OpenGL ES 2.0 Billboard Object On Sphere And Rotate With Sphere

I have a sphere (the earth) in OpenGL ES 2.0 for iOS. I also have markers that I want to place at lat/lon positions on the earth, but I want the markers to always face the user (billboards) while still moving with the earth when it is rotated by touch. So I've tried to research billboarding and put together the following code. This code is from the billboarding function that is called after I create the earth (which is translated backwards 6 units on the Z axis). I can't seem to get the planes of the billboards to always face the camera while also moving with the earth as it rotates. How can I do that?
// Get the current modelview matrix
GLKMatrix4 originalMat = self.effect.transform.modelviewMatrix;
GLKMatrix4 currMat = self.effect.transform.modelviewMatrix;
// Define the buffer designators
GLuint billboardVertexArray;
GLuint billboardVertexBuffer;
GLuint billboardIndexBuffer;
glGenVertexArraysOES(1, &billboardVertexArray);
glBindVertexArrayOES(billboardVertexArray);
// Now draw the billboard
glGenBuffers(1, &billboardVertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, billboardVertexBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(Billboard_vertex), Billboard_vertex, GL_STATIC_DRAW);
glGenBuffers(1, &billboardIndexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, billboardIndexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(Billboard_index), Billboard_index, GL_STATIC_DRAW);
// u0,v0,normalx0,normaly0,normalz0,x0,y0,z0
glEnableVertexAttribArray(GLKVertexAttribPosition);
glVertexAttribPointer(GLKVertexAttribPosition, 3, GL_FLOAT, GL_FALSE, 8*sizeof(float), BUFFER_OFFSET(5));
glEnableVertexAttribArray(GLKVertexAttribTexCoord0);
glVertexAttribPointer(GLKVertexAttribTexCoord0, 2, GL_FLOAT, GL_FALSE, 8*sizeof(float), BUFFER_OFFSET(0));
glEnableVertexAttribArray(GLKVertexAttribNormal);
glVertexAttribPointer(GLKVertexAttribNormal, 3, GL_FLOAT, GL_FALSE, 8*sizeof(float), BUFFER_OFFSET(2));
// Enable the Earth texture
self.effect.texture2d0.name = _billBoardTextureInfo.name;
self.effect.texture2d0.target = _billBoardTextureInfo.target;
// Bind the earth vertex array
glBindVertexArrayOES(billboardVertexArray);
// Now put a billboard at a specific Lat Lon - so first
// calculate XYZ from lat lon
XYZ xyz;
xyz.x = 0; xyz.y = 0; xyz.z = 0;
[self LLAtoXYZwithLat:0 andLon:0 andXYZ:&xyz];
//NSLog(#"XYZ after: %f %f %f",xyz.x,xyz.y,xyz.z);
// Move the billboard back so we can see it
GLKMatrix4 modelViewMatrix = GLKMatrix4MakeTranslation(0.0f, 0.0f, -6.0f);
// In update, we convert the quaternion into a rotation matrix, and apply it to the model view matrix as usual.
GLKMatrix4 rotation = GLKMatrix4MakeWithQuaternion(_quat);
modelViewMatrix = GLKMatrix4Multiply(modelViewMatrix, rotation);
// First create the variation translations to anchor the billboard
GLKMatrix4 translationXYZ = GLKMatrix4MakeTranslation(xyz.x, xyz.y, xyz.z);
GLKMatrix4 translationForLatLonWithTranslation = GLKMatrix4Multiply(modelViewMatrix,translationXYZ);
// Scale this object as well
GLKMatrix4 scaledWithTranslationAndRotation = GLKMatrix4Scale(translationForLatLonWithTranslation, scale, scale, scale);
// Remove the Translation portion of the matrix
// | xx xy xz xw |
// | yx yy yz yw |
// | zx zy zz zw |
// | wx wy wz ww |
//
// | R T |
// | (0,0,0) 1 |
//
// d = sqrt( xx² + yx² + zx² )
//
// | d 0 0 T.x |
// | 0 d 0 T.y |
// | 0 0 d T.z |
// | 0 0 0 1 |
//
// union _GLKMatrix4
// {
// struct
// {
// float m00, m01, m02, m03;
// float m10, m11, m12, m13;
// float m20, m21, m22, m23;
// float m30, m31, m32, m33;
// };
// float m[16];
// }
// typedef union _GLKMatrix4 GLKMatrix4;
// Construct the rows in the new matrix
float d = sqrt( pow(currMat.m00,2) + pow(currMat.m10,2) + pow(currMat.m20,2) );
GLKVector4 columnToInsert0 = GLKVector4Make(d, 0, 0, currMat.m03+xyz.x);
GLKVector4 columnToInsert1 = GLKVector4Make(0, d, 0, currMat.m13+xyz.y);
//GLKVector4 columnToInsert2 = GLKVector4Make(0, 0, d, currMat.m23-6+xyz.z);
GLKVector4 columnToInsert3 = GLKVector4Make(0, 0, 0, 1);
// Build the new Matrix
GLKMatrix4 noTranslationInfo = GLKMatrix4SetRow(currMat, 0, columnToInsert0);
noTranslationInfo = GLKMatrix4SetRow(noTranslationInfo, 1, columnToInsert1);
//noTranslationInfo = GLKMatrix4SetRow(noTranslationInfo, 2, columnToInsert2);
noTranslationInfo = GLKMatrix4SetRow(noTranslationInfo, 3, columnToInsert3);
[self printMatrix:noTranslationInfo];
// Assign the new matrix to draw with - no translation
self.effect.transform.modelviewMatrix = noTranslationInfo;
// Render the object with GLKit
[self.effect prepareToDraw];
// Draw elements from the index array and uv / vertex / normal info
glDrawElements(GL_TRIANGLES,Billboard_polygoncount*3,GL_UNSIGNED_SHORT,0);
// Restore the original matrix
self.effect.transform.modelviewMatrix = originalMat;
This seems to work pretty well:
// Get the current modelview matrix
GLKMatrix4 originalMat = self.effect.transform.modelviewMatrix;
GLKMatrix4 currMat = self.effect.transform.modelviewMatrix;
// Print the original matrix for comparison
//NSLog(#"Original Matrix:");
//[self printMatrix:currMat];
// Define the buffer designators
GLuint billboardVertexArray;
GLuint billboardVertexBuffer;
GLuint billboardIndexBuffer;
glGenVertexArraysOES(1, &billboardVertexArray);
glBindVertexArrayOES(billboardVertexArray);
// Now draw the billboard
glGenBuffers(1, &billboardVertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, billboardVertexBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(Billboard_vertex), Billboard_vertex, GL_STATIC_DRAW);
glGenBuffers(1, &billboardIndexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, billboardIndexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(Billboard_index), Billboard_index, GL_STATIC_DRAW);
// u0,v0,normalx0,normaly0,normalz0,x0,y0,z0
glEnableVertexAttribArray(GLKVertexAttribPosition);
glVertexAttribPointer(GLKVertexAttribPosition, 3, GL_FLOAT, GL_FALSE, 8*sizeof(float), BUFFER_OFFSET(5));
glEnableVertexAttribArray(GLKVertexAttribTexCoord0);
glVertexAttribPointer(GLKVertexAttribTexCoord0, 2, GL_FLOAT, GL_FALSE, 8*sizeof(float), BUFFER_OFFSET(0));
glEnableVertexAttribArray(GLKVertexAttribNormal);
glVertexAttribPointer(GLKVertexAttribNormal, 3, GL_FLOAT, GL_FALSE, 8*sizeof(float), BUFFER_OFFSET(2));
// Enable the Earth texture
self.effect.texture2d0.name = _billBoardTextureInfo.name;
self.effect.texture2d0.target = _billBoardTextureInfo.target;
// Bind the earth vertex array
glBindVertexArrayOES(billboardVertexArray);
// Now put a billboard at a specific Lat Lon - so first
// calculate XYZ from lat lon
XYZ xyz;
xyz.x = 0; xyz.y = 0; xyz.z = 0;
[self LLAtoXYZwithLat:35 andLon:-97 andXYZ:&xyz];
//NSLog(#"XYZ after: %f %f %f",xyz.x,xyz.y,xyz.z);
// Scale this object as well
//GLKMatrix4 scaledWithTranslationAndRotation = GLKMatrix4Scale(translationForLatLonWithTranslation, scale, scale, scale);
// Remove the Translation portion of the matrix
// | xx xy xz xw |
// | yx yy yz yw |
// | zx zy zz zw |
// | wx wy wz ww |
//
// | R T |
// | (0,0,0) 1 |
//
// d = sqrt( xx² + yx² + zx² )
//
// | d 0 0 T.x |
// | 0 d 0 T.y |
// | 0 0 d T.z |
// | 0 0 0 1 |
//
// union _GLKMatrix4
// {
// struct
// {
// float m00, m01, m02, m03;
// float m10, m11, m12, m13;
// float m20, m21, m22, m23;
// float m30, m31, m32, m33;
// };
// float m[16];
// }
// typedef union _GLKMatrix4 GLKMatrix4;
// Construct the rows in the new matrix
float d = sqrt( pow(currMat.m00,2) + pow(currMat.m10,2) + pow(currMat.m20,2) );
GLKVector4 columnToInsert0 = GLKVector4Make(d, 0, 0, xyz.x);
GLKVector4 columnToInsert1 = GLKVector4Make(0, d, 0, xyz.y);
GLKVector4 columnToInsert2 = GLKVector4Make(0, 0, d, xyz.z);
GLKVector4 columnToInsert3 = GLKVector4Make(0, 0, 0, 1);
// Build the new Matrix
GLKMatrix4 noTranslationInfo = GLKMatrix4SetRow(currMat, 0, columnToInsert0);
noTranslationInfo = GLKMatrix4SetRow(noTranslationInfo, 1, columnToInsert1);
noTranslationInfo = GLKMatrix4SetRow(noTranslationInfo, 2, columnToInsert2);
noTranslationInfo = GLKMatrix4SetRow(noTranslationInfo, 3, columnToInsert3);
// Print out the 'no translation' matrix
//NSLog(#"No Translation Matrix:");
//[self printMatrix:noTranslationInfo];
// Get a rotation matrix from the quaternion we last rotated the globe with
GLKMatrix4 rotationMatrixFromQuaternion = GLKMatrix4MakeWithQuaternion( _quat );
//NSLog(#"Rotation Matrix from Quaternion: ");
//[self printMatrix:rotationMatrixFromQuaternion];
// Now use the matrix produced from our Quaternion to rotate the global coordinates
// of the billboard object
GLKMatrix4 rotatedNoTranslationInfo = GLKMatrix4Multiply(rotationMatrixFromQuaternion, noTranslationInfo);
//NSLog(#"rotatedNoTranslationInfo:");
//[self printMatrix:rotatedNoTranslationInfo];
// Throw the world translation coordinates in the matrix
noTranslationInfo.m30 = ( rotatedNoTranslationInfo.m30 );
noTranslationInfo.m31 = ( rotatedNoTranslationInfo.m31 );
noTranslationInfo.m32 = ( rotatedNoTranslationInfo.m32 + GLOBAL_EARTH_Z_LOCATION );
//NSLog(#"Final Matrix:");
//[self printMatrix:noTranslationInfo];
// Assign the new matrix to draw with - no translation
self.effect.transform.modelviewMatrix = noTranslationInfo;
// Render the object with GLKit
[self.effect prepareToDraw];
// Draw elements from the index array and uv / vertex / normal info
glDrawElements(GL_TRIANGLES,Billboard_polygoncount*3,GL_UNSIGNED_SHORT,0);
// Restore the original matrix
self.effect.transform.modelviewMatrix = originalMat;
When I've done this, I've sent four vertices for the billboard, all at the center of the rectangle, to the vertex shader (really two sets of three, for the two triangles) and had the vertex shader displace them to always face the viewer. I describe this process in my answer here, but the relevant vertex shader code is as follows:
attribute vec4 position;
attribute vec4 inputImpostorSpaceCoordinate;
varying mediump vec2 impostorSpaceCoordinate;
varying mediump vec3 normalizedViewCoordinate;
uniform mat4 modelViewProjMatrix;
uniform mediump mat4 orthographicMatrix;
uniform mediump float sphereRadius;
void main()
{
vec4 transformedPosition;
transformedPosition = modelViewProjMatrix * position;
impostorSpaceCoordinate = inputImpostorSpaceCoordinate.xy;
transformedPosition.xy = transformedPosition.xy + inputImpostorSpaceCoordinate.xy * vec2(sphereRadius);
transformedPosition = transformedPosition * orthographicMatrix;
normalizedViewCoordinate = (transformedPosition.xyz + 1.0) / 2.0;
gl_Position = transformedPosition;
}
The inputImpostorSpaceCoordinate attribute holds four coordinates that range from (-1, -1) to (1, 1) and is supplied in parallel with the four vertices that make up the rectangular billboard you're trying to present. The coordinates displace the vertices relative to the screen, and they can also provide texture coordinates for the kind of sub-map you'd use above. I used those values to do raytracing calculations for my sphere generation, but you can ignore that part of it.
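As a minimal illustration of that layout (not the code from the question; cx, cy, cz and the array names are made up), the per-vertex data for one billboard could look like this: every position is the same anchor point, and the impostor coordinate picks the corner that vertex gets pushed toward in the shader.
// Hypothetical data for one billboard quad drawn as two triangles (6 vertices).
// cx, cy, cz is the single anchor point of the billboard (e.g. a marker position);
// the impostor-space coordinate selects which corner each vertex is displaced toward.
const float cx = 0.0f, cy = 0.0f, cz = -6.0f;
const float billboardPositions[6 * 3] = {
    cx, cy, cz,   cx, cy, cz,   cx, cy, cz,   // first triangle
    cx, cy, cz,   cx, cy, cz,   cx, cy, cz    // second triangle
};
const float billboardImpostorCoords[6 * 2] = {
    -1.0f, -1.0f,    1.0f, -1.0f,   -1.0f,  1.0f,   // first triangle
     1.0f, -1.0f,    1.0f,  1.0f,   -1.0f,  1.0f    // second triangle
};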

Textured layers randomly switch position

In my project I want to create 5 textured layers.
Each layer is made up of 4 textured rectangles. These four parts of a layer are arranged so that they look like one big texture.
The layers are partly transparent and are arranged in front of each other to create a three dimensional look.
When I run the project with only one layer enabled, everything looks fine, but as soon as I add a second (or more) layer, everything gets chaotic.
Some parts go missing, other parts have a completely wrong z-coordinate (the value itself looks fine, but the background layer is suddenly the foremost layer). Some parts even shift their x-coordinate (this also looks fine at setup, if I use NSLog to output all the square coordinates).
This is my setupVBOs function where I write the object coordinates into the VBOs. (I have only one index VBO because every square is the same, but an array of 5x4 VBOs to hold the coordinates of every single part of the layers.)
- (void)setupVBOs {
glGenBuffers(1, &_indexBufferLayer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _indexBufferLayer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(IndicesLayer), IndicesLayer, GL_STATIC_DRAW);
for (int layerNo = 1; layerNo < LAYER_COUNT + 1; ++layerNo)
{
for (int layerPart = 1; layerPart < LAYER_PARTS + 1; ++layerPart)
{
glGenBuffers(1, &_vertexBufferLayer[layerNo][layerPart]);
GLfloat x = -3.0 + (2.0 * (layerPart - 1));
GLfloat z = 0.0 + (50.0 * (layerNo - 1));
NSLog(#"Layer %d, Part %d: x=%f, z=%f", layerNo, layerPart, x, z);
// Alter the Texture slightly to
// remove errors from compression (x-Coord.)
Vertex Vertices[] = {
{{x + 1.0, -1.0, z}, {0.9865, 1.0}},
{{x + 1.0, +1.0, z}, {0.9865, 0}},
{{x - 1.0, +1.0, z}, {0.01, 0}},
{{x - 1.0, -1.0, z}, {0.01, 1.0}}
};
glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferLayer[layerNo][layerPart]);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertices), Vertices, GL_STATIC_DRAW);
x = z = 0;
}
}
}
This is my render function where I draw everything and apply the textures.
- (void)render: (CADisplayLink*)displayLink {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
CC3GLMatrix *projection = [CC3GLMatrix matrix];
float h = 4.0f * self.frame.size.height / self.frame.size.width;
[projection populateFromFrustumLeft:-1 andRight:1 andBottom:-h/4 andTop:h/4 andNear:2 andFar:500];
CC3GLMatrix *modelView = [CC3GLMatrix matrix];
// Translate the Modelviewmatrix
[modelView populateFromTranslation:CC3VectorMake(_cameraX, _cameraY, -5.0)];
// Rotate the Modelviewmatrix
[modelView rotateBy:CC3VectorMake(_currentRotation, 0, 90)];
[modelView translateByZ:_cameraZoom];
//
// Draw all layers
//
for (int layerNo = 1; layerNo < LAYER_COUNT + 1; layerNo++)
{
GLfloat layerFactor = (LAYER_COUNT + 1 - layerNo) * 22.0;
GLfloat scaleFactor = 100.0 + layerFactor;
[modelView scaleByX:scaleFactor];
[modelView scaleByY:scaleFactor];
for (int layerPart = 1; layerPart < LAYER_PARTS + 1; layerPart++)
{
glUniformMatrix4fv(_modelViewUniform, 1, 0, modelView.glMatrix);
glUniformMatrix4fv(_projectionUniform, 1, 0, projection.glMatrix);
GLuint uniformTexture = glGetUniformLocation(programHandle, "Texture");
// Bind Buffer and Texture
glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferLayer[layerNo][layerPart]);
// Activate Texturing Pipeline and Bind Texture
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, _layers[layerNo][layerPart][0]);
glUniform1i(uniformTexture, 0);
// Vertex Shader calls
glVertexAttribPointer(_positionSlot, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*) 0);
glVertexAttribPointer(_texCoordSlot, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*) (sizeof(float) * 3));
glEnableVertexAttribArray(_positionSlot);
glEnableVertexAttribArray(_texCoordSlot);
glDrawElements(GL_TRIANGLES, sizeof(IndicesLayer)/sizeof(IndicesLayer[0]), GL_UNSIGNED_BYTE, 0);
glDisableVertexAttribArray(_texCoordSlot);
glDisableVertexAttribArray(_positionSlot);
}
[modelView scaleByX:1/scaleFactor];
[modelView scaleByY:1/scaleFactor];
}
[_context presentRenderbuffer:GL_RENDERBUFFER];
}
Use:
glEnable(GL_TEXTURE_2D)
And also:
glActiveTexture — selects the active texture unit
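For illustration, a minimal sketch of what selecting texture units looks like (the texture ids colorTex and detailTex and the "Detail" uniform name are made up, not taken from the code above; "Texture" and programHandle match the question's render function):
// Bind one texture to unit 0 and another to unit 1, and point each sampler at its unit.
glActiveTexture(GL_TEXTURE0);                                      // select unit 0
glBindTexture(GL_TEXTURE_2D, colorTex);                            // bind a texture to it
glUniform1i(glGetUniformLocation(programHandle, "Texture"), 0);    // sampler reads unit 0

glActiveTexture(GL_TEXTURE1);                                      // select unit 1
glBindTexture(GL_TEXTURE_2D, detailTex);
glUniform1i(glGetUniformLocation(programHandle, "Detail"), 1);     // sampler reads unit 1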

Ray Tracing question, how to map screen coordinates to world coordinates?

I was studying Ray Tracing on http://www.devmaster.net/articles/raytracing_series/part1.php when I came across this piece of code:
void Engine::InitRender()
{
// set first line to draw to
m_CurrLine = 20;
// set pixel buffer address of first pixel
m_PPos = 20 * m_Width;
// screen plane in world space coordinates
m_WX1 = -4, m_WX2 = 4, m_WY1 = m_SY = 3, m_WY2 = -3;
// calculate deltas for interpolation
m_DX = (m_WX2 - m_WX1) / m_Width;
m_DY = (m_WY2 - m_WY1) / m_Height;
m_SY += 20 * m_DY;
// allocate space to store pointers to primitives for previous line
m_LastRow = new Primitive*[m_Width];
memset( m_LastRow, 0, m_Width * 4 );
}
I'm quite confused about how the author maps screen coordinates to world coordinates...
Can anyone please tell me how the author derived these lines?
Or tell me how one would map screen coordinates to world coordinates?
// screen plane in world space coordinates
m_WX1 = -4, m_WX2 = 4, m_WY1 = m_SY = 3, m_WY2 = -3;
Thank you in advance!
EDIT: Here is the relevant code from raytracer.cpp:
// render scene
vector3 o( 0, 0, -5 );
// initialize timer
int msecs = GetTickCount();
// reset last found primitive pointer
Primitive* lastprim = 0;
// render remaining lines
for(int y = m_CurrLine; y < (m_Height - 20); y++)
{
m_SX = m_WX1;
// render pixels for current line
for ( int x = 0; x < m_Width; x++ )
{
// fire primary ray
Color acc( 0, 0, 0 );
vector3 dir = vector3( m_SX, m_SY, 0 ) - o;
NORMALIZE( dir );
Ray r( o, dir );
float dist;
Primitive* prim = Raytrace( r, acc, 1, 1.0f, dist );
int red = (int)(acc.r * 256);
int green = (int)(acc.g * 256);
int blue = (int)(acc.b * 256);
if (red > 255) red = 255;
if (green > 255) green = 255;
if (blue > 255) blue = 255;
m_Dest[m_PPos++] = (red << 16) + (green << 8) + blue;
m_SX += m_DX;
}
m_SY += m_DY;
// see if we've been working too long already
if ((GetTickCount() - msecs) > 100)
{
// return control to windows so the screen gets updated
m_CurrLine = y + 1;
return false;
}
}
return true;
Therefore the camera is at (0,0,-5) and the screen onto which the world is being projected has top-left corner (-4,3,0) and bottom-right corner (4,-3,0).
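Putting the two snippets together, the mapping from a pixel (p_x, p_y) to a point on that world-space screen plane is plain linear interpolation (using the members set up in InitRender):

$$
x_{world} = \mathtt{m\_WX1} + p_x \cdot \frac{\mathtt{m\_WX2} - \mathtt{m\_WX1}}{\mathtt{m\_Width}}, \qquad
y_{world} = \mathtt{m\_WY1} + p_y \cdot \frac{\mathtt{m\_WY2} - \mathtt{m\_WY1}}{\mathtt{m\_Height}}, \qquad
z_{world} = 0
$$

The primary ray for that pixel then starts at the camera origin o = (0, 0, -5) and points along the normalized vector from o to (x_world, y_world, 0), which is exactly what the incremental m_SX/m_SY accumulation in the render loop computes.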

OpenCV: how to rotate IplImage?

I need to rotate an image by a very small angle, like 1-5 degrees. Does OpenCV provide a simple way of doing that? From reading the docs I assume that getAffineTransform() should be involved, but there is no direct example of doing something like:
IplImage *rotateImage( IplImage *source, double angle);
If you use OpenCV > 2.0 it is as easy as:
using namespace cv;
Mat rotateImage(const Mat& source, double angle)
{
Point2f src_center(source.cols/2.0F, source.rows/2.0F);
Mat rot_mat = getRotationMatrix2D(src_center, angle, 1.0);
Mat dst;
warpAffine(source, dst, rot_mat, source.size());
return dst;
}
Note: angle is in degrees, not radians.
See the C++ interface documentation for more details and adapt as you need:
getRotationMatrix2D
warpAffine
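A minimal usage sketch of the function above (the file names are just placeholders):
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat src = imread("lena.jpg");             // placeholder input file
    if (src.empty()) return -1;
    Mat rotated = rotateImage(src, 3.0);      // 3 degrees, counter-clockwise
    imwrite("lena_rotated.jpg", rotated);
    return 0;
}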
Edit: To the downvoter: please comment with the reason for downvoting tried and tested code.
#include "cv.h"
#include "highgui.h"
#include "math.h"
int main( int argc, char** argv )
{
IplImage* src = cvLoadImage("lena.jpg", 1);
IplImage* dst = cvCloneImage( src );
int delta = 1;
int angle = 0;
int opt = 1; // 1: rotate & zoom
// 0: rotate only
double factor;
cvNamedWindow("src", 1);
cvShowImage("src", src);
for(;;)
{
float m[6];
CvMat M = cvMat(2, 3, CV_32F, m);
int w = src->width;
int h = src->height;
if(opt)
factor = (cos(angle*CV_PI/180.) + 1.05) * 2;
else
factor = 1;
m[0] = (float)(factor*cos(-angle*2*CV_PI/180.));
m[1] = (float)(factor*sin(-angle*2*CV_PI/180.));
m[3] = -m[1];
m[4] = m[0];
m[2] = w*0.5f;
m[5] = h*0.5f;
cvGetQuadrangleSubPix( src, dst, &M);
cvNamedWindow("dst", 1);
cvShowImage("dst", dst);
if( cvWaitKey(1) == 27 )
break;
angle =(int)(angle + delta) % 360;
}
return 0;
}
UPDATE: See the following code for rotation using warpAffine
https://code.google.com/p/opencvjp-sample/source/browse/trunk/cpp/affine2_cpp.cpp?r=48
#include <cv.h>
#include <highgui.h>
using namespace cv;
int
main(int argc, char **argv)
{
// (1)load a specified file as a 3-channel color image,
// set its ROI, and allocate a destination image
const string imagename = argc > 1 ? argv[1] : "../image/building.png";
Mat src_img = imread(imagename);
if(!src_img.data)
return -1;
Mat dst_img = src_img.clone();
// (2)set ROI
Rect roi_rect(cvRound(src_img.cols*0.25), cvRound(src_img.rows*0.25), cvRound(src_img.cols*0.5), cvRound(src_img.rows*0.5));
Mat src_roi(src_img, roi_rect);
Mat dst_roi(dst_img, roi_rect);
// (2)With specified three parameters (angle, rotation center, scale)
// calculate an affine transformation matrix by cv2DRotationMatrix
double angle = -45.0, scale = 1.0;
Point2d center(src_roi.cols*0.5, src_roi.rows*0.5);
const Mat affine_matrix = getRotationMatrix2D( center, angle, scale );
// (3)rotate the image by warpAffine taking the affine matrix
warpAffine(src_roi, dst_roi, affine_matrix, dst_roi.size(), INTER_LINEAR, BORDER_CONSTANT, Scalar::all(255));
// (4)show source and destination images with a rectangle indicating ROI
rectangle(src_img, roi_rect.tl(), roi_rect.br(), Scalar(255,0,255), 2);
namedWindow("src", CV_WINDOW_AUTOSIZE);
namedWindow("dst", CV_WINDOW_AUTOSIZE);
imshow("src", src_img);
imshow("dst", dst_img);
waitKey(0);
return 0;
}
Check my answer to a similar problem:
Rotating an image in C/C++
Essentially, use cvWarpAffine - I've described how to get the 2x3 transformation matrix from the angle in my previous answer.
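Roughly, that approach with the old C API looks like the following sketch (angle and the loaded src image are assumed to already exist):
// Build a 2x3 rotation matrix about the image centre, then warp with it.
CvPoint2D32f center = cvPoint2D32f(src->width / 2.0f, src->height / 2.0f);
CvMat* rot_mat = cvCreateMat(2, 3, CV_32FC1);
cv2DRotationMatrix(center, angle, 1.0, rot_mat);   // angle in degrees, scale = 1
IplImage* dst = cvCloneImage(src);
cvWarpAffine(src, dst, rot_mat, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
cvReleaseMat(&rot_mat);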
Updating full answer for OpenCV 2.4 and up
// ROTATE p by R
/**
* Rotate p according to rotation matrix (from getRotationMatrix2D()) R
* @param R Rotation matrix from getRotationMatrix2D()
* @param p Point2f to rotate
* @return Returns rotated coordinates in a Point2f
*/
Point2f rotPoint(const Mat &R, const Point2f &p)
{
Point2f rp;
rp.x = (float)(R.at<double>(0,0)*p.x + R.at<double>(0,1)*p.y + R.at<double>(0,2));
rp.y = (float)(R.at<double>(1,0)*p.x + R.at<double>(1,1)*p.y + R.at<double>(1,2));
return rp;
}
//COMPUTE THE SIZE NEEDED TO LOSSLESSLY STORE A ROTATED IMAGE
/**
* Return the size needed to contain bounding box bb when rotated by R
* @param R Rotation matrix from getRotationMatrix2D()
* @param bb bounding box rectangle to be rotated by R
* @return Size of image (width, height) that will completely contain bb when rotated by R
*/
Size rotatedImageBB(const Mat &R, const Rect &bb)
{
//Rotate the rectangle coordinates
vector<Point2f> rp;
rp.push_back(rotPoint(R,Point2f(bb.x,bb.y)));
rp.push_back(rotPoint(R,Point2f(bb.x + bb.width,bb.y)));
rp.push_back(rotPoint(R,Point2f(bb.x + bb.width,bb.y+bb.height)));
rp.push_back(rotPoint(R,Point2f(bb.x,bb.y+bb.height)));
//Find float bounding box r
float x = rp[0].x;
float y = rp[0].y;
float left = x, right = x, up = y, down = y;
for(int i = 1; i<4; ++i)
{
x = rp[i].x;
y = rp[i].y;
if(left > x) left = x;
if(right < x) right = x;
if(up > y) up = y;
if(down < y) down = y;
}
int w = (int)(right - left + 0.5);
int h = (int)(down - up + 0.5);
return Size(w,h);
}
/**
* Rotate region "fromroi" in image "fromI" a total of "angle" degrees and put it in "toI" if toI exists.
* If toI doesn't exist, create it such that it will hold the entire rotated region. Return toI, the rotated image.
* This will put the rotated fromroi piece of fromI into the toI image
*
* @param fromI Input image to be rotated
* @param toI Output image if provided (else if &toI = 0, it will create a Mat, fill it with the rotated image roi, and return it).
* @param fromroi roi region in fromI to be rotated.
* @param angle Angle in degrees to rotate
* @return Rotated image (you can ignore it if you passed in toI)
*/
Mat rotateImage(const Mat &fromI, Mat *toI, const Rect &fromroi, double angle)
{
//CHECK STUFF
// you should protect against bad parameters here ... omitted ...
//MAKE OR GET THE "toI" MATRIX
Point2f cx((float)fromroi.x + (float)fromroi.width/2.0,fromroi.y +
(float)fromroi.height/2.0);
Mat R = getRotationMatrix2D(cx,angle,1);
Mat rotI;
if(toI)
rotI = *toI;
else
{
Size rs = rotatedImageBB(R, fromroi);
rotI.create(rs,fromI.type());
}
//ADJUST FOR SHIFTS
double wdiff = (double)((cx.x - rotI.cols/2.0));
double hdiff = (double)((cx.y - rotI.rows/2.0));
R.at<double>(0,2) -= wdiff; //Adjust the rotation point to the middle of the dst image
R.at<double>(1,2) -= hdiff;
//ROTATE
warpAffine(fromI, rotI, R, rotI.size(), INTER_CUBIC, BORDER_CONSTANT, Scalar::all(0));
//& OUT
return(rotI);
}
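A possible way to call it (the file name and angle are just illustrative):
// Hypothetical usage: rotate the whole image by 5 degrees and let the function
// allocate an output Mat large enough to hold the rotated result.
Mat img = imread("input.png");
Rect wholeImage(0, 0, img.cols, img.rows);
Mat rotated = rotateImage(img, 0, wholeImage, 5.0);   // pass 0 for toI
imshow("rotated", rotated);
waitKey(0);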
IplImage* rotate(double angle, float centreX, float centreY, IplImage* src, bool crop)
{
int w=src->width;
int h=src->height;
CvPoint2D32f centre;
centre.x = centreX;
centre.y = centreY;
CvMat* warp_mat = cvCreateMat(2, 3, CV_32FC1);
cv2DRotationMatrix(centre, angle, 1.0, warp_mat);
double m11= cvmGet(warp_mat,0,0);
double m12= cvmGet(warp_mat,0,1);
double m13= cvmGet(warp_mat,0,2);
double m21= cvmGet(warp_mat,1,0);
double m22= cvmGet(warp_mat,1,1);
double m23= cvmGet(warp_mat,1,2);
double m31= 0;
double m32= 0;
double m33= 1;
double x=0;
double y=0;
double u0= (m11*x + m12*y + m13)/(m31*x + m32*y + m33);
double v0= (m21*x + m22*y + m23)/(m31*x + m32*y + m33);
x=w;
y=0;
double u1= (m11*x + m12*y + m13)/(m31*x + m32*y + m33);
double v1= (m21*x + m22*y + m23)/(m31*x + m32*y + m33);
x=0;
y=h;
double u2= (m11*x + m12*y + m13)/(m31*x + m32*y + m33);
double v2= (m21*x + m22*y + m23)/(m31*x + m32*y + m33);
x=w;
y=h;
double u3= (m11*x + m12*y + m13)/(m31*x + m32*y + m33);
double v3= (m21*x + m22*y + m23)/(m31*x + m32*y + m33);
int left= MAX(MAX(u0,u2),0);
int right= MIN(MIN(u1,u3),w);
int top= MAX(MAX(v0,v1),0);
int bottom= MIN(MIN(v2,v3),h);
ASSERT(left<right&&top<bottom); // throw message?
if (left<right&&top<bottom)
{
IplImage* dst= cvCreateImage( cvGetSize(src), IPL_DEPTH_8U, src->nChannels);
cvWarpAffine(src, dst, warp_mat/*, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0)*/);
if (crop) // crop and resize to initial size
{
IplImage* dst_crop= cvCreateImage(cvSize(right-left, bottom-top), IPL_DEPTH_8U, src->nChannels);
cvSetImageROI(dst,cvRect(left,top,right-left,bottom-top));
cvCopy(dst,dst_crop);
cvReleaseImage(&dst);
cvReleaseMat(&warp_mat);
//ver1
//return dst_crop;
// ver2 resize
IplImage* out= cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, src->nChannels);
cvResize(dst_crop,out);
cvReleaseImage(&dst_crop);
return out;
}
else
{
/*cvLine( dst, cvPoint(left,top),cvPoint(left, bottom), cvScalar(0, 0, 255, 0) ,1,CV_AA);
cvLine( dst, cvPoint(right,top),cvPoint(right, bottom), cvScalar(0, 0, 255, 0) ,1,CV_AA);
cvLine( dst, cvPoint(left,top),cvPoint(right, top), cvScalar(0, 0, 255, 0) ,1,CV_AA);
cvLine( dst, cvPoint(left,bottom),cvPoint(right, bottom), cvScalar(0, 0, 255, 0) ,1,CV_AA);*/
cvReleaseMat(&warp_mat);
return dst;
}
}
else
{
return NULL; //assert?
}
}