I have a square image of size 320x320 from which I create an OpenGL texture. I use the most basic vertex and fragment shaders, and I want to display the texture across the entire view. The view (an EAGLView derived from UIView, as found in many OpenGL iOS samples) is also 320x320.
The problem is that the image is drawn in the top-left corner, covering only around 50% of the view instead of all of it. Why is that?
Here is my code:
position = glGetAttribLocation(m_shaderProgram, "position");
inputTextureCoordinate = glGetAttribLocation(m_shaderProgram, "inputTextureCoordinate");
inputImageTexture = glGetUniformLocation(m_shaderProgram, "inputImageTexture");
static const GLfloat textureCoordinates[] = {
0.0f, 1.0f,
1.0f, 1.0f,
0.0f, 0.0f,
1.0f, 0.0f,
};
static const GLfloat imageVertices[] = {
-1.0f, -1.0f,
1.0f, -1.0f,
-1.0f, 1.0f,
1.0f, 1.0f,
};
[EAGLContext setCurrentContext:context];
glViewport(0, 0, backingWidth, backingHeight); // These are 320, 320
glUseProgram(m_shaderProgram);
glClearColor(0, 0, 0, 1);
glClear(GL_COLOR_BUFFER_BIT);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, sourceTextureID); // The texture is also of size 320x320
glUniform1i(inputImageTexture, 2);
glVertexAttribPointer(position, 2, GL_FLOAT, 0, 0, imageVertices);
glVertexAttribPointer(inputTextureCoordinate, 2, GL_FLOAT, 0, 0, textureCoordinates);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glBindRenderbufferOES(GL_RENDERBUFFER_OES, viewRenderbuffer);
[context presentRenderbuffer:GL_RENDERBUFFER_OES];
Vertex Shader.
attribute vec4 position;
attribute vec4 inputTextureCoordinate;
varying vec2 textureCoordinate;
void main()
{
gl_Position = position;
textureCoordinate = inputTextureCoordinate.xy;
}
Fragment Shader.
varying highp vec2 textureCoordinate;
uniform sampler2D inputImageTexture;
void main()
{
gl_FragColor = texture2D(inputImageTexture, textureCoordinate);
}
It turned out that the problem was that the texture's dimensions were not a power of two, so the texture coordinates need to be scaled accordingly. Inserting the following lines solved the problem:
GLfloat textureCoordinates[] = {
0.0f, 1.0f,
1.0f, 1.0f,
0.0f, 0.0f,
1.0f, 0.0f,
};
// Scale the S coordinates by width / nextPowerOfTwo(width).
float nearest2sPower = 2;
while (nearest2sPower < backingWidth) {
nearest2sPower *= 2;
}
textureCoordinates[2] = backingWidth / nearest2sPower;
textureCoordinates[6] = backingWidth / nearest2sPower;
// Scale the T coordinates by height / nextPowerOfTwo(height).
nearest2sPower = 2;
while (nearest2sPower < backingHeight) {
nearest2sPower *= 2;
}
textureCoordinates[1] = backingHeight / nearest2sPower;
textureCoordinates[3] = backingHeight / nearest2sPower;
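As an aside, and depending on how sourceTextureID was created, OpenGL ES 2.0 can also sample non-power-of-two textures directly, provided the wrap mode is GL_CLAMP_TO_EDGE and no mipmaps are used. A minimal sketch of the texture parameters that make this work:
// NPOT textures in OpenGL ES 2.0 require CLAMP_TO_EDGE wrapping and a
// non-mipmapped minification filter; otherwise the texture is incomplete
// and samples as black.
glBindTexture(GL_TEXTURE_2D, sourceTextureID);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
With those parameters the original 0..1 texture coordinates can be kept as they are.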
I am trying to set a .png file as my background in OpenGL, with the hope of drawing more objects on top of it. I have a 320x480 RGB image, and I set up my vertices as follows:
- (void)drawView:(GLView*)view;
{
static GLfloat rot = 0.0;
glColor4f(0.0, 0.0, 0.0, 0.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
// Vertices
static const Vertex3D vertices[] = {
{-1.0, 1.0, -0.0},
{ 1.0, 1.0, -0.0},
{-1.0, -1.0, -0.0},
{ 1.0, -1.0, -0.0}
};
// Normals
static const Vector3D normals[] = {
{0.0f, 0.0f, 1.0f},
{0.0f, 0.0f, 1.0f},
{0.0f, 0.0f, 1.0f},
{0.0f, 0.0f, 1.0f}
};
// Tex Coords
static const GLfloat texCoords[] = {
0.1875f, 0.03125f, // image is 320x480, but texture is 512x512. Image is offset by 96x32
0.1875f, 0.96875,
0.8125f, 0.03125f,
0.8125f, 0.96875
};
glLoadIdentity();
glTranslatef(0.0, 0.0, -3.0);
glScalef(320.0f / 512.0f, 480.0f / 512.0f, 1.0f);
glBindTexture(GL_TEXTURE_2D, texture[0]);
glVertexPointer(3, GL_FLOAT, 0, vertices);
glNormalPointer(GL_FLOAT, 0, normals);
glTexCoordPointer(2, GL_FLOAT, 0, texCoords);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_NORMAL_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
static NSTimeInterval lastDrawTime;
if (lastDrawTime)
{
NSTimeInterval timeSinceLastDraw = [NSDate timeIntervalSinceReferenceDate] - lastDrawTime;
rot+= 60 * timeSinceLastDraw;
}
lastDrawTime = [NSDate timeIntervalSinceReferenceDate];
}
-(void)setupView:(GLView*)view
{
const GLfloat zNear = 0.01, zFar = 1000.0, fieldOfView = 45.0;
GLfloat size;
glEnable(GL_DEPTH_TEST);
glMatrixMode(GL_PROJECTION);
size = zNear * tanf(DEGREES_TO_RADIANS(fieldOfView) / 2.0);
CGRect rect = view.bounds;
glFrustumf(-size, size, -size / (rect.size.width / rect.size.height), size /
(rect.size.width / rect.size.height), zNear, zFar);
glViewport(0, 0, rect.size.width, rect.size.height);
glMatrixMode(GL_MODELVIEW);
// Turn necessary features on
glEnable(GL_TEXTURE_2D);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_SRC_COLOR);
//glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
// Bind the number of textures we need, in this case one.
glGenTextures(1, &texture[0]);
glBindTexture(GL_TEXTURE_2D, texture[0]);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
NSString *path = [[NSBundle mainBundle] pathForResource:@"texture" ofType:@"png"];
NSData *texData = [[NSData alloc] initWithContentsOfFile:path];
UIImage *image = [[UIImage alloc] initWithData:texData];
if (image == nil)
NSLog(#"Do real error checking here");
GLuint width = CGImageGetWidth(image.CGImage);
GLuint height = CGImageGetHeight(image.CGImage);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
void *imageData = malloc( height * width * 4 );
CGContextRef context = CGBitmapContextCreate( imageData, width, height, 8, 4 * width, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big );
// Flip the Y-axis
CGContextTranslateCTM (context, 0, height);
CGContextScaleCTM (context, 1.0, -1.0);
CGColorSpaceRelease( colorSpace );
CGContextClearRect( context, CGRectMake( 0, 0, width, height ) );
CGContextDrawImage( context, CGRectMake( 0, 0, width, height ), image.CGImage );
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageData);
CGContextRelease(context);
free(imageData);
[image release];
[texData release];
glEnable(GL_LIGHTING);
// Turn the first light on
glEnable(GL_LIGHT0);
// Define the ambient component of the first light
static const Color3D light0Ambient[] = {{0.4, 0.4, 0.4, 1.0}};
glLightfv(GL_LIGHT0, GL_AMBIENT, (const GLfloat *)light0Ambient);
// Define the diffuse component of the first light
static const Color3D light0Diffuse[] = {{0.8, 0.8, 0.8, 1.0}};
glLightfv(GL_LIGHT0, GL_DIFFUSE, (const GLfloat *)light0Diffuse);
// Define the position of the first light
// const GLfloat light0Position[] = {10.0, 10.0, 10.0};
static const Vertex3D light0Position[] = {{10.0, 10.0, 10.0}};
glLightfv(GL_LIGHT0, GL_POSITION, (const GLfloat *)light0Position);
}
When I run the code, it shows a less-than-full-screen grey box instead of my image. Any ideas what I am doing wrong?
I am trying to draw a rectangle using the GLPaint example project provided by Apple. I have tried modifying the vertices but cannot get a rectangle to appear on the screen. The finger painting works perfectly. Am I missing something in my renderRect method?
- (void)renderRect {
[EAGLContext setCurrentContext:context];
glBindFramebufferOES(GL_FRAMEBUFFER_OES, viewFramebuffer);
// Replace the implementation of this method to do your own custom drawing.
static const GLfloat squareVertices[] = {
-0.5f, -0.33f,
0.5f, -0.33f,
-0.5f, 0.33f,
0.5f, 0.33f,
};
static float transY = 0.0f;
glTranslatef(0.0f, (GLfloat)(sinf(transY)/2.0f), 0.0f);
// Render the vertex array
glVertexPointer(2, GL_FLOAT, 0, squareVertices);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
// Display the buffer
glBindRenderbufferOES(GL_RENDERBUFFER_OES, viewRenderbuffer);
[context presentRenderbuffer:GL_RENDERBUFFER_OES];
}
The rest of the project is stock, set up to allow drawing on the screen, but for reference these are the GL settings that are used.
// Set the view's scale factor
self.contentScaleFactor = 1.0;
// Setup OpenGL states
glMatrixMode(GL_PROJECTION);
CGRect frame = self.bounds;
CGFloat scale = self.contentScaleFactor;
// Setup the view port in Pixels
glOrthof(0, frame.size.width * scale, 0, frame.size.height * scale, -1, 1);
glViewport(0, 0, frame.size.width * scale, frame.size.height * scale);
glMatrixMode(GL_MODELVIEW);
glDisable(GL_DITHER);
glEnable(GL_TEXTURE_2D);
glEnableClientState(GL_VERTEX_ARRAY);
glEnable(GL_BLEND);
// Set a blending function appropriate for premultiplied alpha pixel data
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_POINT_SPRITE_OES);
glTexEnvf(GL_POINT_SPRITE_OES, GL_COORD_REPLACE_OES, GL_TRUE);
glPointSize(width / brushScale);
static const GLfloat squareVertices[] = {
30.0f, 300.0f,//-0.5f, -0.33f,
280.0f, 300.0f,//0.5f, -0.33f,
30.0f, 170.0f,//-0.5f, 0.33f,
280.0f, 170.0f,//0.5f, 0.33f,
};
That's definitely too much. OpenGL uses normalized device coordinates in the range [-1, 1], so you have to convert device coordinates to normalized ones.
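For illustration, a minimal sketch of that conversion, assuming the default (identity) projection and a drawable of viewWidth x viewHeight (placeholder names):
// Convert a point in view coordinates (origin top-left, y pointing down)
// to normalized device coordinates in [-1, 1] (origin centre, y pointing up).
GLfloat ndcX = 2.0f * pointX / viewWidth - 1.0f;
GLfloat ndcY = 1.0f - 2.0f * pointY / viewHeight;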
Issues are:
(1) the following code:
glMatrixMode(GL_PROJECTION);
CGRect frame = self.bounds;
CGFloat scale = self.contentScaleFactor;
// Setup the view port in Pixels
glOrthof(0, frame.size.width * scale, 0, frame.size.height * scale, -1, 1);
glViewport(0, 0, frame.size.width * scale, frame.size.height * scale);
Establishes that the on-screen coordinates range from (0, 0) in the lower left to frame.size in the upper right. In other words, one OpenGL unit is one iPhone point. So your array of:
static const GLfloat squareVertices[] = {
-0.5f, -0.33f,
0.5f, -0.33f,
-0.5f, 0.33f,
0.5f, 0.33f,
};
Is less than 1 pixel in size.
(2) you have the following in the setup:
brushImage = [UIImage imageNamed:@"Particle.png"].CGImage;
/* ...brushImage eventually becomes the current texture... */
glEnable(GL_TEXTURE_2D);
You subsequently fail to supply texture coordinates for your quad. Probably you want to disable GL_TEXTURE_2D.
So the following:
static const GLfloat squareVertices[] = {
0.0f, 0.0f,
0.0, 10.0f,
90.0, 0.0f,
90.0f, 10.0f,
};
glDisable(GL_TEXTURE_2D);
glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
// Render the vertex array
glVertexPointer(2, GL_FLOAT, 0, squareVertices);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
Will produce a white quad 90 points wide and 10 points tall in the lower left of the screen.
I'm having a lot of trouble getting texture maps to work in OpenGL ES (iPhone).
Here's what I've done:
built an array of vertexes
built an array of faces that reference the indices of the array of vertexes for each face
built an array of colors so I can be sure I know which vertex on the cube is which.
All of this follows Jeff Lamarche's tutorials. Getting the objects rendering and moving is not a problem.
Now I'm trying to get the cube (actually a tile, narrower in Z than in X or Y) to stick a texture on two opposite faces (the others can come later). I have been able to get one face to work, but I am not getting workable results on any other face.
What is the most systematic way to texture map an object in OpenGL ES, and can anyone see where the errors in my code are?
#import "GLViewController.h"
#import "ConstantsAndMacros.h"
#import "OpenGLCommon.h"
#import "Cube.h"
@implementation GLViewController
@synthesize initDone;
@synthesize tileArray;
@synthesize tileRows;
@synthesize tileCols;
@synthesize cubes;
@synthesize gridOffsetX;
@synthesize gridOffsetY;
@synthesize gridOffsetZ;
@synthesize tileSpacing;
- (void)drawView:(UIView *)theView
{
static GLfloat rot = 0.0;
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
// This is the same result as using Vertex3D, just faster to type and
// can be made const this way
static const Vertex3D vertices[]= {
{1.0f, -1.0f, 0.2f},
{1.0f, -1.0f, -0.2f},
{1.0f, 1.0f, -0.2f},
{1.0f, 1.0f, 0.2f},
{-1.0f, -1.0f, 0.2f},
{-1.0f, -1.0f, -0.2f},
{-1.0f, 1.0f, -0.2f},
{-1.0f, 1.0f, 0.2f}
};
static const Color3D colors[] = {
{1.0, 0.0, 0.0, 20.0},
{1.0, 1.0, 1.0, 20.0},
{1.0, 1.0, 1.0, 20.0},
{0.0, 0.0, 1.0, 20.0},
{0.0, 1.0, 0.0, 20.0},
{1.0, 1.0, 1.0, 20.0},
{1.0, 1.0, 1.0, 20.0},
{1.0, 1.0, 1.0, 20.0},
};
static const GLubyte cubeFaces[] = {
0, 1, 3,
2, 3, 1,
0, 3, 4,
3, 4, 7, // first main face
2, 1, 6, // second main face
1, 6, 5,
5, 6, 7,
5, 4, 7,
7, 6, 3,
6, 3, 2,
4, 0, 5,
1, 0, 5,
};
static const Vector3D normals[] = {
{0.200000, -0.400000, 0.000000},
{0.400000, -0.200000, -0.400000},
{0.333333, 0.333333, -0.333333},
{0.400000, 0.400000, -0.200000},
{-0.333333, -0.333333, 0.333333},
{-0.400000, -0.400000, -0.200000},
{-0.200000, 0.400000, -0.400000},
{-0.400000, 0.200000, 0.000000},
};
static const GLfloat texCoords[] = {
0.0, 0.0, // texture face
1.0, 1.0,
0.0, 1.0,
1.0, 1.0,
0.0, 0.0,
1.0, 0.0,
0.0, 0.0, // texture face
1.0, 1.0,
1.0, 0.0,
1.0, 0.0,
0.0, 1.0,
1.0, 1.0,
0.0, 0.0, // texture face
1.0, 1.0,
0.0, 1.0,
1.0, 1.0,
0.0, 0.0,
1.0, 0.0,
0.0, 0.0, // texture face
1.0, 1.0,
0.0, 1.0,
1.0, 1.0,
0.0, 0.0,
1.0, 0.0,
0.0, 0.0, // texture face
1.0, 1.0,
0.0, 1.0,
1.0, 1.0,
0.0, 0.0,
1.0, 0.0,
0.0, 0.0, //
1.0, 1.0,
0.0, 1.0,
1.0, 1.0,
0.0, 0.0,
1.0, 0.0,
};
glTexCoordPointer(2, GL_FLOAT, 0, texCoords);
glLoadIdentity();
glClearColor(0.7, 0.7, 0.7, 1.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glVertexPointer(3, GL_FLOAT, 0, vertices);
glColorPointer(4, GL_FLOAT, 0, colors);
glNormalPointer(GL_FLOAT, 0, normals);
glTexCoordPointer(2, GL_FLOAT, 0, texCoords);
NSMutableArray *tempRow;
Cube *tempCube;
for (int i = 1; i <= cubes.tileRows; i++)
{
tempRow = [cubes rowAtIndex:i-1];
for (int j = 1; j <= cubes.tileCols; j++)
{
tempCube = [tempRow objectAtIndex:j-1];
glLoadIdentity();
glTranslatef(gridOffsetX + (tileSpacing * (GLfloat)i), gridOffsetY + (tileSpacing * (GLfloat)j), gridOffsetZ);
glRotatef(rot, 1.0, 0.0, 0);
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_BYTE, cubeFaces);
}
}
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glDisableClientState(GL_NORMAL_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
static NSTimeInterval lastDrawTime;
if (lastDrawTime)
{
NSTimeInterval timeSinceLastDraw = [NSDate timeIntervalSinceReferenceDate] - lastDrawTime;
rot+=30 * timeSinceLastDraw;
}
//NSLog(#"rot is %f", rot);
lastDrawTime = [NSDate timeIntervalSinceReferenceDate];
}
-(void)setupView:(GLView*)view
{
initDone = NO;
tileRows = 5;
tileCols = 7;
gridOffsetX = 5.2f;
gridOffsetY = 6.9f;
gridOffsetZ = -14.0;
tileSpacing = -2.15f;
cubes = [[Cubes alloc] initWithRowCount:tileRows colCount: tileCols ];
const GLfloat zNear = 0.01, zFar = 1000.0, fieldOfView = 50.0;
GLfloat size;
glEnable(GL_DEPTH_TEST);
glMatrixMode(GL_PROJECTION);
size = zNear * tanf(DEGREES_TO_RADIANS(fieldOfView) / 2.0);
CGRect rect = view.bounds;
// glOrthof(-5.0, // Left
// 5.0, // Right
// -5.0 / (rect.size.width / rect.size.height), // Bottom
// 5.0 / (rect.size.width / rect.size.height), // Top
// 0.01, // Near
// 10000.0); // Far
glFrustumf(-size, size, -size / (rect.size.width / rect.size.height), size /
(rect.size.width / rect.size.height), zNear, zFar);
glViewport(0, 0, rect.size.width, rect.size.height);
glMatrixMode(GL_MODELVIEW);
glEnable(GL_COLOR_MATERIAL);
// Enable lighting
glEnable(GL_LIGHTING);
// Turn the first light on
glEnable(GL_LIGHT0);
// Define the ambient component of the first light
const GLfloat light0Ambient[] = {0.5, 0.5, 0.5, 1.0};
glLightfv(GL_LIGHT0, GL_AMBIENT, light0Ambient);
// Define the diffuse component of the first light
const GLfloat light0Diffuse[] = {0.7, 0.7, 0.7, 1.0};
glLightfv(GL_LIGHT0, GL_DIFFUSE, light0Diffuse);
// Define the specular component and shininess of the first light
const GLfloat light0Specular[] = {0.7, 0.7, 0.7, 1.0};
const GLfloat light0Shininess = 0.4;
glLightfv(GL_LIGHT0, GL_SPECULAR, light0Specular);
// Define the position of the first light
const GLfloat light0Position[] = {0.0, 10.0, 10.0, 0.0};
glLightfv(GL_LIGHT0, GL_POSITION, light0Position);
// Define a direction vector for the light, this one points right down the Z axis
const GLfloat light0Direction[] = {0.0, 0.0, -1.0};
glLightfv(GL_LIGHT0, GL_SPOT_DIRECTION, light0Direction);
// Define a cutoff angle. This defines a 90° field of vision, since the cutoff
// is number of degrees to each side of an imaginary line drawn from the light's
// position along the vector supplied in GL_SPOT_DIRECTION above
glLightf(GL_LIGHT0, GL_SPOT_CUTOFF, 45.0);
glEnable(GL_TEXTURE_2D);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_SRC_COLOR);
glGenTextures(1, &texture[0]);
glBindTexture(GL_TEXTURE_2D, texture[0]);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
NSString *path = [[NSBundle mainBundle] pathForResource:@"a-tile-64" ofType:@"png"];
NSData *texData = [[NSData alloc] initWithContentsOfFile:path];
UIImage *image = [[UIImage alloc] initWithData:texData];
if (image == nil)
NSLog(#"Do real error checking here");
GLuint width = CGImageGetWidth(image.CGImage);
GLuint height = CGImageGetHeight(image.CGImage);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
void *imageData = malloc( height * width * 4 );
CGContextRef context = CGBitmapContextCreate( imageData, width, height, 8, 4 * width, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big );
CGColorSpaceRelease( colorSpace );
CGContextClearRect( context, CGRectMake( 0, 0, width, height ) );
CGContextTranslateCTM( context, 0, height - height );
CGContextDrawImage( context, CGRectMake( 0, 0, width, height ), image.CGImage );
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageData);
CGContextRelease(context);
free(imageData);
[image release];
[texData release];
glLoadIdentity();
};
- (void)dealloc
{
[tileArray release];
[cubes release];
[super dealloc];
}
@end
I also kick-started OpenGL ES using Jeff's tutorials.
I would suggest simplifying what you're trying to do. For example:
Forget the Normals
Forget the Colors
Forget the indices
Create a structure that binds the vertices to their attributes
Jeff provides a useful TexturedVertexData3D Struct that does this. You don't have to fill in the normal part if you don't want to.
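For reference, here is a minimal sketch of what such an interleaved struct can look like; the field names are my assumption rather than the tutorial's exact definition, and Vertex3D/Vector3D are the structs the question's code already uses:
// Interleaved per-vertex data: position first, then normal, then texcoords.
typedef struct {
    Vertex3D vertex;        // position; must come first for the glVertexPointer call below
    Vector3D normal;        // may stay zeroed while lighting is ignored
    GLfloat  texCoords[2];  // s, t
} TexturedVertexData3D;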
Then, set up your strides appropriately:
glVertexPointer(3, GL_FLOAT, sizeof(TexturedVertexData3D), &vertices[0]);
glTexCoordPointer(2, GL_FLOAT, sizeof(TexturedVertexData3D), &vertices[0].texCoords);
And use glDrawArrays to draw your object:
glDrawArrays(GL_TRIANGLES, 0, nVertices);
Once you have this working, go ahead and add the normals and colors into the TexturedVertexData3D struct, and set your texture and color pointers appropriately. Then test again, or post an update if things don't work.
At this point you can start to think about how to use indices. Indices don't really make sense until you are rendering thousands of vertices. But, when the time comes, you can get a nice performance increase by using them.
I've been watching this question as I'm learning OpenGL at the moment and will be moving on to OpenGL ES. As nobody has answered yet, I'll give you my thoughts, don't consider this an expert opinion though.
Something to consider is that, as it stands, your Vertex, Color and Normal arrays contain 8 'items', but your TexCoord array has 36 'items'. I'm pretty sure that when you use glDrawElements with a list of indices, it uses those indices to pick items from ALL of the activated arrays. So the last 28 items of your TexCoord array will never be used; the texture coordinates will be picked out according to the indices specified by cubeFaces, just like the other arrays. In the tutorial you linked, there are four items in all the arrays, which works nicely for a single face of an object.
However this is a bit of an issue with using indices for 3D objects, because although several vertices are re-used in a cube, their texture co-ordinates will not necessarily be the same for the different triangles they are used for. In fact neither will their normals, so that may be another issue with your code when it comes to lighting the object.
I don't know what the best solution to this is, which is why I'm interested in any other answers to this question... drawing the cube vertex by vertex is an option. I'm also wondering whether it's possible to draw each face of the cube separately, and change the TexCoord array each time. Or perhaps there is some easier or standard way of doing this kind of thing that I am not yet aware of!
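To make the per-face idea concrete, here is a hedged sketch of drawing a single face with its own four texture coordinates via glDrawArrays, with no index buffer involved (the vertex values match the tile's front face from the question):
// The front face of the tile, drawn on its own so it can carry its own texcoords.
static const Vertex3D faceVertices[] = {
    {-1.0f, -1.0f, 0.2f},
    { 1.0f, -1.0f, 0.2f},
    {-1.0f,  1.0f, 0.2f},
    { 1.0f,  1.0f, 0.2f},
};
static const GLfloat faceTexCoords[] = {
    0.0f, 0.0f,
    1.0f, 0.0f,
    0.0f, 1.0f,
    1.0f, 1.0f,
};
glVertexPointer(3, GL_FLOAT, 0, faceVertices);
glTexCoordPointer(2, GL_FLOAT, 0, faceTexCoords);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
// Repeat with a different vertex/texcoord set for each remaining face.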
I'm new to OpenGL, so I'm sure this is a dummy mistake, but I've read every post and reviewed sample code, and I can't find a difference explaining why glFrustum won't display as I'd like it to.
I initialize OpenGL like:
- (void) initOpenGL{
glEnable(GL_DEPTH_TEST);
glMatrixMode(GL_PROJECTION);
//glLoadIdentity();
//glOrthof(0.0f, self.bounds.size.width, self.bounds.size.height, 0.0f, -10.0f, 10.0f);
const GLfloat zNear = -0.1, zFar = 1000.0, fieldOfView = 60.0;
GLfloat size;
size = zNear * tanf(DEGREES_TO_RADIANS(fieldOfView) / 2.0);
// This give us the size of the iPhone display
CGRect rect = self.bounds;
glFrustumf(-size, size, -size / (rect.size.width / rect.size.height), size / (rect.size.width / rect.size.height), zNear, zFar);
glViewport(0, 0, rect.size.width, rect.size.height);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
#if 0
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
#endif
glEnable(GL_TEXTURE_2D);
glEnable(GL_BLEND);
#if TARGET_IPHONE_SIMULATOR
glColor4f(0.0, 0.0, 0.0, 0.0f);
#else
glColor4f(0.0, 0.0, 0.0, 0.0f);
#endif
[[Texture2D alloc] initWithImage:[UIImage imageNamed:@"GreenLineTex.png"] filter:GL_LINEAR];
glInitialised = YES;
}
And my drawing is done like this:
- (void)drawView {
if(!glInitialised) {
[self initOpenGL];
}
[EAGLContext setCurrentContext:context];
glBindFramebufferOES(GL_FRAMEBUFFER_OES, viewFramebuffer);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glEnable(GL_TEXTURE_2D);
static const GLfloat texCoords[] = {
0.0, 0.0,
1.0, 0.0,
0.0, 1.0,
1.0, 1.0
};
// draw the edges
glEnableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glTexCoordPointer(2, GL_FLOAT, 0, texCoords);
glBindTexture(GL_TEXTURE_2D, 1);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glBlendFunc(GL_SRC_ALPHA, GL_DST_ALPHA);
for (int i = 0; i < connectionNumber; i++){
Vertex2DSet(&vertices[0], connectionLines[i].lineVertexBeginPoint.x, connectionLines[i].lineVertexBeginPoint.y);
Vertex2DSet(&vertices[1], connectionLines[i].lineVertexBeginPoint.x+connectionLines[i].normalVector.x, connectionLines[i].lineVertexBeginPoint.y+connectionLines[i].normalVector.y);
Vertex2DSet(&vertices[2], connectionLines[i].lineVertexEndPoint.x, connectionLines[i].lineVertexEndPoint.y);
Vertex2DSet(&vertices[3], connectionLines[i].lineVertexEndPoint.x+connectionLines[i].normalVector.x, connectionLines[i].lineVertexEndPoint.y+connectionLines[i].normalVector.y);
glVertexPointer(2, GL_FLOAT, 0, vertices);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}
glBindRenderbufferOES(GL_RENDERBUFFER_OES, viewRenderbuffer);
[context presentRenderbuffer:GL_RENDERBUFFER_OES];
}
Where the block in the for loop is a set of vertices that make up some triangle strips.
If I uncomment the glOrthof() line, then I can see my display; however, it's orthographic, and I'd like to move the camera in and out to change the scaling of the whole scene.
What have I done incorrectly that causes glFrustumf() to display only the clear color?
Short answer: you are looking in the wrong direction.
Long answer:
Your frustum is symmetric while your orthographic matrix isn't. So if your model is set up to be visible in the glOrtho case, it may not be visible with your glFrustum.
Also you shouldn't use glOrtho AND glFrustum together, because the matrices are multiplied and will surely yield a funny projection matrix.
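Putting both points together, here is an illustrative sketch of a working perspective setup, not a drop-in fix for your project; note that glFrustum also requires a positive near plane, so size should be computed from that positive value:
// Symmetric perspective projection with a positive near plane.
const GLfloat zNear = 0.1f, zFar = 1000.0f, fieldOfView = 60.0f;
GLfloat size = zNear * tanf(DEGREES_TO_RADIANS(fieldOfView) / 2.0f);
GLfloat aspect = rect.size.width / rect.size.height;
glMatrixMode(GL_PROJECTION);
glLoadIdentity();                 // don't stack glOrtho and glFrustum on top of each other
glFrustumf(-size, size, -size / aspect, size / aspect, zNear, zFar);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.0f, 0.0f, -5.0f);  // the camera looks down -Z; move the scene in front of it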
You can use Nate Robins' GL tutors at http://www.xmission.com/~nate/tutors.html to experiment with glFrustum and glOrtho (in the "projection" application).