I'm developing a painting app for iPhone and iPad, using Apple's GLPaint sample as a reference.
I am working on the brush effect. I want my app to produce a brush stroke like the one shown in image 1, but so far I only get a stroke similar to image 2.
I am using the following code for the brush texture:
CGImageRef brushImage;
CGContextRef brushContext;
GLubyte *brushData;
size_t width, height;
if (UI_USER_INTERFACE_IDIOM() == UIUserInterfaceIdiomPad) {
    brushImage = [UIImage imageNamed:@"flower@2x.png"].CGImage;
}
else {
    brushImage = [UIImage imageNamed:@"flower.png"].CGImage;
}
width = CGImageGetWidth(brushImage);
height = CGImageGetHeight(brushImage);
if (brushImage) {
    brushData = (GLubyte *) calloc(width * height * 4, sizeof(GLubyte));
    brushContext = CGBitmapContextCreate(brushData, width, height, 8, width * 4, CGImageGetColorSpace(brushImage), kCGImageAlphaPremultipliedLast);
    CGContextDrawImage(brushContext, CGRectMake(0.0, 0.0, (CGFloat)width, (CGFloat)height), brushImage);
    CGContextRelease(brushContext);
    glGenTextures(1, &brushTexture);
    glBindTexture(GL_TEXTURE_2D, brushTexture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, brushData);
    free(brushData);
}
CGFloat scale;
scale = self.contentScaleFactor;
glMatrixMode(GL_PROJECTION);
CGRect frame = self.bounds;
glLoadIdentity();
glOrthof(0, (frame.size.width) * scale, 0, (frame.size.height) * scale, -1, 1);
glViewport(0, 0, (frame.size.width) * scale, (frame.size.height) * scale);
glMatrixMode(GL_MODELVIEW);
glDisable(GL_DITHER);
glEnable(GL_BLEND);
glEnable(GL_TEXTURE_2D);
glEnableClientState(GL_VERTEX_ARRAY);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_POINT_SPRITE_OES);
glTexEnvf(GL_POINT_SPRITE_OES, GL_COORD_REPLACE_OES, GL_TRUE);
glPointSize(width / kBrushScale);
// Define a starting color
HSL2RGB((CGFloat) 0.0 / (CGFloat)kPaletteSize, kSaturation, kLuminosity, &components[0], &components[1], &components[2]);
glColor4f(components[0] * kBrushOpacity, components[1] * kBrushOpacity, components[2] * kBrushOpacity, kBrushOpacity);
I've been searching for code for different paint brush strokes but cannot find any. How can I get the desired brush stroke, similar to image 1?
Do not use GL_POINT_SPRITE_OES mode. Draw the sprites as standard textured triangles instead, bind the sprite texture coordinates to the target output coordinates, and make the sprite texture repeatable.
Assuming the sprite texture is 32x32, the default texture coordinates cover the rect {{0,0},{1.0,1.0}}. To draw a sprite at position {x,y}, use texture coordinates based on the rect {{(x%32)/32.0, (y%32)/32.0},{1.0,1.0}}. That way the texture content stays aligned with the screen and the sprite content does not smudge.
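A minimal sketch of that idea, assuming a 32x32 power-of-two brush texture is already created and bound, and using a made-up method name for illustration:
- (void)drawStampAtX:(GLfloat)x y:(GLfloat)y
{
    const GLfloat size = 32.0f;               // stamp size in pixels (matches the texture)
    GLfloat s = fmodf(x, size) / size;        // texture offset derived from
    GLfloat t = fmodf(y, size) / size;        //   the target screen position

    GLfloat vertices[] = {
        x,        y,
        x + size, y,
        x,        y + size,
        x + size, y + size
    };
    GLfloat texCoords[] = {
        s,        t,
        s + 1.0f, t,
        s,        t + 1.0f,
        s + 1.0f, t + 1.0f                    // coords > 1.0 rely on GL_REPEAT
    };

    // Repeat wrapping requires a power-of-two texture in OpenGL ES 1.1
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);

    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glVertexPointer(2, GL_FLOAT, 0, vertices);
    glTexCoordPointer(2, GL_FLOAT, 0, texCoords);
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}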
Related
I am beginning to learn OpenGL ES 1.1 for iPhone and would like to draw a 2D image in orthographic projection behind a few 3D objects. Using Jeff Lamarche's tutorials and the book Pro OpenGL ES for iPhone, I've come up with the following couple of methods to attempt this. If I disable the call to drawSunInRect, the 3D objects are rendered just fine and I can move them with touch controls etc. If I uncomment that call and try to draw the sun image, the image appears in the CGRect I supply, but I cannot see any of my other 3D objects; the rest of the screen is black. I've tried to disable/enable depth testing in various places, pass different parameters to glOrthof(), and move the rectangle around, but I keep getting only the sun image whenever the drawSunInRect method is called. I'm assuming it is covering my 3D objects.
// Draw Sun in Rect and with Depth
- (void)drawSunInRect:(CGRect)rect withDepth:(float)depth {
// Get screen bounds
CGRect frame = [[UIScreen mainScreen] bounds];
// Calculate vertices from passed CGRect and depth
GLfloat vertices[] =
{
rect.origin.x, rect.origin.y, depth,
rect.origin.x + rect.size.width , rect.origin.y, depth,
rect.origin.x, rect.size.height+rect.origin.y , depth,
rect.origin.x + rect.size.width , rect.size.height+rect.origin.y ,depth
};
// Map the texture coords - no repeating
static GLfloat textureCoords[] =
{
0.0, 0.0,
1.0, 0.0,
0.0, 1.0,
1.0, 1.0
};
// Disable DEPTH test and setup Ortho
glDisable(GL_DEPTH_TEST);
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
glOrthof(0,frame.size.width,frame.size.height,0,0.1f,1000.0);
// Enable blending and configure
glEnable(GL_BLEND);
glBlendFunc(GL_ONE,GL_ONE_MINUS_SRC_ALPHA);
// Enable VERTEX and TEXTURE client states
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
// Enable Textures
glEnable(GL_TEXTURE_2D);
// Projection Matrix Mode for ortho and reset
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
// No lighting or Depth Mask for 2D - no Culling
glDisable(GL_LIGHTING);
glDepthMask(GL_FALSE);
glDisable(GL_CULL_FACE);
glDisableClientState(GL_COLOR_ARRAY);
// Define ortho
glOrthof(0,frame.size.width,frame.size.height,0,0.1f,1000);
// From Jeff Lamarche Tutorials
// glOrthof(-1.0, // Left
// 1.0, // Right
// -1.0 / (rect.size.width / rect.size.height), // Bottom
// 1.0 / (rect.size.width / rect.size.height), // Top
// 0.01, // Near
// 10000.0); // Far
// Setup Model View Matrix
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glLoadIdentity();
// Bind and draw the Texture
glBindTexture(GL_TEXTURE_2D,sunInt);
glVertexPointer(3, GL_FLOAT, 0, vertices);
glTexCoordPointer(2, GL_FLOAT,0,textureCoords);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glMatrixMode(GL_MODELVIEW);
glPopMatrix();
glMatrixMode(GL_PROJECTION);
glPopMatrix();
// Re-enable lighting
glEnable(GL_LIGHTING);
glDisable(GL_TEXTURE_2D);
glDisable(GL_BLEND);
glDisable(GL_DEPTH_TEST);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDepthMask(GL_TRUE);
}
// Override the draw in rect function
- (void)glkView:(GLKView *)view drawInRect:(CGRect)rect
{
// Initialize and init the rotation stuff for the object
// Identity Matrix
glLoadIdentity();
static GLfloat rot = 0.0;
// Clear any remnants in the drawing buffers
// and fill the background with black
glClearColor(0.0f,0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Render the Sun - 2D Sun Image in CGRect
//[self drawSunInRect:CGRectMake(100, 100, 50, 50) withDepth:-30.0]; // 2D Sun Image
// Render the BattleCruiser - 3D Battlecruiser
[self renderTheBattleCruiser];
// Calculate a time interval to use to rotate the cruiser lives
static NSTimeInterval lastDrawTime;
if (lastDrawTime)
{
NSTimeInterval timeSinceLastDraw = [NSDate timeIntervalSinceReferenceDate] - lastDrawTime;
rot += 75 * timeSinceLastDraw;
}
lastDrawTime = [NSDate timeIntervalSinceReferenceDate];
}
UPDATE
I modified the drawSunInRect method to have only one push and one pop for the GL_PROJECTION matrix. I am still having the same issue: the sun image appears but I cannot see my 3D objects. I have tried rearranging the calls to the render methods so the 3D objects are rendered first, but I get the same result. I would appreciate other ideas on how to see my orthographic texture and 3D objects together.
// Draw Sun in Rect and with Depth
- (void)drawSunInRect:(CGRect)rect withDepth:(float)depth {
// Get screen bounds
CGRect frame = [[UIScreen mainScreen] bounds];
// Calculate vertices from passed CGRect and depth
GLfloat vertices[] =
{
rect.origin.x, rect.origin.y, depth,
rect.origin.x + rect.size.width , rect.origin.y, depth,
rect.origin.x, rect.size.height+rect.origin.y , depth,
rect.origin.x + rect.size.width , rect.size.height+rect.origin.y ,depth
};
// Map the texture coords - no repeating
static GLfloat textureCoords[] =
{
0.0, 0.0,
1.0, 0.0,
0.0, 1.0,
1.0, 1.0
};
glEnable(GL_BLEND);
glBlendFunc(GL_ONE,GL_ONE_MINUS_SRC_ALPHA);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnable(GL_TEXTURE_2D);
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
glDisable(GL_LIGHTING);
glEnable(GL_DEPTH_TEST);
glDisable(GL_CULL_FACE);
glDisableClientState(GL_COLOR_ARRAY);
glOrthof(0,frame.size.width,frame.size.height,0,0,1000);
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glLoadIdentity();
glColor4f(1,1,1,1);
glBindTexture(GL_TEXTURE_2D,sunInt);
glVertexPointer(3, GL_FLOAT, 0, vertices);
glTexCoordPointer(2, GL_FLOAT,0,textureCoords);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glMatrixMode(GL_MODELVIEW);
glPopMatrix();
glMatrixMode(GL_PROJECTION);
glPopMatrix();
glEnable(GL_LIGHTING);
glDisable(GL_TEXTURE_2D);
glDisable(GL_BLEND);
glDisable(GL_DEPTH_TEST);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
I realized I was calling setClipping only once, in my view controller's viewDidLoad. I moved it into my glkView method and now I get both my sun image and my 3D object.
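In code, the fix amounts to something like the following sketch (setClipping is the questioner's own perspective-projection setup; the other method bodies are unchanged):
- (void)glkView:(GLKView *)view drawInRect:(CGRect)rect
{
    glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Re-establish the perspective projection every frame, since
    // drawSunInRect replaces it with an orthographic one.
    [self setClipping];
    [self renderTheBattleCruiser];

    // 2D overlay drawn with its own orthographic projection
    [self drawSunInRect:CGRectMake(100, 100, 50, 50) withDepth:-30.0];
}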
I tried to paint a transparent texture over a sphere, but the transparent areas are not completely transparent; a vivid shade of gray remains. I loaded a Photoshop-generated PNG and then painted it onto the sphere using the code below.
My code to load textures:
- (void)loadPNGTexture:(int)index Name:(NSString *)name {
    CGImageRef imageRef = [UIImage imageNamed:[NSString stringWithFormat:@"%@.png", name]].CGImage;
    GLsizei width = CGImageGetWidth(imageRef);
    GLsizei height = CGImageGetHeight(imageRef);
    GLubyte *data = malloc(width * 4 * height);
    if (!data)
        NSLog(@"error allocating memory for texture loading!");
    else {
        NSLog(@"Memory allocated for %@", name);
    }
    NSLog(@"Width: %d, Height: %d", width, height);
    CGContextRef cg_context = CGBitmapContextCreate(data, width, height, 8, 4 * width, CGImageGetColorSpace(imageRef), kCGImageAlphaPremultipliedLast);
    CGContextTranslateCTM(cg_context, 0, height);
    CGContextScaleCTM(cg_context, 1, -1);
    CGContextDrawImage(cg_context, CGRectMake(0, 0, width, height), imageRef);
    CGContextRelease(cg_context);
    CGContextSetBlendMode(cg_context, kCGBlendModeCopy);
    glGenTextures(2, m_texture[index]);
    glBindTexture(GL_TEXTURE_2D, m_texture[index][0]);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    free(data);
}
Drawing clouds:
glPushMatrix();
glTranslatef(0, 0, 3 );
glScalef(3.1, 3.1, 3.1);
glRotatef(-1, 0, 0, 1);
glRotatef(90, -1, 0, 0);
glDisable(GL_LIGHTING);
//Load Texture for left side of globe
glBindTexture(GL_TEXTURE_2D, m_texture[CLOUD_TEXTURE][0]);
glVertexPointer(3, GL_FLOAT, sizeof(TexturedVertexData3D), &VertexData[0].vertex);
glNormalPointer(GL_FLOAT, sizeof(TexturedVertexData3D), &VertexData[0].normal);
glTexCoordPointer(2, GL_FLOAT, sizeof(TexturedVertexData3D), &VertexData[0].texCoord);
// draw the sphere
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_COPY);
glDrawArrays(GL_TRIANGLES, 0, 11520);
glEnable(GL_LIGHTING);
glPopMatrix();
The first thing that stands out in your code is this line:
glBlendFunc(GL_SRC_ALPHA, GL_COPY);
The second argument (GL_COPY) is not a valid argument for glBlendFunc.
You might want to change that to something along the lines of
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
I created a new iPhone OpenGL project in Xcode. I filled my background with triangles and gave them a texture; see below:
CGImageRef spriteImage;
CGContextRef spriteContext;
GLubyte *spriteData;
size_t width, height;
// Sets up matrices and transforms for OpenGL ES
glViewport(0, 0, backingWidth, backingHeight);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
//glRotatef(-90,0,0,1);
glOrthof(-1.0f, 1.0f, -1.5f, 1.5f, -1.0f, 1.0f);
glMatrixMode(GL_MODELVIEW);
// Clears the view with black
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
// Sets up pointers and enables states needed for using vertex arrays and textures
glVertexPointer(2, GL_FLOAT, 0, vertices);
glEnableClientState(GL_VERTEX_ARRAY);
//glColorPointer(4, GL_FLOAT, 0, triangleColors);
//glColor4f(0.0f,1.0f,0.0f,1.0f);
//glEnableClientState(GL_COLOR_ARRAY);
glTexCoordPointer(2, GL_FLOAT, 0, spriteTexcoords);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
// Creates a Core Graphics image from an image file
spriteImage = [UIImage imageNamed:@"Bild.png"].CGImage;
// Get the width and height of the image
width = CGImageGetWidth(spriteImage);
height = CGImageGetHeight(spriteImage);
// Texture dimensions must be a power of 2. If you write an application that allows users to supply an image,
// you'll want to add code that checks the dimensions and takes appropriate action if they are not a power of 2.
if(spriteImage) {
// Allocated memory needed for the bitmap context
spriteData = (GLubyte *) calloc(width * height * 4, sizeof(GLubyte));
// Uses the bitmap creation function provided by the Core Graphics framework.
spriteContext = CGBitmapContextCreate(spriteData, width, height, 8, width * 4, CGImageGetColorSpace(spriteImage), kCGImageAlphaPremultipliedLast);
// After you create the context, you can draw the sprite image to the context.
CGContextDrawImage(spriteContext, CGRectMake(0.0, 0.0, (CGFloat)width, (CGFloat)height), spriteImage);
// You don't need the context at this point, so you need to release it to avoid memory leaks.
CGContextRelease(spriteContext);
// Use OpenGL ES to generate a name for the texture.
glGenTextures(1, &spriteTexture);
// Bind the texture name.
glBindTexture(GL_TEXTURE_2D, spriteTexture);
// Set the texture parameters to use a minifying filter and a linear filter (weighted average)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// Specify a 2D texture image, providing a pointer to the image data in memory
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, spriteData);
// Release the image data
free(spriteData);
// Enable use of the texture
glEnable(GL_TEXTURE_2D);
// Set a blending function to use
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
// Enable blending
glEnable(GL_BLEND);
I have two questions, because I am not so familiar with OpenGL.
I want to write a method that takes two points as parameters and draws a line between them, on top of my triangles (the background).
- (void) drawLineFromPoint1:(CGPoint)point1 toPoint2:(CGPoint)point2 {
GLfloat triangle[] = { //Just example points
0.0f, 0.0f,
0.1f, 0.0f,
0.1f, 0.0f,
0.1f, 0.1f
};
GLfloat triangleColors[] = {
0.5f, 0.5f, 0.5f, 1.0f
};
//now draw the triangle
}
Something like that. Then I want a second method that erases this line (and not the background).
My drawing method looks like this:
- (void)drawView
{
// Make sure that you are drawing to the current context
[EAGLContext setCurrentContext:context];
glBindFramebufferOES(GL_FRAMEBUFFER_OES, viewFramebuffer);
glClear(GL_COLOR_BUFFER_BIT);
glDrawElements(GL_TRIANGLES, number_vertices, GL_UNSIGNED_SHORT, indices);
glBindRenderbufferOES(GL_RENDERBUFFER_OES, viewRenderbuffer);
[context presentRenderbuffer:GL_RENDERBUFFER_OES];
}
It would be great if you could give me some hints/help,
cheers
The conventional approach would be to redraw everything whenever you move or erase a line.
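For the drawing itself, a minimal sketch of such a method (using the signature from the question, and assuming the current projection already maps these coordinates to the screen; the gray color is just the example value from the question) could be:
- (void)drawLineFromPoint1:(CGPoint)point1 toPoint2:(CGPoint)point2
{
    GLfloat line[] = {
        point1.x, point1.y,
        point2.x, point2.y
    };
    glDisable(GL_TEXTURE_2D);              // the line itself is untextured
    glColor4f(0.5f, 0.5f, 0.5f, 1.0f);     // example gray
    glVertexPointer(2, GL_FLOAT, 0, line);
    glDrawArrays(GL_LINES, 0, 2);
    glEnable(GL_TEXTURE_2D);
}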
Well, I got it to work. I had simply forgotten to set the vertex pointer to my triangles in drawView. This now works:
- (void)drawView
{
[EAGLContext setCurrentContext:context];
glBindFramebufferOES(GL_FRAMEBUFFER_OES, viewFramebuffer);
glClear(GL_COLOR_BUFFER_BIT);
glVertexPointer(2, GL_FLOAT, 0, vertices);
glDrawElements(GL_TRIANGLES, number_vertices, GL_UNSIGNED_SHORT, indices);
[self drawLines];
glBindRenderbufferOES(GL_RENDERBUFFER_OES, viewRenderbuffer);
[context presentRenderbuffer:GL_RENDERBUFFER_OES];
}
- (void) drawLines{
glDisable(GL_TEXTURE_2D);
GLfloat points[4];
for (Dataset *data in buttons) {
CGPoint s = [data screenPosition];
CGPoint p = [data slot];
points[0] = (GLfloat)(768-s.y);
points[1] = (GLfloat)(1024-s.x);
points[2] = (GLfloat)(768-p.y);
points[3] = (GLfloat)(1024-p.x);
glVertexPointer(2, GL_FLOAT, 0, points);
glDrawArrays(GL_LINE_STRIP, 0, 2);
}
glEnable(GL_TEXTURE_2D);
}
I am trying to draw a rectangle using the GLPaint example project provided by Apple. I have tried modifying the vertices but cannot get a rectangle to appear on the screen. The finger painting works perfectly. Am I missing something in my renderRect method?
- (void)renderRect {
[EAGLContext setCurrentContext:context];
glBindFramebufferOES(GL_FRAMEBUFFER_OES, viewFramebuffer);
// Replace the implementation of this method to do your own custom drawing.
static const GLfloat squareVertices[] = {
-0.5f, -0.33f,
0.5f, -0.33f,
-0.5f, 0.33f,
0.5f, 0.33f,
};
static float transY = 0.0f;
glTranslatef(0.0f, (GLfloat)(sinf(transY)/2.0f), 0.0f);
// Render the vertex array
glVertexPointer(2, GL_FLOAT, 0, squareVertices);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
// Display the buffer
glBindRenderbufferOES(GL_RENDERBUFFER_OES, viewRenderbuffer);
[context presentRenderbuffer:GL_RENDERBUFFER_OES];
}
The rest of the project is set up stock to allow drawing on the screen, but for reference these are the GL settings that are used.
// Set the view's scale factor
self.contentScaleFactor = 1.0;
// Setup OpenGL states
glMatrixMode(GL_PROJECTION);
CGRect frame = self.bounds;
CGFloat scale = self.contentScaleFactor;
// Setup the view port in Pixels
glOrthof(0, frame.size.width * scale, 0, frame.size.height * scale, -1, 1);
glViewport(0, 0, frame.size.width * scale, frame.size.height * scale);
glMatrixMode(GL_MODELVIEW);
glDisable(GL_DITHER);
glEnable(GL_TEXTURE_2D);
glEnableClientState(GL_VERTEX_ARRAY);
glEnable(GL_BLEND);
// Set a blending function appropriate for premultiplied alpha pixel data
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_POINT_SPRITE_OES);
glTexEnvf(GL_POINT_SPRITE_OES, GL_COORD_REPLACE_OES, GL_TRUE);
glPointSize(width / brushScale);
static const GLfloat squareVertices[] = {
30.0f, 300.0f,//-0.5f, -0.33f,
280.0f, 300.0f,//0.5f, -0.33f,
30.0f, 170.0f,//-0.5f, 0.33f,
280.0f, 170.0f,//0.5f, 0.33f,
};
That's definitely too much. OpenGL uses normalized device coordinates in the range [-1..1], so you have to convert your device coordinates to normalized ones.
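Under that reading (i.e. no orthographic projection remapping the axes to pixels), the conversion would look roughly like this; viewWidth, viewHeight, x and y are hypothetical names:
// Map a point with its origin at the top-left of a viewWidth x viewHeight view
// to normalized device coordinates in [-1, 1] with the origin at the center.
GLfloat normX = (2.0f * x / viewWidth) - 1.0f;
GLfloat normY = 1.0f - (2.0f * y / viewHeight);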
Issues are:
(1) the following code:
glMatrixMode(GL_PROJECTION);
CGRect frame = self.bounds;
CGFloat scale = self.contentScaleFactor;
// Setup the view port in Pixels
glOrthof(0, frame.size.width * scale, 0, frame.size.height * scale, -1, 1);
glViewport(0, 0, frame.size.width * scale, frame.size.height * scale);
Establishes that the on-screen coordinates range from (0, 0) in the lower left to frame.size in the upper right. In other words, one OpenGL unit is one iPhone point. So your array of:
static const GLfloat squareVertices[] = {
-0.5f, -0.33f,
0.5f, -0.33f,
-0.5f, 0.33f,
0.5f, 0.33f,
};
Is less than 1 pixel in size.
(2) you have the following in the setup:
brushImage = [UIImage imageNamed:@"Particle.png"].CGImage;
/* ...brushImage eventually becomes the current texture... */
glEnable(GL_TEXTURE_2D);
You subsequently fail to supply texture coordinates for your quad. Probably you want to disable GL_TEXTURE_2D.
So the following:
static const GLfloat squareVertices[] = {
0.0f, 0.0f,
0.0, 10.0f,
90.0, 0.0f,
90.0f, 10.0f,
};
glDisable(GL_TEXTURE_2D);
glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
// Render the vertex array
glVertexPointer(2, GL_FLOAT, 0, squareVertices);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
Will produce a white quad 90 points wide and 10 points tall in the lower left of the screen.
I am modifying GLPaint to use a different background, in this case white. The existing stamp assumes the background is black, so I made a new background with an alpha channel. When I draw on the canvas, the stroke is still black. What gives? When I actually draw I just bind the texture, and that part works, so something must be wrong in this initialization.
Here is the photo
- (id)initWithCoder:(NSCoder*)coder
{
CGImageRef brushImage;
CGContextRef brushContext;
GLubyte *brushData;
size_t width, height;
if (self = [super initWithCoder:coder])
{
CAEAGLLayer *eaglLayer = (CAEAGLLayer *)self.layer;
eaglLayer.opaque = YES;
// In this application, we want to retain the EAGLDrawable contents after a call to presentRenderbuffer.
eaglLayer.drawableProperties = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithBool:YES], kEAGLDrawablePropertyRetainedBacking, kEAGLColorFormatRGBA8, kEAGLDrawablePropertyColorFormat, nil];
context = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES1];
if (!context || ![EAGLContext setCurrentContext:context]) {
[self release];
return nil;
}
// Create a texture from an image
// First create a UIImage object from the data in a image file, and then extract the Core Graphics image
brushImage = [UIImage imageNamed:@"test.png"].CGImage;
// Get the width and height of the image
width = CGImageGetWidth(brushImage);
height = CGImageGetHeight(brushImage);
// Texture dimensions must be a power of 2. If you write an application that allows users to supply an image,
// you'll want to add code that checks the dimensions and takes appropriate action if they are not a power of 2.
// Make sure the image exists
if(brushImage)
{
brushData = (GLubyte *) calloc(width * height * 4, sizeof(GLubyte));
brushContext = CGBitmapContextCreate(brushData, width, width, 8, width * 4, CGImageGetColorSpace(brushImage), kCGImageAlphaPremultipliedLast);
CGContextDrawImage(brushContext, CGRectMake(0.0, 0.0, (CGFloat)width, (CGFloat)height), brushImage);
CGContextRelease(brushContext);
glGenTextures(1, &brushTexture);
glBindTexture(GL_TEXTURE_2D, brushTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, brushData);
free(brushData);
}
//Set up OpenGL states
glMatrixMode(GL_PROJECTION);
CGRect frame = self.bounds;
glOrthof(0, frame.size.width, 0, frame.size.height, -1, 1);
glViewport(0, 0, frame.size.width, frame.size.height);
glMatrixMode(GL_MODELVIEW);
glDisable(GL_DITHER);
glEnable(GL_TEXTURE_2D);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_DST_ALPHA);
glEnable(GL_POINT_SPRITE_OES);
glTexEnvf(GL_POINT_SPRITE_OES, GL_COORD_REPLACE_OES, GL_TRUE);
glPointSize(width / kBrushScale);
}
return self;
}
Your line here:
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_DST_ALPHA);
Needs to be:
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
One minus the destination alpha will usually be 0, since the destination alpha is usually 1; that discards the canvas entirely, so the transparent parts of the brush come out black instead of showing the background.
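Written out with the fixed-function blend equation, and assuming the canvas already has alpha 1, the difference is:
// Blend equation: C_out = C_src * srcFactor + C_dst * dstFactor
//
// With glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_DST_ALPHA) and A_dst = 1:
//   C_out = C_src * A_src + C_dst * (1 - 1) = C_src * A_src
//   -> transparent brush texels (A_src = 0) overwrite the white canvas with black.
//
// With glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA):
//   C_out = C_src * A_src + C_dst * (1 - A_src)
//   -> transparent brush texels leave the canvas untouched.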