I am trying to port an application to an embedded system that I am designing. The system is based on a Raspberry Pi Zero W and uses a custom Yocto build.
To my understanding, the application is written with SDL / OpenGL ES. I am having a hard time working out how to establish a connection like the one depicted below:
SDL APP -----> XServer ($DISPLAY) -------> Framebuffer /dev/fb1 ($FRAMEBUFFER)
The system has two displays: one HDMI display on /dev/fb0 and one TFT display on /dev/fb1. I am trying to run the SDL application on the TFT. These are the steps I take:
First, I start an X server on DISPLAY=:1 that is tied to /dev/fb1:
FRAMEBUFFER=/dev/fb1 xinit /etc/X11/Xsession -- /usr/bin/Xorg :1 -br -pn -nolisten tcp -dpi 100
The first step seems to be working: I can see LXDE booting up on my TFT screen, and querying the display gives the correct resolution:
~/projects# DISPLAY=:1 xrandr -q
xrandr: Failed to get size of gamma for output default
Screen 0: minimum 320 x 240, current 320 x 240, maximum 320 x 240
default connected 320x240+0+0 0mm x 0mm
320x240 0.00*
Second, I start the SDL application with the x11 video driver, which I expect should make it appear on the TFT:
SDL_VIDEODRIVER=x11 SDL_WINDOWID=1 DISPLAY=:1 ./SDL_App
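To verify where the window actually ends up, the window tree of the server in question can be inspected (xwininfo is a standard X utility):
DISPLAY=:1 xwininfo -root -tree   # the SDL window should be listed here if it really is on :1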
No matter which display number I choose, the application starts on my HDMI display and not on the TFT. So now I am thinking the person who wrote the application hard-coded some things in the code:
void init_ogl(void)
{
    int32_t success = 0;
    EGLBoolean result;
    EGLint num_config;

    static EGL_DISPMANX_WINDOW_T nativewindow;
    DISPMANX_ELEMENT_HANDLE_T dispman_element;
    DISPMANX_DISPLAY_HANDLE_T dispman_display;
    DISPMANX_UPDATE_HANDLE_T dispman_update;
    VC_DISPMANX_ALPHA_T alpha;
    VC_RECT_T dst_rect;
    VC_RECT_T src_rect;

    static const EGLint attribute_list[] =
    {
        EGL_RED_SIZE, 8,
        EGL_GREEN_SIZE, 8,
        EGL_BLUE_SIZE, 8,
        EGL_ALPHA_SIZE, 8,
        EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
        EGL_NONE
    };

    EGLConfig config;

    // Get an EGL display connection
    display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    assert(display != EGL_NO_DISPLAY);

    // Initialize the EGL display connection
    result = eglInitialize(display, NULL, NULL);
    assert(EGL_FALSE != result);

    // Get an appropriate EGL frame buffer configuration
    result = eglChooseConfig(display, attribute_list, &config, 1, &num_config);
    assert(EGL_FALSE != result);

    // Create an EGL rendering context
    context = eglCreateContext(display, config, EGL_NO_CONTEXT, NULL);
    assert(context != EGL_NO_CONTEXT);

    // Create an EGL window surface
    success = graphics_get_display_size(0 /* LCD */, &screen_width, &screen_height);
    printf("Screen width= %d\n", screen_width);
    printf("Screen height= %d\n", screen_height);
    assert(success >= 0);

    int32_t zoom = screen_width / GAMEBOY_WIDTH;
    int32_t zoom2 = screen_height / GAMEBOY_HEIGHT;

    if (zoom2 < zoom)
        zoom = zoom2;

    int32_t display_width = GAMEBOY_WIDTH * zoom;
    int32_t display_height = GAMEBOY_HEIGHT * zoom;
    int32_t display_offset_x = (screen_width / 2) - (display_width / 2);
    int32_t display_offset_y = (screen_height / 2) - (display_height / 2);

    dst_rect.x = 0;
    dst_rect.y = 0;
    dst_rect.width = screen_width;
    dst_rect.height = screen_height;

    src_rect.x = 0;
    src_rect.y = 0;
    src_rect.width = screen_width << 16;
    src_rect.height = screen_height << 16;

    dispman_display = vc_dispmanx_display_open(0 /* LCD */);
    dispman_update = vc_dispmanx_update_start(0);

    alpha.flags = DISPMANX_FLAGS_ALPHA_FIXED_ALL_PIXELS;
    alpha.opacity = 255;
    alpha.mask = 0;

    dispman_element = vc_dispmanx_element_add(dispman_update, dispman_display,
        0 /*layer*/, &dst_rect, 0 /*src*/,
        &src_rect, DISPMANX_PROTECTION_NONE, &alpha, 0 /*clamp*/, DISPMANX_NO_ROTATE /*transform*/);

    nativewindow.element = dispman_element;
    nativewindow.width = screen_width;
    nativewindow.height = screen_height;
    vc_dispmanx_update_submit_sync(dispman_update);

    surface = eglCreateWindowSurface(display, config, &nativewindow, NULL);
    assert(surface != EGL_NO_SURFACE);

    // Connect the context to the surface
    result = eglMakeCurrent(display, surface, surface, context);
    assert(EGL_FALSE != result);

    eglSwapInterval(display, 1);

    glGenTextures(1, &theGBTexture);
    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
    glEnable(GL_TEXTURE_2D);
    glBindTexture(GL_TEXTURE_2D, theGBTexture);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 256, 256, 0, GL_RGBA, GL_UNSIGNED_BYTE, (GLvoid*) NULL);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);

    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrthof(0.0f, screen_width, screen_height, 0.0f, -1.0f, 1.0f);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glViewport(0, 0, screen_width, screen_height);

    quadVerts[0] = display_offset_x;
    quadVerts[1] = display_offset_y;
    quadVerts[2] = display_offset_x + display_width;
    quadVerts[3] = display_offset_y;
    quadVerts[4] = display_offset_x + display_width;
    quadVerts[5] = display_offset_y + display_height;
    quadVerts[6] = display_offset_x;
    quadVerts[7] = display_offset_y + display_height;

    glVertexPointer(2, GL_SHORT, 0, quadVerts);
    glEnableClientState(GL_VERTEX_ARRAY);
    glTexCoordPointer(2, GL_FLOAT, 0, kQuadTex);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);

    glClear(GL_COLOR_BUFFER_BIT);
}
void init_sdl(void)
{
    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_GAMECONTROLLER) < 0)
    {
        Log("SDL Error Init: %s", SDL_GetError());
    }

    theWindow = SDL_CreateWindow("Gearboy", 0, 0, 0, 0, 0);
    if (theWindow == NULL)
    {
        Log("SDL Error Video: %s", SDL_GetError());
    }
    ...
}
At first glance, I spotted two lines: vc_dispmanx_display_open( 0 /* LCD */ ); and graphics_get_display_size( 0 /* LCD */ , &screen_width, &screen_height);. I tried changing the display parameter to 1, thinking that it refers to DISPLAY=:1, but it did not change anything. I added logs for the screen resolution, and I get 1920x1080, which is the resolution of the HDMI display. I think there must be something in the EGL portion of the code that I'm missing. What should I do now? Is my reasoning sound, or am I missing something?
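Concretely, the change I tried looked like this (the 1 was a guess based on nothing but the DISPLAY=:1 analogy):
success = graphics_get_display_size(1 /* was 0 */, &screen_width, &screen_height);
...
dispman_display = vc_dispmanx_display_open(1 /* was 0 */);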
If you need any more details, please let me know. Any guidance regarding the issue is much appreciated.
EDIT: I saw that some people use the following, but on the Raspberry Pi Zero the toolchain cannot find EGL/eglvivante.h for the fb functions, so I am unable to compile it:
int fbnum = 1; // framebuffer index for /dev/fb1
EGLNativeDisplayType native_display = fbGetDisplayByIndex(fbnum);
EGLNativeWindowType native_window = fbCreateWindow(native_display, 0, 0, 0, 0);
display = eglGetDisplay(native_display);
I want to create a grid which displays an error message if the file does not exist:
/* size */
s = get_file_size(recording->filename);
if (s > 0) {
    size = g_format_size_full(s, G_FORMAT_SIZE_LONG_FORMAT);
    gtk_label_set_text(GTK_LABEL(size_lbl), size);
    gtk_widget_hide(error);
    g_free(size);
} else {
    size = g_strdup(_("Import Errors"));
    gtk_widget_show(error);
}
In the GTK grid, I cannot work out what type the "error" element should be so that it displays the message as in the screenshot:
grid = gtk_grid_new();
gtk_grid_set_row_spacing(GTK_GRID(grid), 6);
gtk_grid_set_column_spacing(GTK_GRID(grid), 15);
gtk_container_set_border_width(GTK_CONTAINER(grid), 6);

s_lbl = gtk_label_new("Size:");
size_lbl = gtk_label_new("");
error = ?
error_pixmap = gtk_image_new_from_stock(GTK_STOCK_DIALOG_ERROR, GTK_ICON_SIZE_SMALL_TOOLBAR);
gtk_container_add(GTK_CONTAINER(error), error_pixmap);

gtk_grid_attach(GTK_GRID(grid), s_lbl, 0, 0, 1, 1);
gtk_grid_attach(GTK_GRID(grid), error, 1, 0, 1, 1);
gtk_grid_attach(GTK_GRID(grid), size_lbl, 2, 0, 1, 1);
Thanks for any help.
Screenshot: [a "Size:" row showing an error icon and message next to the label]
Use a GtkBox with GTK_ORIENTATION_HORIZONTAL, or an adjoining cell in the GtkGrid.
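A minimal sketch of the GtkBox approach (assuming GTK 3, reusing the variable names from the question):
/* Pack the icon into a horizontal box and attach the box to the grid
   as a single "error" widget. */
error = gtk_box_new(GTK_ORIENTATION_HORIZONTAL, 6);
error_pixmap = gtk_image_new_from_stock(GTK_STOCK_DIALOG_ERROR,
                                        GTK_ICON_SIZE_SMALL_TOOLBAR);
gtk_container_add(GTK_CONTAINER(error), error_pixmap);
gtk_grid_attach(GTK_GRID(grid), error, 1, 0, 1, 1);
/* gtk_widget_show(error) / gtk_widget_hide(error) from the file-size
   check then toggles the whole box. */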
I'd like to use Vertex Buffer Objects (VBOs) to improve my rendering of somewhat complicated models in my OpenGL ES 1.1 game for iPhone. After reading several posts on SO and this tutorial (http://playcontrol.net/ewing/jibberjabber/opengl_vertex_buffer_object.html), I'm still having trouble understanding VBOs and how to implement them given my Cheetah 3D export model format. Could someone please give me an example of implementing a VBO and using it to draw my vertices with the given data structure, and explain the syntax? I greatly appreciate any help!
#define body_vertexcount 434
#define body_polygoncount 780

// The vertex data is saved in the following format:
// u0, v0, normalx0, normaly0, normalz0, x0, y0, z0
float body_vertex[body_vertexcount][8] = {
    {0.03333, 0.00000, -0.68652, -0.51763, 0.51063, 0.40972, -0.25028, -1.31418},
    {...},
    {...}
};

GLushort body_index[body_polygoncount][3] = {
    {0, 1, 2},
    {2, 3, 0}
};
I've written the following code with the help of Chapter 9 of Pro OpenGL ES (Apress). I'm getting EXC_BAD_ACCESS on the DrawElements call and I'm not sure why. Could someone please shed some light? Thanks -
// First thing we do is create / setup the index buffer
glGenBuffers(1, &bodyIBO);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bodyIBO);
// For contrast, instead of glBufferSubData and glMapBuffer,
// we can directly supply the data in one shot
glBufferData(GL_ELEMENT_ARRAY_BUFFER, body_polygoncount*sizeof(GLubyte), body_index, GL_STATIC_DRAW);
// Define our data structure
int numXYZElements = 3;
int numNormalElements = 3;
int numTextureCoordElements = 2;
long totalXYZBytes;
long totalNormalBytes;
long totalTexCoordinateBytes;
int numBytesPerVertex;
// Allocate a new buffer
glGenBuffers(1, &bodyVBO);
// Bind the buffer object to use
glBindBuffer(GL_ARRAY_BUFFER, bodyVBO);
// Tally up the size of the data components
numBytesPerVertex = numXYZElements;
numBytesPerVertex += numNormalElements;
numBytesPerVertex += numTextureCoordElements;
numBytesPerVertex *= sizeof(GLfloat);
// Actually allocate memory on the GPU ( Data is static here )
glBufferData(GL_ARRAY_BUFFER, numBytesPerVertex * body_vertexcount, 0, GL_STATIC_DRAW);
// Upload data to the cache ( memory mapping )
GLubyte *vboBuffer = (GLubyte *)glMapBufferOES(GL_ARRAY_BUFFER, GL_WRITE_ONLY_OES);
// Calculate the total number of bytes for each data type
totalXYZBytes = numXYZElements * body_vertexcount * sizeof(GLfloat);
totalNormalBytes = numNormalElements * body_vertexcount * sizeof(GLfloat);
totalTexCoordinateBytes = numTextureCoordElements * body_vertexcount * sizeof(GLfloat);
// Set the total bytes property for the body
self.bodyTotalBytes = totalXYZBytes + totalNormalBytes + totalTexCoordinateBytes;
// Setup the copy of the buffer(s) using memcpy()
memcpy(vboBuffer, body_vertex, self.bodyTotalBytes);
// Perform the actual copy
glUnmapBufferOES(GL_ARRAY_BUFFER);
Here are the drawing commands where I'm getting the exception:
// Activate the VBOs to draw
glBindBuffer(GL_ARRAY_BUFFER, bodyVBO);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bodyIBO);
// Setup drawing
glMatrixMode(GL_MODELVIEW);
glEnable(GL_TEXTURE_2D);
glClientActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D,lightGreyInt);
// Setup pointers
glVertexPointer(3, GL_FLOAT, sizeof(vertexStruct), (char *)NULL + 0 );
glTexCoordPointer(2, GL_FLOAT, sizeof(vertexStruct), (char *)NULL + 12 );
glNormalPointer(GL_FLOAT, sizeof(vertexStruct), (char *)NULL + 24 );
// Now draw the body
glDrawElements(GL_TRIANGLES, body_polygoncount,GL_UNSIGNED_SHORT, (GLvoid*)((char*)NULL));
//glDrawElements(GL_TRIANGLES, body_polygoncount, GL_UNSIGNED_SHORT, nil);
//glDrawElements(GL_TRIANGLES,body_polygoncount*3,GL_UNSIGNED_SHORT,body_index);
Well, first of all your index buffer is too small: you don't just have body_polygoncount indices, but body_polygoncount * 3. You also got the type wrong: since they're shorts, you need GLushort and not GLubyte. So it should be
glBufferData(GL_ELEMENT_ARRAY_BUFFER, body_polygoncount*3*sizeof(GLushort),
             body_index, GL_STATIC_DRAW);
And then, you mixed up the offsets of your attributes. Since your data contains, for each vertex, first the texture coordinates, then the normal, and then the position, it should be
glVertexPointer(3, GL_FLOAT, sizeof(vertexStruct), (char *)NULL + 20);  // 3rd, after 5*4 bytes
glTexCoordPointer(2, GL_FLOAT, sizeof(vertexStruct), (char *)NULL + 0); // 1st
glNormalPointer(GL_FLOAT, sizeof(vertexStruct), (char *)NULL + 8);      // 2nd, after 2*4 bytes
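In other words, the vertexStruct for this layout would look something like the following (a sketch; the actual definition isn't shown in the question, and this assumes tightly packed GLfloats):
typedef struct {
    GLfloat u, v;        // texture coords, offset 0
    GLfloat nx, ny, nz;  // normal,         offset 8
    GLfloat x, y, z;     // position,       offset 20
} vertexStruct;          // sizeof(vertexStruct) == 32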
And finally, in a glDrawElements call you don't give the number of triangles, but the number of elements (indices), so it should be
glDrawElements(GL_TRIANGLES, body_polygoncount*3,
GL_UNSIGNED_SHORT, (GLvoid*)((char*)NULL));
Otherwise your code looks reasonable (of course the mapping was unnecessary and you could have just used glBufferData again, but I guess you did it for learning), and if you understood everything it does, there is nothing more to it.
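For reference, that one-shot upload of the vertex data would simply be (using the body_vertex array from the question):
glBufferData(GL_ARRAY_BUFFER, sizeof(body_vertex), body_vertex, GL_STATIC_DRAW);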
One thing puzzles me, though: all of these errors would also have occurred with plain client-side vertex arrays, without VBOs, and OpenGL ES 1.1 doesn't have immediate-mode glBegin/glEnd. So I wonder how your game worked previously, without VBOs, if you weren't aware of these errors.
I am trying to follow the sample code on encoding in the ffmpeg documentation, and I have successfully built an application that encodes and generates an mp4 file, but I am facing the following problems:
1) I am using H263 for encoding, but I can only set the width and height of the AVCodecContext to 176x144; for other sizes (like 720x480 or 640x480) it fails.
2) I can't play the output mp4 file with the default Android player; doesn't it support H263 mp4 files? (I can play it with other players.)
3) Is there any sample code on re-encoding video frames into a new video (that is, decoding a video and encoding it back with different quality settings)? I would also like to modify the frame content.
Here is my code, thanks!
JNIEXPORT jint JNICALL Java_com_ffmpeg_encoder_FFEncoder_nativeEncoder(JNIEnv* env, jobject thiz, jstring filename){

    LOGI("nativeEncoder()");

    avcodec_register_all();
    avcodec_init();
    av_register_all();

    AVCodec *codec;
    AVCodecContext *codecCtx;
    int i;
    int out_size;
    int size;
    int x;
    int y;
    int output_buffer_size;
    FILE *file;
    AVFrame *picture;
    uint8_t *output_buffer;
    uint8_t *picture_buffer;

    /* Manual Variables */
    int l;
    int fps = 30;
    int videoLength = 5;

    /* find the H263 video encoder */
    codec = avcodec_find_encoder(CODEC_ID_H263);
    if (!codec) {
        LOGI("avcodec_find_encoder() run fail.");
    }

    codecCtx = avcodec_alloc_context();
    picture = avcodec_alloc_frame();

    /* put sample parameters */
    codecCtx->bit_rate = 400000;
    /* resolution must be a multiple of two */
    codecCtx->width = 176;
    codecCtx->height = 144;
    /* frames per second */
    codecCtx->time_base = (AVRational){1,fps};
    codecCtx->pix_fmt = PIX_FMT_YUV420P;
    codecCtx->codec_id = CODEC_ID_H263;
    codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;

    /* open it */
    if (avcodec_open(codecCtx, codec) < 0) {
        LOGI("avcodec_open() run fail.");
    }

    const char* mfileName = (*env)->GetStringUTFChars(env, filename, 0);
    file = fopen(mfileName, "wb");
    if (!file) {
        LOGI("fopen() run fail.");
    }
    (*env)->ReleaseStringUTFChars(env, filename, mfileName);

    /* alloc image and output buffer */
    output_buffer_size = 100000;
    output_buffer = malloc(output_buffer_size);

    size = codecCtx->width * codecCtx->height;
    picture_buffer = malloc((size * 3) / 2); /* size for YUV 420 */

    picture->data[0] = picture_buffer;
    picture->data[1] = picture->data[0] + size;
    picture->data[2] = picture->data[1] + size / 4;
    picture->linesize[0] = codecCtx->width;
    picture->linesize[1] = codecCtx->width / 2;
    picture->linesize[2] = codecCtx->width / 2;

    for (l = 0; l < videoLength; l++) {

        // encode 1 second of video
        for (i = 0; i < fps; i++) {

            // prepare a dummy image YCbCr
            // Y
            for (y = 0; y < codecCtx->height; y++) {
                for (x = 0; x < codecCtx->width; x++) {
                    picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
                }
            }
            // Cb and Cr
            for (y = 0; y < codecCtx->height/2; y++) {
                for (x = 0; x < codecCtx->width/2; x++) {
                    picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
                    picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
                }
            }

            // encode the image
            out_size = avcodec_encode_video(codecCtx, output_buffer, output_buffer_size, picture);
            fwrite(output_buffer, 1, out_size, file);
        }

        // get the delayed frames
        for (; out_size; i++) {
            out_size = avcodec_encode_video(codecCtx, output_buffer, output_buffer_size, NULL);
            fwrite(output_buffer, 1, out_size, file);
        }
    }

    // add sequence end code to have a real mpeg file
    output_buffer[0] = 0x00;
    output_buffer[1] = 0x00;
    output_buffer[2] = 0x01;
    output_buffer[3] = 0xb7;
    fwrite(output_buffer, 1, 4, file);
    fclose(file);

    free(picture_buffer);
    free(output_buffer);
    avcodec_close(codecCtx);
    av_free(codecCtx);
    av_free(picture);

    LOGI("finish");

    return 0;
}
H263 accepts only certain resolutions:
128 x 96
176 x 144
352 x 288
704 x 576
1408 x 1152
It will fail with anything else.
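For example, to go above 176x144 in the code from the question, the next size H263 accepts is CIF:
/* resolution must be one of the supported H263 picture sizes */
codecCtx->width  = 352;  /* CIF */
codecCtx->height = 288;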
The code supplied in the question (I used it myself at first) seems to generate only a very rudimentary container format, if any.
I found that this example, http://cekirdek.pardus.org.tr/~ismail/ffmpeg-docs/output-example_8c-source.html, worked much better as it creates a real container for the video and audio streams. My video is now displayable on the Android device.
I am trying to encode a series of images into one video file. I am using code from api-example.c; it works, but it gives me weird green colors in the video. I know I need to convert my RGB images to YUV. I found a solution, but it doesn't work either: the colors are no longer green, but still very strange. Here is the code:
// Register all formats and codecs
av_register_all();

AVCodec *codec;
AVCodecContext *c = NULL;
int i, out_size, size, outbuf_size;
FILE *f;
AVFrame *picture;
uint8_t *outbuf;

printf("Video encoding\n");

/* find the mpeg video encoder */
codec = avcodec_find_encoder(CODEC_ID_MPEG2VIDEO);
if (!codec) {
    fprintf(stderr, "codec not found\n");
    exit(1);
}

c = avcodec_alloc_context();
picture = avcodec_alloc_frame();

/* put sample parameters */
c->bit_rate = 400000;
/* resolution must be a multiple of two */
c->width = 352;
c->height = 288;
/* frames per second */
c->time_base = (AVRational){1,25};
c->gop_size = 10; /* emit one intra frame every ten frames */
c->max_b_frames = 1;
c->pix_fmt = PIX_FMT_YUV420P;

/* open it */
if (avcodec_open(c, codec) < 0) {
    fprintf(stderr, "could not open codec\n");
    exit(1);
}

f = fopen(filename, "wb");
if (!f) {
    fprintf(stderr, "could not open %s\n", filename);
    exit(1);
}

/* alloc image and output buffer */
outbuf_size = 100000;
outbuf = malloc(outbuf_size);
size = c->width * c->height;

#pragma mark -
AVFrame* outpic = avcodec_alloc_frame();
int nbytes = avpicture_get_size(PIX_FMT_YUV420P, c->width, c->height);

// create buffer for the output image
uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes);

#pragma mark -
for (i = 1; i < 77; i++) {
    fflush(stdout);

    int numBytes = avpicture_get_size(PIX_FMT_YUV420P, c->width, c->height);
    uint8_t *buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

    UIImage *image = [UIImage imageNamed:[NSString stringWithFormat:@"10%d", i]];
    CGImageRef newCgImage = [image CGImage];

    CGDataProviderRef dataProvider = CGImageGetDataProvider(newCgImage);
    CFDataRef bitmapData = CGDataProviderCopyData(dataProvider);
    buffer = (uint8_t *)CFDataGetBytePtr(bitmapData);

    avpicture_fill((AVPicture*)picture, buffer, PIX_FMT_RGB8, c->width, c->height);
    avpicture_fill((AVPicture*)outpic, outbuffer, PIX_FMT_YUV420P, c->width, c->height);

    struct SwsContext* fooContext = sws_getContext(c->width, c->height,
                                                   PIX_FMT_RGB8,
                                                   c->width, c->height,
                                                   PIX_FMT_YUV420P,
                                                   SWS_FAST_BILINEAR, NULL, NULL, NULL);

    // perform the conversion; here is where I try to convert to YUV
    sws_scale(fooContext, picture->data, picture->linesize, 0, c->height, outpic->data, outpic->linesize);

    /* encode the image */
    out_size = avcodec_encode_video(c, outbuf, outbuf_size, outpic);
    printf("encoding frame %3d (size=%5d)\n", i, out_size);
    fwrite(outbuf, 1, out_size, f);

    free(buffer);
    buffer = NULL;
}

/* get the delayed frames */
for (; out_size; i++) {
    fflush(stdout);

    out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
    printf("write frame %3d (size=%5d)\n", i, out_size);
    fwrite(outbuf, 1, out_size, f);
}

/* add sequence end code to have a real mpeg file */
outbuf[0] = 0x00;
outbuf[1] = 0x00;
outbuf[2] = 0x01;
outbuf[3] = 0xb7;
fwrite(outbuf, 1, 4, f);
fclose(f);
free(outbuf);

avcodec_close(c);
av_free(c);
av_free(picture);
printf("\n");
Please give me advice on how to fix this problem.
You can look at the article http://unick-soft.ru/Articles.cgi?id=20. It is in Russian, but it includes code samples and a Visual Studio example.
Has anyone found a fix for this? I am seeing the green video problem on the decode side, that is, when I decode incoming PIX_FMT_YUV420 packets and then sws_scale them to PIX_FMT_RGBA.
Thanks!
EDIT:
The green images are probably due to an arm optimization backfiring. I used this to fix the problem in my case:
http://ffmpeg-users.933282.n4.nabble.com/green-distorded-output-image-on-iPhone-td2231805.html
I guess the idea is to not specify any architecture (configure will give you a warning about the architecture being unknown, but you can continue to 'make' anyway). That way, the ARM optimizations are not used. There may be a slight performance hit (if any), but at least it works! :)
I think the problem is most likely that you are using PIX_FMT_RGB8 as your input pixel format. This does not mean 8 bits per channel like the commonly used 24-bit RGB or 32-bit ARGB. It means 8 bits per pixel, meaning that all three color channels are housed in a single byte. I am guessing that this is not the format of your image since it is quite uncommon, so you need to use PIX_FMT_RGB24 or PIX_FMT_RGB32 depending on whether or not your input image has an alpha channel. See this documentation page for info on the pixel formats.
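Sketched against the code above, the change would be something like the following (PIX_FMT_RGB32 assuming the bitmap has an alpha channel; use PIX_FMT_RGB24 if it does not, and the exact byte order may still need checking):
avpicture_fill((AVPicture*)picture, buffer, PIX_FMT_RGB32, c->width, c->height);

struct SwsContext* fooContext = sws_getContext(c->width, c->height, PIX_FMT_RGB32,
                                               c->width, c->height, PIX_FMT_YUV420P,
                                               SWS_FAST_BILINEAR, NULL, NULL, NULL);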
How much memory will a texture loaded with this method consume?
Will a 1024x1024 texture consume 4 MB anyway, regardless of loading it as RGBA4444?
-(void)loadTexture:(NSString*)nombre {

    CGImageRef textureImage = [UIImage imageWithContentsOfFile:[[NSBundle mainBundle] pathForResource:nombre ofType:nil]].CGImage;
    if (textureImage == nil) {
        NSLog(@"Failed to load texture image");
        return;
    }

    // Dimensions of our image
    imageSizeX = CGImageGetWidth(textureImage);
    imageSizeY = CGImageGetHeight(textureImage);

    textureWidth = NextPowerOfTwo(imageSizeX);
    textureHeight = NextPowerOfTwo(imageSizeY);

    GLubyte *textureData = (GLubyte *)calloc(1, textureWidth * textureHeight * 4);

    CGContextRef textureContext = CGBitmapContextCreate(textureData, textureWidth, textureHeight, 8, textureWidth * 4, CGImageGetColorSpace(textureImage), kCGImageAlphaPremultipliedLast);
    CGContextDrawImage(textureContext, CGRectMake(0.0, 0.0, (float)textureWidth, (float)textureHeight), textureImage);

    /**************** Convert data to RGBA4444 ******************/
    // Convert "RRRRRRRRGGGGGGGGBBBBBBBBAAAAAAAA" to "RRRRGGGGBBBBAAAA"
    void *tempData = malloc(textureWidth * textureHeight * 2);
    unsigned int* inPixel32 = (unsigned int*)textureData;
    unsigned short* outPixel16 = (unsigned short*)tempData;

    for (int i = 0; i < textureWidth * textureHeight; ++i, ++inPixel32)
        *outPixel16++ =
            ((((*inPixel32 >> 0)  & 0xFF) >> 4) << 12) | // R
            ((((*inPixel32 >> 8)  & 0xFF) >> 4) << 8)  | // G
            ((((*inPixel32 >> 16) & 0xFF) >> 4) << 4)  | // B
            ((((*inPixel32 >> 24) & 0xFF) >> 4) << 0);   // A

    free(textureData);
    textureData = tempData;

    // We no longer need the bitmap context, release it
    CGContextRelease(textureContext);

    glGenTextures(1, &textures[0]);
    glBindTexture(GL_TEXTURE_2D, textures[0]);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, textureWidth, textureHeight, 0, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4, textureData);
    free(textureData);

    //glEnable(GL_BLEND);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}
The GL ES 1.1.12 specification (pdf) has this to say on the subject:
The GL stores the resulting texture with internal component resolutions of its own choosing. The allocation of internal component resolution may vary based on any TexImage2D parameter (except target), but the allocation must not be a function of any other state and cannot be changed once established.
However, according to the iPhone Dev Center, RGBA4444 is natively supported. So I would expect your code snippet to consume 2 MB (1024 x 1024 pixels x 2 bytes per pixel), rather than the 4 MB that 4 bytes per pixel would take. Do you have reasons to doubt it's using 2 MB?
In OpenGL, there is no way to find out how much video memory a texture uses, and there is no single answer to the question to begin with: such details depend on the graphics card, the driver version, and the platform.
From the OpenGL ES Programming Guide for iOS:
If your application cannot use compressed textures, consider using a lower precision pixel format. A texture in RGB565, RGBA5551, or RGBA4444 format uses half the memory of a texture in RGBA8888 format. Use RGBA8888 only when your application needs that level of quality.
Above that paragraph, they also really recommend using PVRTC compressed textures, because they save even more memory.
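For comparison, uploading a PVRTC-compressed texture would look like this (pvrtcData is a hypothetical buffer read from a .pvr file; at 4 bits per pixel, a 1024x1024 texture takes 1024 * 1024 / 2 = 524,288 bytes, i.e. 0.5 MB versus 2 MB for RGBA4444):
// Upload pre-compressed PVRTC data (4 bpp, with alpha).
glCompressedTexImage2D(GL_TEXTURE_2D, 0,
                       GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG,
                       1024, 1024, 0, 1024 * 1024 / 2, pvrtcData);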