AVCaptureSession Display is White (no Video) - iPhone

I am using an AVCaptureSession with an output setting of:
NSString* key = (NSString*)kCVPixelBufferPixelFormatTypeKey;
NSNumber* value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange];
NSDictionary* videoSettings = [NSDictionary dictionaryWithObject:value forKey:key];
[captureOutput setVideoSettings:videoSettings];
My AVCaptureVideoPreviewLayer displays fine, but I need more than that, since I have had no success taking a screenshot from the AVCaptureVideoPreviewLayer. So, when creating a CGContextRef within the captureOutput delegate method, I am using these settings:
uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, width * 4, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
CGImageRef newImage = CGBitmapContextCreateImage(newContext);
I am no longer receiving an 'unsupported parameter combination' warning, but the display is just plain white.
I should add that when I change
NSNumber* value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange];
to
NSNumber* value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA];
Everything works fine. What is my problem?

Take a look at the following code, which converts a bi-planar video frame into RGB "by hand" using the video-range BT.601 coefficients (matching the kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange format above). The white display happens because, for a bi-planar buffer, CVPixelBufferGetBaseAddress() does not return pixel data at all; it returns a CVPlanarPixelBufferInfo_YCbCrBiPlanar struct describing the planes, so treating it as BGRA pixels in an RGB bitmap context draws nothing useful.
CVPixelBufferLockBaseAddress(imageBuffer, 0);

size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);

// For a bi-planar buffer, the base address points to a struct describing the planes
CVPlanarPixelBufferInfo_YCbCrBiPlanar *bufferInfo = (CVPlanarPixelBufferInfo_YCbCrBiPlanar *)baseAddress;
NSUInteger yOffset = EndianU32_BtoN(bufferInfo->componentInfoY.offset);
NSUInteger yPitch = EndianU32_BtoN(bufferInfo->componentInfoY.rowBytes);
NSUInteger cbCrOffset = EndianU32_BtoN(bufferInfo->componentInfoCbCr.offset);
NSUInteger cbCrPitch = EndianU32_BtoN(bufferInfo->componentInfoCbCr.rowBytes);

uint8_t *rgbBuffer = malloc(width * height * 3);
uint8_t *yBuffer = baseAddress + yOffset;
uint8_t *cbCrBuffer = baseAddress + cbCrOffset;

for(int y = 0; y < height; y++)
{
    uint8_t *rgbBufferLine = &rgbBuffer[y * width * 3];
    uint8_t *yBufferLine = &yBuffer[y * yPitch];
    uint8_t *cbCrBufferLine = &cbCrBuffer[(y >> 1) * cbCrPitch];

    for(int x = 0; x < width; x++)
    {
        // Each Cb/Cr pair is shared by two horizontally adjacent luma samples
        int luma = yBufferLine[x] - 16;
        int cb = cbCrBufferLine[x & ~1] - 128;
        int cr = cbCrBufferLine[x | 1] - 128;
        uint8_t *rgbOutput = &rgbBufferLine[x * 3];

        // fixed-point ITU-R BT.601 conversion, clamped to [0, 255]
        int r = (298 * luma + 409 * cr + 128) >> 8;
        int g = (298 * luma - 100 * cb - 208 * cr + 128) >> 8;
        int b = (298 * luma + 516 * cb + 128) >> 8;
        rgbOutput[0] = (uint8_t)MAX(0, MIN(255, r));
        rgbOutput[1] = (uint8_t)MAX(0, MIN(255, g));
        rgbOutput[2] = (uint8_t)MAX(0, MIN(255, b));
    }
}

CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
// ... use rgbBuffer (e.g. wrap it in a CGImage, as sketched below), then free() it
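To get the screenshot the question asks for, the RGB buffer still has to be wrapped in a CGImage. A minimal sketch, assuming the width, height and rgbBuffer variables from the code above (note the provider does not copy the buffer, so don't free rgbBuffer while the image is in use):
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, rgbBuffer, width * height * 3, NULL);
CGImageRef rgbImage = CGImageCreate(width, height,
                                    8,          // bits per component
                                    24,         // bits per pixel (packed RGB)
                                    width * 3,  // bytes per row
                                    colorSpace, kCGImageAlphaNone,
                                    provider, NULL, NO, kCGRenderingIntentDefault);
UIImage *screenshot = [UIImage imageWithCGImage:rgbImage];
CGImageRelease(rgbImage);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace);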
The following link may be useful as well to better understand this video format:
http://blog.csdn.net/yiheng_l/article/details/3790219#yuvformats_nv12

Take a look at InvasiveCode's tutorial. It shows how to use the Accelerate and Core Image frameworks to process the Y channel.
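For reference, a minimal sketch of pulling just the Y (luma) plane out of a bi-planar buffer with the planar CVPixelBuffer accessors and wrapping it in a vImage_Buffer, ready for an Accelerate call (imageBuffer is the CVImageBufferRef from the capture delegate; the actual vImage operation is up to you):
#import <Accelerate/Accelerate.h>

CVPixelBufferLockBaseAddress(imageBuffer, 0);

vImage_Buffer lumaPlane;
lumaPlane.data     = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0); // plane 0 = Y
lumaPlane.width    = CVPixelBufferGetWidthOfPlane(imageBuffer, 0);
lumaPlane.height   = CVPixelBufferGetHeightOfPlane(imageBuffer, 0);
lumaPlane.rowBytes = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, 0);

// ... run a vImage operation on lumaPlane here ...

CVPixelBufferUnlockBaseAddress(imageBuffer, 0);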

Related

How to compare images using OpenCV in iOS (iPhone)

I want to compare two images taken with the iPhone camera in my project. I am using OpenCV to do it. Is there any better way?
If I could get a percentage similarity, that would be great.
I am using the following OpenCV code for the comparison:
-(void)opencvImageCompare{
    NSMutableArray *valuesArray = [[NSMutableArray alloc] init];

    IplImage *img = [self CreateIplImageFromUIImage:imageView.image];
    // always check camera image
    if(img == 0) {
        printf("Cannot load camera img");
    }

    IplImage *res;
    CvPoint minloc, maxloc;
    double minval, maxval;
    double values;

    UIImage *imageTocompare = [UIImage imageNamed:@"MyImageName"];
    IplImage *imageTocompareIpl = [self CreateIplImageFromUIImage:imageTocompare];
    // always check server image
    if(imageTocompareIpl == 0) {
        printf("Cannot load serverIplImageArray image");
    }

    // NOTE: enlarging the IplImage header without reallocating its pixel data
    // is invalid, and a likely cause of the crash with differently sized images
    if(img->width - imageTocompareIpl->width <= 0 && img->height - imageTocompareIpl->height <= 0){
        int balWidth = imageTocompareIpl->width - img->width;
        int balHeight = imageTocompareIpl->height - img->height;
        img->width = img->width + balWidth + 100;
        img->height = img->height + balHeight + 100;
    }

    CvSize size = cvSize(
        img->width - imageTocompareIpl->width + 1,
        img->height - imageTocompareIpl->height + 1
    );
    res = cvCreateImage(size, IPL_DEPTH_32F, 1);

    // CV_TM_SQDIFF  CV_TM_SQDIFF_NORMED
    // CV_TM_CCORR   CV_TM_CCORR_NORMED
    // CV_TM_CCOEFF  CV_TM_CCOEFF_NORMED
    cvMatchTemplate(img, imageTocompareIpl, res, CV_TM_CCOEFF);
    cvMinMaxLoc(res, &minval, &maxval, &minloc, &maxloc, 0);
    printf("\n value %f", maxval - minval);

    values = maxval - minval;
    NSString *valString = [NSString stringWithFormat:@"%f", values];
    [valuesArray addObject:valString];
    weedObject.values = [valString doubleValue];
    printf("\n------------------------------");

    cvReleaseImage(&imageTocompareIpl);
    cvReleaseImage(&res);
    cvReleaseImage(&img);
}
For the same image I am getting a non-zero result (14956...), and if I pass a different image it crashes.
Try this code. It compares the images byte for byte, i.e. an exact (100%) match:
UIImage *img1 = // Some photo;
UIImage *img2 = // Some photo;
NSData *imgdata1 = UIImagePNGRepresentation(img1);
NSData *imgdata2 = UIImagePNGRepresentation(img2);
if ([imgdata1 isEqualToData:imgdata2])
{
    NSLog(@"Same Image");
}
Try this code - it compare images pixel by pixel
-(void)removeindicator:(UIImage *)image
{
    for (int i = 0; i < [imageArray count]; i++)
    {
        CGFloat width = 100.0f;
        CGFloat height = 100.0f;

        CGSize newSize1 = CGSizeMake(width, height); // whatever size
        UIGraphicsBeginImageContext(newSize1);
        [[UIImage imageNamed:[imageArray objectAtIndex:i]] drawInRect:CGRectMake(0, 0, newSize1.width, newSize1.height)];
        UIImage *newImage1 = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();

        UIImageView *imageview_camera = (UIImageView *)[self.view viewWithTag:-3];
        CGSize newSize2 = CGSizeMake(width, height); // whatever size
        UIGraphicsBeginImageContext(newSize2);
        [[imageview_camera image] drawInRect:CGRectMake(0, 0, newSize2.width, newSize2.height)];
        UIImage *newImage2 = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();

        float numDifferences = 0.0f;
        float totalCompares = width * height;
        NSArray *img1RGB = nil;
        NSArray *img2RGB = nil;

        for (int yCoord = 0; yCoord < height; yCoord += 1)
        {
            for (int xCoord = 0; xCoord < width; xCoord += 1)
            {
                img1RGB = [self getRGBAsFromImage:newImage1 atX:xCoord andY:yCoord];
                img2RGB = [self getRGBAsFromImage:newImage2 atX:xCoord andY:yCoord];
                if (([[img1RGB objectAtIndex:0] floatValue] - [[img2RGB objectAtIndex:0] floatValue]) == 0 || ([[img1RGB objectAtIndex:1] floatValue] - [[img2RGB objectAtIndex:1] floatValue]) == 0 || ([[img1RGB objectAtIndex:2] floatValue] - [[img2RGB objectAtIndex:2] floatValue]) == 0)
                {
                    // one or more pixel components match exactly
                    numDifferences++;
                }
            }
        }

        // show the result as a percentage at the end
        CGFloat percentage_similar = ((numDifferences * 100) / totalCompares);
        NSString *str = nil;
        if (percentage_similar >= 10.0f)
        {
            str = [NSString stringWithFormat:@"%i%% Identical", (int)((numDifferences * 100) / totalCompares)];
            UIAlertView *alertview = [[UIAlertView alloc] initWithTitle:@"i-App" message:[NSString stringWithFormat:@"%@ Images are same", str] delegate:nil cancelButtonTitle:@"Ok" otherButtonTitles:nil];
            [alertview show];
            break;
        }
        else
        {
            str = [NSString stringWithFormat:@"Result: %i%% Identical", (int)((numDifferences * 100) / totalCompares)];
            UIAlertView *alertview = [[UIAlertView alloc] initWithTitle:@"i-App" message:[NSString stringWithFormat:@"%@ Images are not same", str] delegate:nil cancelButtonTitle:@"OK" otherButtonTitles:nil];
            [alertview show];
        }
    }
}
-(NSArray *)getRGBAsFromImage:(UIImage *)image atX:(int)xx andY:(int)yy
{
    // First get the image into a data buffer
    CGImageRef imageRef = [image CGImage];
    NSUInteger width = CGImageGetWidth(imageRef);
    NSUInteger height = CGImageGetHeight(imageRef);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    unsigned char *rawData = (unsigned char *)calloc(height * width * 4, sizeof(unsigned char));
    NSUInteger bytesPerPixel = 4;
    NSUInteger bytesPerRow = bytesPerPixel * width;
    NSUInteger bitsPerComponent = 8;
    // Note: this renders the whole image on every call, which is slow when
    // it is invoked once per pixel as in the loop above
    CGContextRef context = CGBitmapContextCreate(rawData, width, height,
                                                 bitsPerComponent, bytesPerRow, colorSpace,
                                                 kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGColorSpaceRelease(colorSpace);
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
    CGContextRelease(context);

    // Now rawData contains the image data in the RGBA8888 pixel format
    int byteIndex = (bytesPerRow * yy) + xx * bytesPerPixel;
    CGFloat red   = (rawData[byteIndex]     * 1.0) / 255.0;
    CGFloat green = (rawData[byteIndex + 1] * 1.0) / 255.0;
    CGFloat blue  = (rawData[byteIndex + 2] * 1.0) / 255.0;
    //CGFloat alpha = (rawData[byteIndex + 3] * 1.0) / 255.0;
    free(rawData);

    return [NSArray arrayWithObjects:
            [NSNumber numberWithFloat:red],
            [NSNumber numberWithFloat:green],
            [NSNumber numberWithFloat:blue], nil];
}
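As noted in the comment above, getRGBAsFromImage: re-renders the entire image for every pixel it is asked about, so the nested loop performs width × height full renders. A sketch of a faster variant that renders each image once and compares the raw RGBA buffers directly (rawRGBADataFromImage: is a hypothetical helper that runs the same CGBitmapContextCreate/CGContextDrawImage sequence once and returns the malloc'ed buffer):
unsigned char *buf1 = [self rawRGBADataFromImage:newImage1]; // hypothetical helper
unsigned char *buf2 = [self rawRGBADataFromImage:newImage2]; // hypothetical helper
NSUInteger pixelCount = (NSUInteger)(width * height);
NSUInteger matches = 0;
for (NSUInteger i = 0; i < pixelCount * 4; i += 4) {
    // compare R, G and B; ignore alpha
    if (buf1[i] == buf2[i] && buf1[i+1] == buf2[i+1] && buf1[i+2] == buf2[i+2]) {
        matches++;
    }
}
float percentageSimilar = (matches * 100.0f) / pixelCount;
free(buf1);
free(buf2);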

Save dynamically created pixels and retrieve them back successfully

I am trying to insert pixels dynamically into a PNG image file and then retrieve them without any alteration to the pixels I saved, using the code below, but I have not been successful. Can someone help me pinpoint the problem? Thanks.
// Original pixels for debugging only
NSString *sPixels = @"12345678";
const char *cpPixels8 = sPixels.UTF8String;
char *cpPixelsStore = calloc(sPixels.length + 1, 1);
strncpy(cpPixelsStore, cpPixels8, sPixels.length);

unsigned int r, g, b, a;
for(int j = 0; j < sPixels.length; j += 4)
{
    r = cpPixelsStore[j+0];
    g = cpPixelsStore[j+1];
    b = cpPixelsStore[j+2];
    a = cpPixelsStore[j+3];
    printf("r:0x%X g:0x%X b:0x%X a:0x%X\n", r, g, b, a);
}

int width = 2;
int height = 1;
int bytesPerRow = 4 * width;
int bitsPerComponent = 8;
int bitsPerPixel = 32;

CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = kCGBitmapByteOrder32Host | kCGImageAlphaLast;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, cpPixelsStore, (bytesPerRow * height), NULL);
CGImageRef imRef = CGImageCreate(width, height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);
CGDataProviderRelease(provider);

UIImage *imNewTemp = [UIImage imageWithCGImage:imRef];
NSData *datPNG = UIImagePNGRepresentation(imNewTemp);

NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *documentsDirectory = [paths objectAtIndex:0];
NSString *sFile = [documentsDirectory stringByAppendingPathComponent:@"Pic.png"];
[datPNG writeToFile:sFile atomically:YES];
CGImageRelease(imRef);

// Cross verify the save
UIImage *imTemp = [UIImage imageWithContentsOfFile:sFile];
NSData *datImagePixels = (__bridge_transfer NSData *)CGDataProviderCopyData(CGImageGetDataProvider(imTemp.CGImage)); // __bridge_transfer so ARC releases the copied data
unsigned char *ucpPixelBytes = (unsigned char *)[datImagePixels bytes];
for(int j = 0; j < datImagePixels.length; j += 4)
{
    r = ucpPixelBytes[j+0];
    g = ucpPixelBytes[j+1];
    b = ucpPixelBytes[j+2];
    a = ucpPixelBytes[j+3];
    printf("r:0x%X g:0x%X b:0x%X a:0x%X\n", r, g, b, a);
}
The initial printf outputs this during creation:
r:0x31 g:0x32 b:0x33 a:0x34
r:0x35 g:0x36 b:0x37 a:0x38
The printf after saving and retrieving the file gives this output:
r:0xA g:0xA b:0xA a:0x31
r:0xC g:0xB b:0xB a:0x35
I'm lost in translation. Please help.
NSData is immutable; you need an NSMutableData object, then you use mutableBytes to get a writable pointer:
NSMutableData *nData = [NSMutableData dataWithData:datImagePixels];
Now you have data you can modify to create a NEW image. You can use the Quartz function CGImageCreate() to get a CGImageRef, and from that a UIImage.
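A minimal sketch of that round trip, reusing the imTemp and datImagePixels variables from the question and copying the bitmap layout from the source image (error handling omitted):
NSMutableData *nData = [NSMutableData dataWithData:datImagePixels];
unsigned char *p = nData.mutableBytes;
p[0] = 0xFF; // modify pixels in place here

CGImageRef srcImage = imTemp.CGImage;
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)nData);
CGImageRef newImage = CGImageCreate(CGImageGetWidth(srcImage),
                                    CGImageGetHeight(srcImage),
                                    CGImageGetBitsPerComponent(srcImage),
                                    CGImageGetBitsPerPixel(srcImage),
                                    CGImageGetBytesPerRow(srcImage),
                                    CGImageGetColorSpace(srcImage),
                                    CGImageGetBitmapInfo(srcImage),
                                    provider, NULL, NO, kCGRenderingIntentDefault);
UIImage *result = [UIImage imageWithCGImage:newImage];
CGImageRelease(newImage);
CGDataProviderRelease(provider);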

NSData -> UIImage -> NSData

I have an NSData object containing RGB values for an image. I want to turn that into a UIImage (given the width and the height), and then convert that UIImage back into an NSData object identical to the one I started with.
Please help me, I've been trying for hours now.
Here are some things I've looked at or tried, though probably not correctly, because they didn't work:
CGImageCreate
CGBitmapContextCreateWithData
CGBitmapContextGetData
CGDataProviderCopyData(CGImageGetDataProvider(imageRef))
Thanks!
Here is my current code:
NSMutableData *rgb; // made earlier
double len = (double)[rgb length];
len /= 3;
len += 0.5;
len = (int)len;
int diff = len*3 - [rgb length];
NSString *str = @"a";
NSData *a = [str dataUsingEncoding:NSUTF8StringEncoding];
for(int i = 0; i < diff; i++) {
    [toEncode appendData:a]; // so if my data is RGBRGBR it will turn into RGBRGBR(97)(97)
}

size_t width = (size_t)len;
size_t height = 1;
CGContextRef ctx;

CFDataRef m_DataRef;
m_DataRef = (__bridge CFDataRef)toEncode;
UInt8 *m_PixelBuf = (UInt8 *)CFDataGetBytePtr(m_DataRef);

vImage_Buffer src;
src.data = m_PixelBuf;
src.width = width;
src.height = height;
src.rowBytes = 3 * width;

vImage_Buffer dst;
dst.width = width;
dst.height = height;
dst.rowBytes = 4 * width;

vImageConvert_RGB888toARGB8888(&src, NULL, 0, &dst, NO, kvImageNoFlags);

// free(m_PixelBuf);
// m_PixelBuf = dst.data;
// NSUInteger lenB = len * (4/3);
/*
UInt8 *m_Pixel = malloc(sizeof(UInt8) * lenB);
int z = 0;
for(int i = 0; i < lenB; i++) {
    if(i % 4 == 0) {
        m_Pixel[i] = 0;
    } else {
        m_Pixel[i] = m_PixelBuf[z];
        z++;
    }
}
*/
// Byte tmpByte;

CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
/*
ctx = CGBitmapContextCreate(m_PixelBuf,
                            width,
                            height,
                            8,
                            4*width,
                            colorSpace,
                            kCGImageAlphaPremultipliedFirst);
*/
size_t w = (size_t)len;
ctx = CGBitmapContextCreate(dst.data,
                            w,
                            height,
                            8,
                            4*width,
                            colorSpace,
                            kCGImageAlphaNoneSkipFirst);
CGImageRef imageRef = CGBitmapContextCreateImage(ctx);
UIImage *rawImage = [UIImage imageWithCGImage:imageRef];
CGContextRelease(ctx);
I get this error: <Error>: CGBitmapContextCreate: invalid data bytes/row: should be at least 324 for 8 integer bits/component, 3 components, kCGImageAlphaNoneSkipFirst.
To UIImage from NSData:
[UIImage imageWithData:]
More on UIImage
To NSData from UIImage:
UIImage *img = [UIImage imageNamed:@"some.png"];
NSData *dataObj = UIImageJPEGRepresentation(img, 1.0);
More on UIImageJPEGRepresentation()
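Note that [UIImage imageWithData:] expects encoded image data (PNG, JPEG, etc.), not a raw pixel buffer, so for the raw RGB data in this question you still need the CGImage route described below. For already-encoded data, the round trip is just (pngData is a placeholder name):
UIImage *image = [UIImage imageWithData:pngData];    // pngData holds encoded PNG/JPEG bytes
NSData *roundTrip = UIImagePNGRepresentation(image); // lossless pixel-wise, though the bytes may differ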
The basic procedure would be to create a bitmap context using CGBitmapContextCreateWithData and then creating a CGImageRef from that with CGBitmapContextCreateImage. The parameters for creating the bitmap context depend on how your raw data is laid out in memory. Not all kinds of raw data are supported by Quartz.
The documentation on CGBitmapContextCreateWithData is quite detailed, and this is the most challenging part; getting the CGImageRef from the context and wrapping it in a UIImage (imageWithCGImage:) is trivial afterwards.
If your data is in RGB format, you will want to create the bitmap with CGBitmapContextCreate and CGColorSpaceCreateDeviceRGB. Note, however, that bitmap contexts do not support packed 24-bit RGB (kCGImageAlphaNone); you have to expand the pixels to a 32-bit format and use something like kCGImageAlphaNoneSkipFirst, which is what the vImageConvert_RGB888toARGB8888 call above is for.
The error states that rowBytes needs to be at least 324; dividing that by 4 gives 81, which implies that width is smaller than w and that w = 81. The two values should match.
Try replacing width and w with a small number like 5 to validate this. I would also note that you should be allocating dst.data via malloc prior to calling vImageConvert_RGB888toARGB8888.
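In other words, before the conversion call (a sketch using the question's variables):
dst.width = width;
dst.height = height;
dst.rowBytes = 4 * width;
dst.data = malloc(dst.rowBytes * height); // vImageConvert_RGB888toARGB8888 writes its output here
Without that malloc, dst.data is an uninitialized pointer and the convert call writes to random memory.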
Consider using CGImageCreate() instead of creating a bitmap context:
// this will automatically free() dst.data when destData is deallocated
NSData *destData = [NSData dataWithBytesNoCopy:dst.data length:4*width*height];
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)destData);
CGImageRef imageRef = CGImageCreate(width,
height,
8, //bits per component
8*4, //bits per pixel
4*width, //bytesPerRow
colorSpace, //colorspace
kCGImageAlphaNoneSkipFirst,
provider, //CGDataProviderRef
NULL, //decode
false, //should interpolate
kCGRenderingIntentDefault //intent
);

Setting the contrast of an image on iPhone

I'm using the following code to set the contrast of an image based on a slider value. The slider range is from 0.0f to 2.0f. It runs fine in the simulator, but it crashes on the device due to low memory. Can anyone tell me what's wrong with this code?
Thanks in advance....
-(void)contrast:(float)value{
    CGImageRef img = refImage.CGImage;
    CFDataRef dataref = CopyImagePixels(img);
    UInt8 *data = (UInt8 *)CFDataGetBytePtr(dataref);
    int length = CFDataGetLength(dataref);

    for(int index = 0; index < length; index += 4){
        int alphaCount = data[index+0];
        int redCount   = data[index+1];
        int greenCount = data[index+2];
        int blueCount  = data[index+3];

        alphaCount = ((alphaCount-128)*value) + 128;
        if (alphaCount < 0) alphaCount = 0;
        if (alphaCount > 255) alphaCount = 255;
        data[index+0] = (Byte)alphaCount;

        redCount = ((redCount-128)*value) + 128;
        if (redCount < 0) redCount = 0;
        if (redCount > 255) redCount = 255;
        data[index+1] = (Byte)redCount;

        greenCount = ((greenCount-128)*value) + 128;
        if (greenCount < 0) greenCount = 0;
        if (greenCount > 255) greenCount = 255;
        data[index+2] = (Byte)greenCount;

        blueCount = ((blueCount-128)*value) + 128;
        if (blueCount < 0) blueCount = 0;
        if (blueCount > 255) blueCount = 255;
        data[index+3] = (Byte)blueCount;
    }

    size_t width = CGImageGetWidth(img);
    size_t height = CGImageGetHeight(img);
    size_t bitsPerComponent = CGImageGetBitsPerComponent(img);
    size_t bitsPerPixel = CGImageGetBitsPerPixel(img);
    size_t bytesPerRow = CGImageGetBytesPerRow(img);
    CGColorSpaceRef colorspace = CGImageGetColorSpace(img);
    CGBitmapInfo bitmapInfo = CGImageGetBitmapInfo(img);
    CFDataRef newData = CFDataCreate(NULL, data, length);
    CGDataProviderRef provider = CGDataProviderCreateWithCFData(newData);
    CGImageRef newImg = CGImageCreate(width, height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorspace, bitmapInfo, provider, NULL, true, kCGRenderingIntentDefault);

    [ImgView setImage:[UIImage imageWithCGImage:newImg]];

    CGImageRelease(newImg);
    CGDataProviderRelease(provider);
}
You might have some memory leaks.
Any function named CF...Create() needs a corresponding CFRelease() called on its result. The following has no release:
CFDataRef newData=CFDataCreate(NULL,data,length);
I think you need to clean up after the copy as well:
CFDataRef dataref=CopyImagePixels(img);
You cleaned up after newImg okay. I can't see any other leaks, but check your Create/Copy calls and make sure you clean up the memory afterwards.
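Concretely, a sketch of the end of the method with the two missing releases added (safe at that point, because the provider and image retain what they need):
[ImgView setImage:[UIImage imageWithCGImage:newImg]];
CGImageRelease(newImg);
CGDataProviderRelease(provider);
CFRelease(newData);   // balances CFDataCreate()
CFRelease(dataref);   // balances CopyImagePixels() (Create/Copy rule)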

How to get AVFrame(ffmpeg) from NSImage/UIImage

I'd like to convert an NSImage/UIImage to an AVFrame (ffmpeg).
I found some example code:
http://lists.mplayerhq.hu/pipermail/libav-user/2010-April/004550.html
but this code doesn't work.
I tried another approach:
AVFrame *frame = avcodec_alloc_frame();
int numBytes = avpicture_get_size(PIX_FMT_YUV420P, outputWidth, outputHeight);
uint8_t *buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
avpicture_fill((AVPicture *)frame, buffer, PIX_FMT_YUV420P, outputWidth, outputHeight);

//UIImage *image = … something … ;
NSImage *image = … something … ;
//CGImageRef newCgImage = image.CGImage;
CGImageRef newCgImage = [image CGImageForProposedRect:nil context:nil hints:nil];
//NSBitmapImageRep *bm = [NSBitmapImageRep imageRepWithData:[image TIFFRepresentation]];
//CGImageRef newCgImage = [bm CGImage];

size_t w = CGImageGetWidth(newCgImage);
size_t h = CGImageGetHeight(newCgImage);
CGDataProviderRef dataProvider = CGImageGetDataProvider(newCgImage);
CFDataRef bitmapData = CGDataProviderCopyData(dataProvider);
uint8_t *srcBytes = (uint8_t *)CFDataGetBytePtr(bitmapData); // renamed: "buffer" is already used above

frame->linesize[0] = w;
int y, x;
for (y = 0; y < h; y++) {
    for (x = 0; x < w; x++) {
        int z = y * w + x;
        frame->data[0][z] = srcBytes[z];
    }
}
But this AVFrame gives me a green picture.
Please let me know how I can get this working.
Thanks.
The following is additional.
I tried again, paying attention to the color format.
I found an example of how to convert RGB to YUV:
How to perform RGB->YUV conversion in C/C++?
The new code is like this, but it still doesn't work…
#import <Foundation/Foundation.h>
#import <AppKit/AppKit.h>
#import <libavutil/avstring.h>
#import <libavcodec/avcodec.h>
#import <libavformat/avformat.h>
#import <libswscale/swscale.h>

int main(int argc, char *argv[]) {
    NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
    int i;
    int outputWidth = 480;  // size must be same size as arg
    int outputHeight = 360; // size must be same size as arg

    av_register_all();

    AVOutputFormat *format = av_guess_format("mp4", NULL, NULL);
    if(!format) return -1;
    AVFormatContext *outFormatCtx = avformat_alloc_context();
    if(!outFormatCtx) return -1;
    outFormatCtx->oformat = format;
    av_strlcpy(outFormatCtx->filename, "test.mov", sizeof(outFormatCtx->filename));

    AVStream *vstream = av_new_stream(outFormatCtx, 0);
    if(!vstream) return -1;

    enum CodecID codec_id = av_guess_codec(outFormatCtx->oformat,
                                           NULL,
                                           outFormatCtx->filename,
                                           NULL, CODEC_TYPE_VIDEO);
    AVCodec *ovCodec = avcodec_find_encoder(codec_id);
    if(!ovCodec) return -1;

    AVCodecContext *ovCodecCtx = vstream->codec;
    ovCodecCtx->codec_id = ovCodec->id;
    ovCodecCtx->codec_type = CODEC_TYPE_VIDEO;
    ovCodecCtx->width = outputWidth;
    ovCodecCtx->height = outputHeight;
    ovCodecCtx->pix_fmt = PIX_FMT_NONE;
    if(ovCodec && ovCodec->pix_fmts){
        const enum PixelFormat *p = ovCodec->pix_fmts;
        while(*p++ != -1){
            if(*p == ovCodecCtx->pix_fmt) break;
        }
        if(*p == -1) ovCodecCtx->pix_fmt = ovCodec->pix_fmts[0];
    }
    ovCodecCtx->time_base.num = 1;
    ovCodecCtx->time_base.den = 30;
    if(format->flags & AVFMT_GLOBALHEADER)
        ovCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
    if(avcodec_open(ovCodecCtx, ovCodec) != 0) return -1;

    if (!(format->flags & AVFMT_NOFILE)) {
        if(url_fopen(&outFormatCtx->pb, outFormatCtx->filename, URL_WRONLY) < 0) return -1;
    }
    av_write_header(outFormatCtx);

    int buf_size = ovCodecCtx->width * ovCodecCtx->height * 4;
    uint8_t *buf = av_malloc(buf_size);

    AVFrame *buffer_frame = avcodec_alloc_frame();
    if(!buffer_frame) return -1;
    AVFrame *frame = avcodec_alloc_frame();
    if(!frame) return -1;
    int numBytes = avpicture_get_size(PIX_FMT_YUV420P, outputWidth, outputHeight);
    uint8_t *buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
    avpicture_fill((AVPicture *)frame, buffer, PIX_FMT_YUV420P, outputWidth, outputHeight);

    for(i = 1; i < argc; i++){
        NSAutoreleasePool *innerPool = [[NSAutoreleasePool alloc] init];
        NSImage *image = [[NSImage alloc] initWithContentsOfFile:[NSString stringWithCString:argv[i] encoding:NSUTF8StringEncoding]];
        CGImageRef imageRef = [image CGImageForProposedRect:nil context:nil hints:nil];
        size_t w = CGImageGetWidth(imageRef);
        size_t h = CGImageGetHeight(imageRef);
        size_t bytesPerRow = CGImageGetBytesPerRow(imageRef);
        CGDataProviderRef dataProvider = CGImageGetDataProvider(imageRef);
        CFDataRef bitmapData = CGDataProviderCopyData(dataProvider);
        uint8_t *buff = (uint8_t *)CFDataGetBytePtr(bitmapData);

        uint8_t R, G, B, Y, U, V;
        int x, y;
        for(y = 0; y < h; y++){
            for(x = 0; x < w; x++){
                uint8_t *tmp = buff + y * bytesPerRow + x * 4;
                R = *(tmp + 3);
                G = *(tmp + 2);
                B = *(tmp + 1);
                Y = (0.257 * R) + (0.504 * G) + (0.098 * B) + 16;
                U = -(0.148 * R) - (0.291 * G) + (0.439 * B) + 128;
                V = (0.439 * R) - (0.368 * G) - (0.071 * B) + 128;
                //printf("y:%d x:%d R:%d,G:%d,B:%d Y:%d,U:%d,V:%d \n",y,x,R,G,B,Y,U,V);
                frame->data[0][y*frame->linesize[0]+x] = Y;
                //frame->data[1][y*frame->linesize[1]+x] = U; // crashes ("Bus error") if uncommented
                //frame->data[2][y*frame->linesize[2]+x] = V; // crashes ("Bus error") if uncommented
            }
        }
        CGImageRelease(imageRef);
        CFRelease(bitmapData);

        int out_size = avcodec_encode_video(ovCodecCtx, buf, buf_size, frame);
        AVPacket outPacket;
        av_init_packet(&outPacket);
        outPacket.stream_index = vstream->index;
        outPacket.data = buf;
        outPacket.size = out_size;
        //outPacket.pts = ?;
        //outPacket.dts = ?;
        if(ovCodecCtx->coded_frame->key_frame)
            outPacket.flags |= PKT_FLAG_KEY;
        if(av_interleaved_write_frame(outFormatCtx, &outPacket) != 0) return -1;

        [image release];
        [innerPool release];
    }

    av_write_trailer(outFormatCtx);
    if (!(format->flags & AVFMT_NOFILE))
        if(url_fclose(outFormatCtx->pb) < 0) return -1;

    avcodec_close(vstream->codec);
    for(i = 0; i < outFormatCtx->nb_streams; i++) {
        av_freep(&outFormatCtx->streams[i]->codec);
        av_freep(&outFormatCtx->streams[i]);
    }
    av_freep(&outFormatCtx);
    av_free(buffer);
    av_free(frame);
    av_free(buffer_frame);

    [pool release];
    return 0;
}
and the makefile is like this:
CC = /usr/bin/gcc
CFLAGS = -O4 -Wall -I/usr/local/include
LDFLAGS =
LDLIBS = -L/usr/local/bin -lavutil -lavformat -lavcodec -lswscale
FRAMEWORK = -framework Foundation -framework AppKit #-framework CoreGraphics
OBJS = test.o

test: $(OBJS)
	$(CC) -o $@ $(LDFLAGS) $(OBJS) $(LDLIBS) $(FRAMEWORK) -lz -lbz2 -arch x86_64
Please somebody help me.
There is a colorspace mismatch between the data of the CGImage and the destination AVFrame. To fix it, you need to convert the CGImage data (probably ARGB) into the YUV420 format (FFmpeg has a built-in format converter). You can get information on the pixel layout of a CGImage with the CGImageGetBitsPerComponent, CGImageGetBitsPerPixel and CGImageGetBytesPerRow functions.
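With libswscale (already linked in the question's makefile), that conversion is only a few lines. A sketch using the variables from the question's inner loop, and assuming the CGImage data is 32-bit BGRA with a bytesPerRow stride (check CGImageGetBitmapInfo to pick the right PIX_FMT_* value):
struct SwsContext *swsCtx = sws_getContext((int)w, (int)h, PIX_FMT_BGRA,
                                           outputWidth, outputHeight, PIX_FMT_YUV420P,
                                           SWS_BILINEAR, NULL, NULL, NULL);
const uint8_t *srcData[4] = { buff, NULL, NULL, NULL };
int srcLinesize[4] = { (int)bytesPerRow, 0, 0, 0 };

// Fills all three YUV420P planes set up by avpicture_fill() above,
// replacing the hand-written per-pixel loop.
sws_scale(swsCtx, srcData, srcLinesize, 0, (int)h, frame->data, frame->linesize);
sws_freeContext(swsCtx);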