How to get AVFrame(ffmpeg) from NSImage/UIImage - iphone

I'd like to convert an NSImage/UIImage to an AVFrame (FFmpeg).
I found some example code:
http://lists.mplayerhq.hu/pipermail/libav-user/2010-April/004550.html
but this code doesn't work.
I tried another approach:
AVFrame *frame = avcodec_alloc_frame();
int numBytes = avpicture_get_size(PIX_FMT_YUV420P, outputWidth, outputHeight);
uint8_t *buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
avpicture_fill((AVPicture *)frame, buffer, PIX_FMT_YUV420P, outputWidth, outputHeight);
//UIImage *image = … something … ;
NSImage *image = … something … ;
//CGImageRef newCgImage = image.CGImage;
CGImageRef newCgImage = [image CGImageForProposedRect:nil context:nil hints:nil];
//NSBitmapImageRep* bm = [NSBitmapImageRep imageRepWithData:[image TIFFRepresentation]];
//CGImageRef newCgImage = [bm CGImage];
size_t w = CGImageGetWidth(newCgImage);
size_t h = CGImageGetHeight(newCgImage);
CGDataProviderRef dataProvider = CGImageGetDataProvider(newCgImage);
CFDataRef bitmapData = CGDataProviderCopyData(dataProvider);
uint8_t *imageData = (uint8_t *)CFDataGetBytePtr(bitmapData); // renamed from "buffer", which was already declared above
frame->linesize[0] = w;
int y, x;
for (y = 0; y < h; y++) {
    for (x = 0; x < w; x++) {
        int z = y * w + x;
        frame->data[0][z] = imageData[z];
    }
}
but this AVFrame gives me a green picture.
Please let me know how I can get this to work.
Thanks.
The following is an addition.
I tried again, paying attention to the color format.
I found an example of how to convert RGB to YUV:
How to perform RGB->YUV conversion in C/C++?
The new code is like this, but it still doesn't work…
#import <Foundation/Foundation.h>
#import <AppKit/AppKit.h>
#import <libavutil/avstring.h>
#import <libavcodec/avcodec.h>
#import <libavformat/avformat.h>
#import <libswscale/swscale.h>
int main(int argc, char *argv[]) {
    NSAutoreleasePool* pool = [[NSAutoreleasePool alloc] init];
    int i;
    int outputWidth = 480;  // must match the size of the input images
    int outputHeight = 360; // must match the size of the input images
    av_register_all();
    AVOutputFormat *format = av_guess_format("mp4", NULL, NULL);
    if(!format) return -1;
    AVFormatContext *outFormatCtx = avformat_alloc_context();
    if(!outFormatCtx) return -1;
    outFormatCtx->oformat = format;
    av_strlcpy(outFormatCtx->filename, "test.mov", sizeof(outFormatCtx->filename));
    AVStream *vstream = av_new_stream(outFormatCtx, 0);
    if(!vstream) return -1;
    enum CodecID codec_id = av_guess_codec(outFormatCtx->oformat,
                                           NULL,
                                           outFormatCtx->filename,
                                           NULL, CODEC_TYPE_VIDEO);
    AVCodec *ovCodec = avcodec_find_encoder(codec_id);
    if(!ovCodec) return -1;
    AVCodecContext *ovCodecCtx = vstream->codec;
    ovCodecCtx->codec_id = ovCodec->id;
    ovCodecCtx->codec_type = CODEC_TYPE_VIDEO;
    ovCodecCtx->width = outputWidth;
    ovCodecCtx->height = outputHeight;
    ovCodecCtx->pix_fmt = PIX_FMT_NONE;
    if(ovCodec && ovCodec->pix_fmts){
        const enum PixelFormat *p = ovCodec->pix_fmts;
        while(*p++ != -1){
            if(*p == ovCodecCtx->pix_fmt) break;
        }
        if(*p == -1) ovCodecCtx->pix_fmt = ovCodec->pix_fmts[0];
    }
    ovCodecCtx->time_base.num = 1;
    ovCodecCtx->time_base.den = 30;
    if(format->flags & AVFMT_GLOBALHEADER)
        ovCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
    if(avcodec_open(ovCodecCtx, ovCodec) != 0) return -1;
    if (! ( format->flags & AVFMT_NOFILE )) {
        if(url_fopen(&outFormatCtx->pb, outFormatCtx->filename, URL_WRONLY) < 0) return -1;
    }
    av_write_header(outFormatCtx);
    int buf_size = ovCodecCtx->width * ovCodecCtx->height * 4;
    uint8_t *buf = av_malloc(buf_size);
    AVFrame *buffer_frame = avcodec_alloc_frame();
    if(!buffer_frame) return -1;
    AVFrame *frame = avcodec_alloc_frame();
    if(!frame) return -1;
    int numBytes = avpicture_get_size(PIX_FMT_YUV420P, outputWidth, outputHeight);
    uint8_t *buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
    avpicture_fill((AVPicture *)frame, buffer, PIX_FMT_YUV420P, outputWidth, outputHeight);
    for(i=1;i<argc;i++){
        NSAutoreleasePool *innerPool = [[NSAutoreleasePool alloc] init];
        NSImage *image = [[NSImage alloc] initWithContentsOfFile:[NSString stringWithCString: argv[i] encoding: NSUTF8StringEncoding]];
        CGImageRef imageRef = [image CGImageForProposedRect:nil context:nil hints:nil];
        size_t w = CGImageGetWidth(imageRef);
        size_t h = CGImageGetHeight(imageRef);
        size_t bytesPerRow = CGImageGetBytesPerRow(imageRef);
        CGDataProviderRef dataProvider = CGImageGetDataProvider(imageRef);
        CFDataRef bitmapData = CGDataProviderCopyData(dataProvider);
        uint8_t *buff = (uint8_t *)CFDataGetBytePtr(bitmapData);
        uint8_t R,G,B,Y,U,V;
        int x,y;
        for(y=0;y<h;y++){
            for(x=0;x<w;x++){
                uint8_t *tmp = buff + y * bytesPerRow + x * 4;
                R = *(tmp + 3);
                G = *(tmp + 2);
                B = *(tmp + 1);
                Y = (0.257 * R) + (0.504 * G) + (0.098 * B) + 16;
                U = -(0.148 * R) - (0.291 * G) + (0.439 * B) + 128;
                V = (0.439 * R) - (0.368 * G) - (0.071 * B) + 128;
                //printf("y:%d x:%d R:%d,G:%d,B:%d Y:%d,U:%d,V:%d \n",y,x,R,G,B,Y,U,V);
                frame->data[0][y*frame->linesize[0]+x]= Y;
                //frame->data[1][y*frame->linesize[1]+x]= U; // uncommenting this gives "Bus error"
                //frame->data[2][y*frame->linesize[2]+x]= V; // uncommenting this gives "Bus error"
            }
        }
        CGImageRelease(imageRef);
        CFRelease(bitmapData);
        int out_size = avcodec_encode_video(ovCodecCtx, buf, buf_size, frame);
        AVPacket outPacket;
        av_init_packet(&outPacket);
        outPacket.stream_index= vstream->index;
        outPacket.data= buf;
        outPacket.size= out_size;
        //outPacket.pts = ?;
        //outPacket.dts = ?;
        if(ovCodecCtx->coded_frame->key_frame)
            outPacket.flags |= PKT_FLAG_KEY;
        if(av_interleaved_write_frame(outFormatCtx, &outPacket) != 0) return -1;
        [image release];
        [innerPool release];
    }
    av_write_trailer(outFormatCtx);
    if (! ( format->flags & AVFMT_NOFILE ))
        if(url_fclose(outFormatCtx->pb) < 0) return -1;
    avcodec_close(vstream->codec);
    for(i = 0; i < outFormatCtx->nb_streams; i++) {
        av_freep(&outFormatCtx->streams[i]->codec);
        av_freep(&outFormatCtx->streams[i]);
    }
    av_freep(&outFormatCtx);
    av_free(buffer);
    av_free(frame);
    av_free(buffer_frame);
    [pool release];
    return 0;
}
and the makefile is like this.
CC = /usr/bin/gcc
CFLAGS = -O4 -Wall -I/usr/local/include
LDFLAGS =
LDLIBS = -L/usr/local/bin -lavutil -lavformat -lavcodec -lswscale
FRAMEWORK = -framework Foundation -framework AppKit #-framework CoreGraphics
OBJS = test.o
test: $(OBJS)
	$(CC) -o $@ $(LDFLAGS) $(OBJS) $(LDLIBS) $(FRAMEWORK) -lz -lbz2 -arch x86_64
Please somebody help me.

There is a colorspace mismatch between the CGImage data and the destination AVFrame. To fix it, you need to convert the CGImage data (probably ARGB) into the YUV420 format (FFmpeg has a built-in converter for this, libswscale). You can get information about the pixel layout of a CGImage with the CGImageGetBitsPerComponent, CGImageGetBitsPerPixel and CGImageGetBytesPerRow functions.
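For illustration, here is a minimal sketch of that conversion with libswscale, matching the old FFmpeg API used in the question. It assumes the CGImage data is packed 32-bit ARGB (check CGImageGetBitmapInfo and CGImageGetAlphaInfo before relying on that) and reuses the w, h, imageRef, bitmapData and frame variables from the second listing:
struct SwsContext *sws = sws_getContext((int)w, (int)h, PIX_FMT_ARGB,
                                        outputWidth, outputHeight, PIX_FMT_YUV420P,
                                        SWS_BICUBIC, NULL, NULL, NULL);
uint8_t *src[4] = { (uint8_t *)CFDataGetBytePtr(bitmapData), NULL, NULL, NULL };
int srcStride[4] = { (int)CGImageGetBytesPerRow(imageRef), 0, 0, 0 };
// one call converts packed ARGB into the three YUV420P planes, scaling if needed
sws_scale(sws, src, srcStride, 0, (int)h, frame->data, frame->linesize);
sws_freeContext(sws);
This replaces the hand-written RGB->YUV loop and fills all three planes; the green picture comes from writing only the Y plane and leaving U and V untouched.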

Related

How to check results of ExtAudioFileRead?

I'm using ExtAudioFileRead to read a WAV file into memory as a float * buffer. However, I'm not quite sure about the results - when I print them out, I get values from -1 to +1 (which should theoretically be correct), but how can I be sure that they really are?
- (float *) readTestFileAndSize: (int *) size
{
CFStringRef str = CFStringCreateWithCString(
NULL,
[[[NSBundle mainBundle] pathForResource: @"25" ofType:@"wav"] UTF8String],
kCFStringEncodingMacRoman
);
CFURLRef inputFileURL = CFURLCreateWithFileSystemPath(
kCFAllocatorDefault,
str,
kCFURLPOSIXPathStyle,
false
);
ExtAudioFileRef fileRef;
ExtAudioFileOpenURL(inputFileURL, &fileRef);
SInt64 theFileLengthInFrames = 0;
// Get the total frame count
UInt32 thePropertySize = sizeof(theFileLengthInFrames);
ExtAudioFileGetProperty(fileRef, kExtAudioFileProperty_FileLengthFrames, &thePropertySize, &theFileLengthInFrames);
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat;
audioFormat.mBitsPerChannel = sizeof(Float32) * 8;
audioFormat.mChannelsPerFrame = 1; // Mono
audioFormat.mBytesPerFrame = audioFormat.mChannelsPerFrame * sizeof(Float32); // == sizeof(Float32)
audioFormat.mFramesPerPacket = 1;
audioFormat.mBytesPerPacket = audioFormat.mFramesPerPacket * audioFormat.mBytesPerFrame; // = sizeof(Float32)
// 3) Apply audio format to the Extended Audio File
ExtAudioFileSetProperty(
fileRef,
kExtAudioFileProperty_ClientDataFormat,
sizeof (AudioStreamBasicDescription), //= audioFormat
&audioFormat);
int numSamples = 1024; //How many samples to read in at a time
UInt32 sizePerPacket = audioFormat.mBytesPerPacket; // = sizeof(Float32) = 4 bytes
UInt32 packetsPerBuffer = numSamples;
UInt32 outputBufferSize = packetsPerBuffer * sizePerPacket;
// So the lvalue of outputBuffer is the memory location where we have reserved space
UInt8 *outputBuffer = (UInt8 *)malloc(sizeof(UInt8 *) * outputBufferSize);
NSLog(@"outputBufferSize - %llu", theFileLengthInFrames);
float* total = malloc(theFileLengthInFrames * sizeof(float));
*size = theFileLengthInFrames;
AudioBufferList convertedData;
convertedData.mNumberBuffers = 1; // Set this to 1 for mono
convertedData.mBuffers[0].mNumberChannels = audioFormat.mChannelsPerFrame; //also = 1
convertedData.mBuffers[0].mDataByteSize = outputBufferSize;
convertedData.mBuffers[0].mData = outputBuffer; //
int totalBytes = 0;
UInt32 frameCount = numSamples;
while (frameCount > 0) {
ExtAudioFileRead(fileRef, &frameCount, &convertedData);
if (frameCount > 0) {
AudioBuffer audioBuffer = convertedData.mBuffers[0];
float *samplesAsCArray = (float *)audioBuffer.mData;
memcpy(total + totalBytes, samplesAsCArray, frameCount * sizeof(float));
totalBytes += frameCount;
}
}
return total;
}
There are only a few ways to test that I can think of:
Compare the data you've loaded to data loaded by something you know works
Play the audio data back out somehow (probably using an AudioQueue)
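A third cheap check, as a sketch (my code, not from the question; sqrtf needs <math.h>): scan the returned buffer and confirm the samples stay inside [-1, +1] and are not all zero.
int size = 0;
float *samples = [self readTestFileAndSize:&size];
float minV = 0.0f, maxV = 0.0f, sumSq = 0.0f;
for (int i = 0; i < size; i++) {
    if (samples[i] < minV) minV = samples[i];
    if (samples[i] > maxV) maxV = samples[i];
    sumSq += samples[i] * samples[i];
}
// silence reads back as all zeros; a full-scale sine should give an RMS near 0.707
NSLog(@"min %f max %f rms %f", minV, maxV, sqrtf(sumSq / size));
free(samples);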

AVCaptureSession Display is White (no Video)

I am using an AVCaptureSession with an output setting of:
NSString* key = (NSString*)kCVPixelBufferPixelFormatTypeKey;
NSNumber* value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange];
NSDictionary* videoSettings = [NSDictionary dictionaryWithObject:value forKey:key];
[captureOutput setVideoSettings:videoSettings];
My AVCaptureVideoPreviewLayer is displaying fine, but I need more than this, since I have had no success getting a screenshot using the AVCaptureVideoPreviewLayer. So when creating a CGContextRef within the captureOutput delegate, I am using these settings:
uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, width * 4, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
CGImageRef newImage = CGBitmapContextCreateImage(newContext);
I am no longer receiving an 'unsupported parameter combination' warning, but the display is just plain white.
I should add that when I change
NSNumber* value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange];
to
NSNumber* value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA];
everything works fine. What is my problem?
Take a look at the following code (it uses full video range instead), which converts a bi-planar video frame into RGB "by hand".
CVPixelBufferLockBaseAddress(imageBuffer, 0);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
uint8_t *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
CVPlanarPixelBufferInfo_YCbCrBiPlanar *bufferInfo = (CVPlanarPixelBufferInfo_YCbCrBiPlanar *)baseAddress;
NSUInteger yOffset = EndianU32_BtoN(bufferInfo->componentInfoY.offset);
NSUInteger yPitch = EndianU32_BtoN(bufferInfo->componentInfoY.rowBytes);
NSUInteger cbCrOffset = EndianU32_BtoN(bufferInfo->componentInfoCbCr.offset);
NSUInteger cbCrPitch = EndianU32_BtoN(bufferInfo->componentInfoCbCr.rowBytes);
uint8_t *rgbBuffer = malloc(width * height * 3);
uint8_t *yBuffer = baseAddress + yOffset;
uint8_t *cbCrBuffer = baseAddress + cbCrOffset;
for(int y = 0; y < height; y++)
{
    uint8_t *rgbBufferLine = &rgbBuffer[y * width * 3];
    uint8_t *yBufferLine = &yBuffer[y * yPitch];
    uint8_t *cbCrBufferLine = &cbCrBuffer[(y >> 1) * cbCrPitch];
    for(int x = 0; x < width; x++)
    {
        int lum = yBufferLine[x];
        int cb = cbCrBufferLine[x & ~1] - 128; // Cb at even offsets, Cr at odd
        int cr = cbCrBufferLine[x | 1] - 128;
        uint8_t *rgbOutput = &rgbBufferLine[x*3];
        // from ITU-R BT.601, rounded to integers and clamped to 0..255
        int r = (298 * (lum - 16) + 409 * cr + 128) >> 8;
        int g = (298 * (lum - 16) - 100 * cb - 208 * cr + 128) >> 8;
        int b = (298 * (lum - 16) + 516 * cb + 128) >> 8;
        rgbOutput[0] = r < 0 ? 0 : (r > 255 ? 255 : r);
        rgbOutput[1] = g < 0 ? 0 : (g > 255 ? 255 : g);
        rgbOutput[2] = b < 0 ? 0 : (b > 255 ? 255 : b);
    }
}
CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
The following link may be useful as well to better understand this video format:
http://blog.csdn.net/yiheng_l/article/details/3790219#yuvformats_nv12
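As a side note (my addition, not part of the answer above): the same Y and CbCr pointers can be obtained without parsing CVPlanarPixelBufferInfo_YCbCrBiPlanar by hand, using the planar CVPixelBuffer accessors:
CVPixelBufferLockBaseAddress(imageBuffer, 0);
uint8_t *yBuffer = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0); // Y plane
size_t yPitch = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, 0);
uint8_t *cbCrBuffer = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 1); // interleaved CbCr plane
size_t cbCrPitch = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, 1);
// ... same per-pixel loop as above ...
CVPixelBufferUnlockBaseAddress(imageBuffer, 0);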
Take a look at InvasiveCode's tutorial. It shows how to use the Accelerate and Core Image frameworks to process the Y channel.

iPhone - finalizing Apple's vague "VerificationController.m"

I am trying to implement the new VerificationController.m class that Apple released to fix the in-app purchase fraud problem.
As with everything released by Apple, this is one more vague, incomplete and badly explained document, with a lot of voids and unknowns that not everyone can work around or understand.
I am trying to implement that, but at the end of the code we see these four methods:
- (NSString *)encodeBase64:(const uint8_t *)input length:(NSInteger)length
{
#warning Replace this method.
return nil;
}
- (NSString *)decodeBase64:(NSString *)input length:(NSInteger *)length
{
#warning Replace this method.
return nil;
}
#warning Implement this function.
char* base64_encode(const void* buf, size_t size)
{ return NULL; }
#warning Implement this function.
void * base64_decode(const char* s, size_t * data_len)
{ return NULL; }
You can see that Apple was too lazy to implement the C functions at the end of the code. As my C/C++ abilities stink, I see that I need to implement these two functions in C, and that they must return char* and void* (???). Other people have posted routines to do this on SO, but they are either in Objective-C or do not return char*/void* (??).
NOTE: this is another problem I have: how can the function return void* if it is used by Apple in this form?
uint8_t *purchase_info_bytes = base64_decode([purchase_info_string cStringUsingEncoding:NSASCIIStringEncoding], &purchase_info_length);
shouldn't it be returning uint8_t*?
NOTE 2: another problem I have is that Apple says base64_encode is required, but it is not used anywhere in the code they provided. I think they are smoking bad stuff, or my C/C++ knowledge really stinks.
So, returning to my first question: can someone post or point to an implementation that does the job and follows the signatures of the declared functions base64_encode and base64_decode? Please refrain from posting Objective-C methods that are not compatible with these requirements imposed by Apple.
Thanks.
This solution should be pretty straightforward; it includes all the methods needed to fill in the missing pieces. Tested and functional within the sandbox.
// single base64 character conversion
static int POS(char c)
{
if (c>='A' && c<='Z') return c - 'A';
if (c>='a' && c<='z') return c - 'a' + 26;
if (c>='0' && c<='9') return c - '0' + 52;
if (c == '+') return 62;
if (c == '/') return 63;
if (c == '=') return -1;
[NSException raise:@"invalid BASE64 encoding" format:@"Invalid BASE64 encoding"];
return 0;
}
- (NSString *)encodeBase64:(const uint8_t *)input length:(NSInteger)length
{
char *encoded = base64_encode(input, (size_t)length);
NSString *result = [NSString stringWithUTF8String:encoded];
free(encoded); // base64_encode returns malloc'd memory
return result;
}
- (NSString *)decodeBase64:(NSString *)input length:(NSInteger *)length
{
size_t retLen;
uint8_t *retStr = base64_decode([input UTF8String], &retLen);
if (length)
*length = (NSInteger)retLen;
NSString *st = [[[NSString alloc] initWithBytes:retStr
length:retLen
encoding:NSUTF8StringEncoding] autorelease];
free(retStr); // base64_decode returns malloc'd memory
return st;
}
char* base64_encode(const void* buf, size_t size)
{
static const char base64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
char* str = (char*) malloc((size+3)*4/3 + 1);
char* p = str;
unsigned char* q = (unsigned char*) buf;
size_t i = 0;
while(i < size) {
int c = q[i++];
c *= 256;
if (i < size) c += q[i];
i++;
c *= 256;
if (i < size) c += q[i];
i++;
*p++ = base64[(c & 0x00fc0000) >> 18];
*p++ = base64[(c & 0x0003f000) >> 12];
if (i > size + 1)
*p++ = '=';
else
*p++ = base64[(c & 0x00000fc0) >> 6];
if (i > size)
*p++ = '=';
else
*p++ = base64[c & 0x0000003f];
}
*p = 0;
return str;
}
void* base64_decode(const char* s, size_t* data_len_ptr)
{
size_t len = strlen(s);
if (len % 4)
[NSException raise:@"Invalid input in base64_decode" format:@"%lu is an invalid length for an input string for BASE64 decoding", (unsigned long)len];
unsigned char* data = (unsigned char*) malloc(len/4*3);
int n[4];
unsigned char* q = (unsigned char*) data;
for(const char*p=s; *p; )
{
n[0] = POS(*p++);
n[1] = POS(*p++);
n[2] = POS(*p++);
n[3] = POS(*p++);
if (n[0]==-1 || n[1]==-1)
[NSException raise:@"Invalid input in base64_decode" format:@"Invalid BASE64 encoding"];
if (n[2]==-1 && n[3]!=-1)
[NSException raise:@"Invalid input in base64_decode" format:@"Invalid BASE64 encoding"];
q[0] = (n[0] << 2) + (n[1] >> 4);
if (n[2] != -1) q[1] = ((n[1] & 15) << 4) + (n[2] >> 2);
if (n[3] != -1) q[2] = ((n[2] & 3) << 6) + n[3];
q += 3;
}
// make sure that data_len_ptr is not null
if (!data_len_ptr)
[NSException raise:@"Invalid input in base64_decode" format:@"Invalid destination for output string length"];
*data_len_ptr = q-data - (n[2]==-1) - (n[3]==-1);
return data;
}
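A quick round-trip sanity check for this pair (my sketch, not part of the answer; assumes <assert.h> and <string.h> are included):
const char *msg = "receipt-data";
char *enc = base64_encode(msg, strlen(msg)); // "cmVjZWlwdC1kYXRh"
size_t decLen = 0;
uint8_t *dec = base64_decode(enc, &decLen);
assert(decLen == strlen(msg) && memcmp(dec, msg, decLen) == 0);
free(enc);
free(dec);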
Here is a base64 encode function from NSString to NSString:
+(NSString *) encodeString:(NSString *)inString
{
NSData *data = [inString dataUsingEncoding:NSUTF8StringEncoding];
//Point to start of the data and set buffer sizes
int inLength = [data length];
int outLength = ((((inLength * 4)/3)/4)*4) + (((inLength * 4)/3)%4 ? 4 : 0);
const char *inputBuffer = [data bytes];
char *outputBuffer = malloc(outLength + 1); // +1: leave room for the NUL terminator
outputBuffer[outLength] = 0;
//64 digit code
static char Encode[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
//start the count
int cycle = 0;
int inpos = 0;
int outpos = 0;
char temp;
outputBuffer[outLength-1] = '=';
outputBuffer[outLength-2] = '=';
while (inpos < inLength){
switch (cycle) {
case 0:
outputBuffer[outpos++] = Encode[(inputBuffer[inpos]&0xFC)>>2];
cycle = 1;
break;
case 1:
temp = (inputBuffer[inpos++]&0x03)<<4;
outputBuffer[outpos] = Encode[temp];
cycle = 2;
break;
case 2:
outputBuffer[outpos++] = Encode[temp|(inputBuffer[inpos]&0xF0)>> 4];
temp = (inputBuffer[inpos++]&0x0F)<<2;
outputBuffer[outpos] = Encode[temp];
cycle = 3;
break;
case 3:
outputBuffer[outpos++] = Encode[temp|(inputBuffer[inpos]&0xC0)>>6];
cycle = 4;
break;
case 4:
outputBuffer[outpos++] = Encode[inputBuffer[inpos++]&0x3f];
cycle = 0;
break;
default:
cycle = 0;
break;
}
}
NSString *pictemp = [NSString stringWithUTF8String:outputBuffer];
free(outputBuffer);
return pictemp;
}
and here is a base64 decode function from NSString to NSString:
+(NSString *) decodeString:(NSString *)inString
{
const char* string = [inString cStringUsingEncoding:NSASCIIStringEncoding];
NSInteger inputLength = inString.length;
static char decodingTable[128];
static char encodingTable[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
for (NSInteger i = 0; i < 64; i++) { // only 64 valid characters in encodingTable
decodingTable[encodingTable[i]] = i;
}
if ((string == NULL) || (inputLength % 4 != 0)) {
return nil;
}
while (inputLength > 0 && string[inputLength - 1] == '=') {
inputLength--;
}
NSInteger outputLength = inputLength * 3 / 4;
NSMutableData* data = [NSMutableData dataWithLength:outputLength];
uint8_t* output = data.mutableBytes;
NSInteger inputPoint = 0;
NSInteger outputPoint = 0;
while (inputPoint < inputLength) {
char i0 = string[inputPoint++];
char i1 = string[inputPoint++];
char i2 = inputPoint < inputLength ? string[inputPoint++] : 'A'; /* 'A' will decode to \0 */
char i3 = inputPoint < inputLength ? string[inputPoint++] : 'A';
output[outputPoint++] = (decodingTable[i0] << 2) | (decodingTable[i1] >> 4);
if (outputPoint < outputLength) {
output[outputPoint++] = ((decodingTable[i1] & 0xf) << 4) | (decodingTable[i2] >> 2);
}
if (outputPoint < outputLength) {
output[outputPoint++] = ((decodingTable[i2] & 0x3) << 6) | decodingTable[i3];
}
}
NSLog(@"%@",data);
NSString *finalString = [[[NSString alloc] initWithData:data encoding:NSASCIIStringEncoding] autorelease];
return finalString;
}
These were pieced together from examples I found in various places on the internet a while ago. They may be easier for you to implement. I just created a Base64 class and placed these methods in it.
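For example (assuming, as the answer says, the methods live in a Base64 class):
NSString *encoded = [Base64 encodeString:@"hello"]; // expected: @"aGVsbG8="
NSString *decoded = [Base64 decodeString:encoded];  // expected: @"hello"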
Here are the C wrappers around Justin's answer:
char* base64_encode(const void* buf, size_t size)
{
NSData* data = [NSData dataWithBytesNoCopy:(void*)buf length:size freeWhenDone:NO];
NSString* string = [[[NSString alloc] initWithData:data encoding:NSASCIIStringEncoding] autorelease];
// strdup, because -UTF8String points at autoreleased memory
return strdup([[_Class_ encodeString:string] UTF8String]);
}
void* base64_decode(const char* s, size_t* data_len)
{
NSString* result = [_Class_ decodeString:[NSString stringWithCString:s encoding:NSASCIIStringEncoding]];
*data_len = result.length;
return strdup([result cStringUsingEncoding:NSASCIIStringEncoding]);
}
Where _Class_ is the class that contains Justin's functions.

To make blur effect on UIImage iphone Problem?

Hi, here is the code I use to blur an image, but the problem is that it is very slow on the iPhone.
Here is my code.
- (UIImage*) gaussianBlur:(NSUInteger)radius
{
// Pre-calculated kernel
double dKernel[5][5]={
{1.0f/273.0f, 4.0f/273.0f, 7.0f/273.0f, 4.0f/273.0f, 1.0f/273.0f},
{4.0f/273.0f, 16.0f/273.0f, 26.0f/273.0f, 16.0f/273.0f, 4.0f/273.0f},
{7.0f/273.0f, 26.0f/273.0f, 41.0f/273.0f, 26.0f/273.0f, 7.0f/273.0f},
{4.0f/273.0f, 16.0f/273.0f, 26.0f/273.0f, 16.0f/273.0f, 4.0f/273.0f},
{1.0f/273.0f, 4.0f/273.0f, 7.0f/273.0f, 4.0f/273.0f, 1.0f/273.0f}};
NSMutableArray *kernel = [[[NSMutableArray alloc] initWithCapacity:5] autorelease];
for (int i = 0; i < 5; i++) {
NSMutableArray *row = [[[NSMutableArray alloc] initWithCapacity:5] autorelease];
for (int j = 0; j < 5; j++) {
[row addObject:[NSNumber numberWithDouble:dKernel[i][j]]];
}
[kernel addObject:row];
}
return [self applyConvolve:kernel];
}
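// SAFECOLOR is not defined in the post; presumably it clamps to the 0..255 range,
// e.g. #define SAFECOLOR(color) MIN(255, MAX(0, color))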
- (UIImage*) applyConvolve:(NSArray*)kernel
{
CGImageRef inImage = self.CGImage;
CFDataRef m_DataRef = CGDataProviderCopyData(CGImageGetDataProvider(inImage));
CFDataRef m_OutDataRef = CGDataProviderCopyData(CGImageGetDataProvider(inImage));
UInt8 * m_PixelBuf = (UInt8 *) CFDataGetBytePtr(m_DataRef);
UInt8 * m_OutPixelBuf = (UInt8 *) CFDataGetBytePtr(m_OutDataRef);
int h = CGImageGetHeight(inImage);
int w = CGImageGetWidth(inImage);
int kh = [kernel count] / 2;
int kw = [[kernel objectAtIndex:0] count] / 2;
int i = 0, j = 0, n = 0, m = 0;
for (i = 0; i < h; i++) {
for (j = 0; j < w; j++) {
int outIndex = (i*w*4) + (j*4);
double r = 0, g = 0, b = 0;
for (n = -kh; n <= kh; n++) {
for (m = -kw; m <= kw; m++) {
if (i + n >= 0 && i + n < h) {
if (j + m >= 0 && j + m < w) {
double f = [[[kernel objectAtIndex:(n + kh)] objectAtIndex:(m + kw)] doubleValue];
if (f == 0) {continue;}
int inIndex = ((i+n)*w*4) + ((j+m)*4);
r += m_PixelBuf[inIndex] * f;
g += m_PixelBuf[inIndex + 1] * f;
b += m_PixelBuf[inIndex + 2] * f;
}
}
}
}
m_OutPixelBuf[outIndex] = SAFECOLOR((int)r);
m_OutPixelBuf[outIndex + 1] = SAFECOLOR((int)g);
m_OutPixelBuf[outIndex + 2] = SAFECOLOR((int)b);
m_OutPixelBuf[outIndex + 3] = 255;
}
}
CGContextRef ctx = CGBitmapContextCreate(m_OutPixelBuf,
CGImageGetWidth(inImage),
CGImageGetHeight(inImage),
CGImageGetBitsPerComponent(inImage),
CGImageGetBytesPerRow(inImage),
CGImageGetColorSpace(inImage),
CGImageGetBitmapInfo(inImage)
);
CGImageRef imageRef = CGBitmapContextCreateImage(ctx);
CGContextRelease(ctx);
UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
CFRelease(m_DataRef);
CFRelease(m_OutDataRef);
return finalImage;
}
A 2-D Gaussian convolution is separable, which means it can be done as two 1-D convolutions, one horizontal and one vertical. That should save you a fair amount of time.
http://en.wikipedia.org/wiki/Gaussian_blur#Implementation
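For example, a minimal sketch of the two-pass idea (my code, not from the answer), assuming a tightly packed RGBA8 buffer like m_PixelBuf above and the 5-tap binomial kernel {1, 4, 6, 4, 1}/16; two 1-D passes cost 10 kernel taps per pixel instead of 25:
static void blurPass1D(const UInt8 *src, UInt8 *dst, int w, int h,
                       int dx, int dy) // (1,0) = horizontal pass, (0,1) = vertical pass
{
    static const int k[5] = {1, 4, 6, 4, 1}; // weights sum to 16
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < w; x++) {
            int acc[3] = {0, 0, 0};
            for (int t = -2; t <= 2; t++) {
                int sx = x + t * dx, sy = y + t * dy;
                if (sx < 0) sx = 0; if (sx >= w) sx = w - 1; // replicate edge pixels
                if (sy < 0) sy = 0; if (sy >= h) sy = h - 1;
                const UInt8 *p = src + (sy * w + sx) * 4;
                acc[0] += p[0] * k[t + 2];
                acc[1] += p[1] * k[t + 2];
                acc[2] += p[2] * k[t + 2];
            }
            UInt8 *q = dst + (y * w + x) * 4;
            q[0] = acc[0] >> 4; // divide by 16
            q[1] = acc[1] >> 4;
            q[2] = acc[2] >> 4;
            q[3] = 255;
        }
    }
}
// usage: blurPass1D(in, tmp, w, h, 1, 0); then blurPass1D(tmp, out, w, h, 0, 1);
Note also that a large part of the cost in applyConvolve is the per-pixel NSArray/NSNumber access; plain C arrays like the one above avoid that entirely.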

Setting the contrast of an image in iPhone

I'm using the following code to set the contrast of an image based on a slider value. The slider range is from 0.0f to 2.0f. It runs fine in the simulator, but it crashes on the device due to low memory. Can anyone tell me what's wrong with this code?
Thanks in advance....
-(void)contrast:(float)value{
CGImageRef img=refImage.CGImage;
CFDataRef dataref=CopyImagePixels(img);
UInt8 *data=(UInt8 *)CFDataGetBytePtr(dataref);
int length=CFDataGetLength(dataref);
for(int index=0;index<length;index+=4){
int alphaCount = data[index+0];
int redCount = data[index+1];
int greenCount = data[index+2];
int blueCount = data[index+3];
alphaCount = ((alphaCount-128)*value ) + 128;
if (alphaCount < 0) alphaCount = 0; if (alphaCount>255) alphaCount =255;
data[index+0] = (Byte) alphaCount;
redCount = ((redCount-128)*value ) + 128;
if (redCount < 0) redCount = 0; if (redCount>255) redCount =255;
data[index+1] = (Byte) redCount;
greenCount = ((greenCount-128)*value ) + 128;
if (greenCount < 0) greenCount = 0; if (greenCount>255) greenCount =255;
data[index+2] = (Byte) greenCount;
blueCount = ((blueCount-128)*value ) + 128;
if (blueCount < 0) blueCount = 0; if (blueCount>255) blueCount =255;
data[index+3] = (Byte) blueCount;
}
size_t width=CGImageGetWidth(img);
size_t height=CGImageGetHeight(img);
size_t bitsPerComponent=CGImageGetBitsPerComponent(img);
size_t bitsPerPixel=CGImageGetBitsPerPixel(img);
size_t bytesPerRow=CGImageGetBytesPerRow(img);
CGColorSpaceRef colorspace=CGImageGetColorSpace(img);
CGBitmapInfo bitmapInfo=CGImageGetBitmapInfo(img);
CFDataRef newData=CFDataCreate(NULL,data,length);
CGDataProviderRef provider=CGDataProviderCreateWithCFData(newData);
CGImageRef newImg=CGImageCreate(width,height,bitsPerComponent,bitsPerPixel,bytesPerRow,colorspace,bitmapInfo,provider,NULL,true,kCGRenderingIntentDefault);
[ImgView setImage:[UIImage imageWithCGImage:newImg]];
CGImageRelease(newImg);
CGDataProviderRelease(provider);
}
You might have some memory leaks.
Any function of the form CF...Create() or ...Copy...() needs a corresponding CFRelease() call. The following has no release:
CFDataRef newData=CFDataCreate(NULL,data,length);
I think you need to clean up after the copy as well:
CFDataRef dataref=CopyImagePixels(img);
You cleaned up after newImg okay. I can't see any other leaks, but go through your Create/Copy calls and check that you clean up the memory afterwards.
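Concretely, a sketch of the cleanup the answer describes: the end of -contrast: would become
CGImageRelease(newImg);
CGDataProviderRelease(provider);
CFRelease(newData); // balances CFDataCreate
CFRelease(dataref); // balances CopyImagePixels (a Copy-rule function)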