Convert to grayscale - too slow - iPhone

I've made a class that converts an image into grayscale. But it works way too slow. Is there a way to make it work faster?
Here's my class:
@implementation PixelProcessing
SYNTHESIZE_SINGLETON_FOR_CLASS(PixelProcessing);
#define bytesPerPixel 4
#define bitsPerComponent 8
-(UIImage*)scaleAndRotateImage: (UIImage*)img withMaxResolution: (int)kMaxResolution
{
CGImageRef imgRef = img.CGImage;
CGFloat width = CGImageGetWidth(imgRef);
CGFloat height = CGImageGetHeight(imgRef);
CGAffineTransform transform = CGAffineTransformIdentity;
CGRect bounds = CGRectMake(0, 0, width, height);
if ( (kMaxResolution != 0) && (width > kMaxResolution || height > kMaxResolution) ) {
CGFloat ratio = width/height;
if (ratio > 1) {
bounds.size.width = kMaxResolution;
bounds.size.height = bounds.size.width / ratio;
}
else {
bounds.size.height = kMaxResolution;
bounds.size.width = bounds.size.height * ratio;
}
}
CGFloat scaleRatio;
if (kMaxResolution != 0){
scaleRatio = bounds.size.width / width;
} else
{
scaleRatio = 1.0f;
}
CGSize imageSize = CGSizeMake(CGImageGetWidth(imgRef), CGImageGetHeight(imgRef));
CGFloat boundHeight;
UIImageOrientation orient = img.imageOrientation;
switch(orient) {
case UIImageOrientationUp: //EXIF = 1
transform = CGAffineTransformIdentity;
break;
case UIImageOrientationUpMirrored: //EXIF = 2
transform = CGAffineTransformMakeTranslation(imageSize.width, 0.0);
transform = CGAffineTransformScale(transform, -1.0, 1.0);
break;
case UIImageOrientationDown: //EXIF = 3
transform = CGAffineTransformMakeTranslation(imageSize.width, imageSize.height);
transform = CGAffineTransformRotate(transform, M_PI);
break;
case UIImageOrientationDownMirrored: //EXIF = 4
transform = CGAffineTransformMakeTranslation(0.0, imageSize.height);
transform = CGAffineTransformScale(transform, 1.0, -1.0);
break;
case UIImageOrientationLeftMirrored: //EXIF = 5
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeTranslation(imageSize.height, imageSize.width);
transform = CGAffineTransformScale(transform, -1.0, 1.0);
transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
break;
case UIImageOrientationLeft: //EXIF = 6
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeTranslation(0.0, imageSize.width);
transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
break;
case UIImageOrientationRightMirrored: //EXIF = 7
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeScale(-1.0, 1.0);
transform = CGAffineTransformRotate(transform, M_PI / 2.0);
break;
case UIImageOrientationRight: //EXIF = 8
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeTranslation(imageSize.height, 0.0);
transform = CGAffineTransformRotate(transform, M_PI / 2.0);
break;
default:
[NSException raise:NSInternalInconsistencyException format:@"Invalid image orientation"];
}
UIGraphicsBeginImageContext(bounds.size);
CGContextRef context = UIGraphicsGetCurrentContext();
if (orient == UIImageOrientationRight || orient == UIImageOrientationLeft) {
CGContextScaleCTM(context, -scaleRatio, scaleRatio);
CGContextTranslateCTM(context, -height, 0);
}
else {
CGContextScaleCTM(context, scaleRatio, -scaleRatio);
CGContextTranslateCTM(context, 0, -height);
}
CGContextConcatCTM(context, transform);
CGContextDrawImage(UIGraphicsGetCurrentContext(), CGRectMake(0, 0, width, height), imgRef);
UIImage *tempImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return tempImage;
}
#pragma mark Getting And Writing Pixels
-(float*) getColorForPixel: (NSUInteger)xCoordinate andForY: (NSUInteger)yCoordinate
{
int byteIndex = (bytesPerRow * yCoordinate) + xCoordinate * bytesPerPixel;
float *colorToReturn = malloc(3 * sizeof(float)); // space for three floats, not three bytes
colorToReturn[0] = bitmap[byteIndex] / 255.f; //Red
colorToReturn[1] = bitmap[byteIndex + 1] / 255.f; //Green
colorToReturn[2] = bitmap[byteIndex + 2] / 255.f; //Blue
return colorToReturn;
}
-(void) writeColor: (float*)colorToWrite forPixelAtX: (NSUInteger)xCoordinate andY: (NSUInteger)yCoordinate
{
int byteIndex = (bytesPerRow * yCoordinate) + xCoordinate * bytesPerPixel;
bitmap[byteIndex] = (unsigned char) ( colorToWrite[0] * 255);
bitmap[byteIndex + 1] = (unsigned char) ( colorToWrite[1] * 255);
bitmap[byteIndex + 2] = (unsigned char) ( colorToWrite[2] * 255);
}
#pragma mark Bitmap
-(float) getAverageBrightnessForImage: (UIImage*)img
{
UIImage *tempImage = [self scaleAndRotateImage: img withMaxResolution: 100];
unsigned char *rawData = [self getBytesForImage: tempImage];
double aBrightness = 0;
for(int y = 0; y < tempImage.size.height; y++) {
for(int x = 0; x < tempImage.size.width; x++) {
int byteIndex = ( (tempImage.size.width * y) + x) * bytesPerPixel;
aBrightness += (rawData[byteIndex] + rawData[byteIndex + 1] + rawData[byteIndex + 2]);
}
}
free(rawData);
aBrightness /= 3.0f;
aBrightness /= 255.0f;
aBrightness /= tempImage.size.width * tempImage.size.height;
return aBrightness;
}
-(unsigned char*) getBytesForImage: (UIImage*)pImage
{
CGImageRef image = [pImage CGImage];
NSUInteger width = CGImageGetWidth(image);
NSUInteger height = CGImageGetHeight(image);
bytesPerRow = bytesPerPixel * width;
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
unsigned char *rawData = malloc(height * width * bytesPerPixel);
CGContextRef context = CGBitmapContextCreate(rawData, width, height, bitsPerComponent, bytesPerRow, colorSpace, kCGBitmapByteOrder32Big | kCGImageAlphaPremultipliedLast);
CGColorSpaceRelease(colorSpace);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
CGContextRelease(context);
return rawData;
}
-(void) loadWithImage: (UIImage*)img
{
averageBrightness = [self getAverageBrightnessForImage: img];
currentImage = [self scaleAndRotateImage: img withMaxResolution: 0];
imgWidth = currentImage.size.width;
imgHeight = currentImage.size.height;
bitmap = [self getBytesForImage: currentImage];
bytesPerRow = bytesPerPixel * imgWidth;
}
-(void) processImage
{
// now convert to grayscale
for(int y = 0; y < imgHeight; y++) {
for(int x = 0; x < imgWidth; x++) {
float *currentColor = [self getColorForPixel: x andForY: y];
//Grayscale
float averageColor = (currentColor[0] + currentColor[1] + currentColor[2]) / 3.0f;
averageColor += 0.5f - averageBrightness;
if (averageColor > 1.0f) averageColor = 1.0f;
currentColor[0] = averageColor;
currentColor[1] = averageColor;
currentColor[2] = averageColor;
[self writeColor: currentColor forPixelAtX: x andY: y];
free(currentColor);
}
}
}
-(UIImage*) getProcessedImage
{
// create a UIImage
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(bitmap, imgWidth, imgHeight, bitsPerComponent, bytesPerRow, colorSpace, kCGBitmapByteOrder32Big | kCGImageAlphaPremultipliedLast);
CGImageRef image = CGBitmapContextCreateImage(context);
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
UIImage *resultUIImage = [UIImage imageWithCGImage: image];
CGImageRelease(image);
return resultUIImage;
}
-(void) releaseCurrentImage
{
free(bitmap);
}
@end
And I convert an image into grayscale in the following way:
[ [PixelProcessing sharedPixelProcessing] loadWithImage: imageToDisplay.image];
[ [PixelProcessing sharedPixelProcessing] processImage];
imageToDisplay.image = [ [PixelProcessing sharedPixelProcessing] getProcessedImage];
[ [PixelProcessing sharedPixelProcessing] releaseCurrentImage];
Why is it working so slow? Is there a way to get float values for RGB color components of pixel? How can I optimize it?
Thanks.

You could let Quartz do the grayscale conversion for you:
CGImageRef grayscaleCGImageFromCGImage(CGImageRef inputImage) {
size_t width = CGImageGetWidth(inputImage);
size_t height = CGImageGetHeight(inputImage);
// Create a gray scale context and render the input image into that
CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceGray();
CGContextRef context = CGBitmapContextCreate(NULL, width, height, 8,
4*width, colorspace, kCGBitmapByteOrderDefault);
CGContextDrawImage(context, CGRectMake(0,0, width,height), inputImage);
// Get an image representation of the grayscale context which the input
// was rendered into.
CGImageRef outputImage = CGBitmapContextCreateImage(context);
// Cleanup
CGContextRelease(context);
CGColorSpaceRelease(colorspace);
return (CGImageRef)[(id)outputImage autorelease];
}
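A minimal usage sketch (inputPhoto is a placeholder name; the function above returns an autoreleased CGImageRef, so no extra CGImageRelease is needed here):
CGImageRef grayCG = grayscaleCGImageFromCGImage(inputPhoto.CGImage);
UIImage *grayPhoto = [UIImage imageWithCGImage:grayCG];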

I had to solve this same problem recently and came up with the following code (it also preserves alpha):
@implementation UIImage (grayscale)
typedef enum {
ALPHA = 0,
BLUE = 1,
GREEN = 2,
RED = 3
} PIXELS;
- (UIImage *)convertToGrayscale {
CGSize size = [self size];
int width = size.width;
int height = size.height;
// the pixels will be painted to this array
uint32_t *pixels = (uint32_t *) malloc(width * height * sizeof(uint32_t));
// clear the pixels so any transparency is preserved
memset(pixels, 0, width * height * sizeof(uint32_t));
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
// create a context with RGBA pixels
CGContextRef context = CGBitmapContextCreate(pixels, width, height, 8, width * sizeof(uint32_t), colorSpace,
kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast);
// paint the bitmap to our context which will fill in the pixels array
CGContextDrawImage(context, CGRectMake(0, 0, width, height), [self CGImage]);
for(int y = 0; y < height; y++) {
for(int x = 0; x < width; x++) {
uint8_t *rgbaPixel = (uint8_t *) &pixels[y * width + x];
// convert to grayscale using recommended method: http://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale
uint32_t gray = 0.3 * rgbaPixel[RED] + 0.59 * rgbaPixel[GREEN] + 0.11 * rgbaPixel[BLUE];
// set the pixels to gray
rgbaPixel[RED] = gray;
rgbaPixel[GREEN] = gray;
rgbaPixel[BLUE] = gray;
}
}
// create a new CGImageRef from our context with the modified pixels
CGImageRef image = CGBitmapContextCreateImage(context);
// we're done with the context, color space, and pixels
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
free(pixels);
// make a new UIImage to return
UIImage *resultUIImage = [UIImage imageWithCGImage:image];
// we're done with image now too
CGImageRelease(image);
return resultUIImage;
}
@end
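A minimal usage sketch of the category (originalPhoto and imageView are placeholder names):
UIImage *grayPhoto = [originalPhoto convertToGrayscale];
imageView.image = grayPhoto;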

The way to find out your speed issue is to profile using Shark. (In Xcode, Run->Start with Performance Tool->Shark.) However, in this case I feel reasonably certain that the primary problems are the per-pixel malloc/free, the floating-point arithmetic, and the two method calls in the inner processing loop.
To avoid the malloc/free, you want to be doing something like this instead:
- (void) getColorForPixelX:(NSUInteger)x y:(NSUInteger)y pixel:(float[3])pixel
{ /* Write stuff to pixel[0], pixel[1], pixel[2] */ }
// To call:
float pixel[3];
for (each pixel)
{
[self getColorForPixelX:x y:y pixel:pixel];
// Do stuff
}
The second likely source of slowdown is the use of floating point – or rather, the cost of converting to and from floating point. For the filter you’re writing, working in integer maths is simple – add the integer pixel values and divide by 255*3. (Incidentally, that’s a pretty bad way to convert to greyscale. See http://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale.)
Method calls are fast for what they are, but still pretty slow compared to the basic arithmetic of the filter. (For some numbers, see this article.) The easy way to eliminate the method calls is to replace them with inline functions.
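Putting those suggestions together, here is a sketch of an integer-only inner loop over the raw bitmap (it assumes the bitmap, imgWidth, imgHeight, bytesPerRow and bytesPerPixel fields from your class, and omits your brightness offset for brevity):
static inline void setGrayPixel(unsigned char *p) {
// integer average of R, G and B; no floats, no malloc, no method calls
unsigned int gray = (p[0] + p[1] + p[2]) / 3;
p[0] = p[1] = p[2] = (unsigned char)gray;
}
// inner loop of processImage
for (int y = 0; y < imgHeight; y++) {
unsigned char *row = bitmap + y * bytesPerRow;
for (int x = 0; x < imgWidth; x++) {
setGrayPixel(row + x * bytesPerPixel);
}
}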

Have you tried using the luminosity blend mode? A white image blended with your original with that blend mode seems to produce grayscale.
These two images, foreground on the right and background on the left:
(image: http://developer.apple.com/iphone/library/documentation/GraphicsImaging/Conceptual/drawingwithquartz2d/Art/both_images.jpg)
blended with kCGBlendModeLuminosity result in this:
(image: http://developer.apple.com/iphone/library/documentation/GraphicsImaging/Conceptual/drawingwithquartz2d/Art/luminosity_image.jpg)
For details, see: Drawing with Quartz 2D: Using Blend Modes With Images
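A sketch of that approach with UIKit drawing (originalImage is a placeholder name; the idea is that the result takes the photo's luminance and the neutral hue/saturation of a white layer):
UIGraphicsBeginImageContext(originalImage.size);
CGContextRef ctx = UIGraphicsGetCurrentContext();
CGRect rect = CGRectMake(0, 0, originalImage.size.width, originalImage.size.height);
// fill with white first, then draw the photo on top using the luminosity blend mode
CGContextSetRGBFillColor(ctx, 1.0, 1.0, 1.0, 1.0);
CGContextFillRect(ctx, rect);
[originalImage drawInRect:rect blendMode:kCGBlendModeLuminosity alpha:1.0];
UIImage *grayImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();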

Related

How to scale image?

How can I scale an image with good quality? I am using the following code, but when I use an image taken from the camera it shows up in a different frame.
-(UIImage *)resizeImage:(UIImage *)image {
int w = image.size.width;
int h = image.size.height;
CGImageRef imageRef = [image CGImage];
int width, height;
int destWidth = 640;
int destHeight = 480;
if(w > h){
width = destWidth;
height = h*destWidth/w;
} else {
height = destHeight;
width = w*destHeight/h;
}
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef bitmap;
bitmap = CGBitmapContextCreate(NULL, width, height, 8, 4 * width, colorSpace, kCGImageAlphaPremultipliedFirst);
if (image.imageOrientation == UIImageOrientationLeft) {
CGContextRotateCTM (bitmap, M_PI/2);
CGContextTranslateCTM (bitmap, 0, -height);
} else if (image.imageOrientation == UIImageOrientationRight) {
CGContextRotateCTM (bitmap, -M_PI/2);
CGContextTranslateCTM (bitmap, -width, 0);
} else if (image.imageOrientation == UIImageOrientationUp) {
} else if (image.imageOrientation == UIImageOrientationDown) {
CGContextTranslateCTM (bitmap, width,height);
CGContextRotateCTM (bitmap, -M_PI);
}
CGContextDrawImage(bitmap, CGRectMake(0, 0, width, height), imageRef);
CGImageRef ref = CGBitmapContextCreateImage(bitmap);
UIImage *result = [UIImage imageWithCGImage:ref];
CGContextRelease(bitmap);
CGImageRelease(ref);
return result;
}
If there is any way to reduce the size without affecting the quality, please suggest it.
Thanks in advance
In order to reduce size, you will have to compromise on the resolution of the image. I suggest you calculate a new WIDTH and HEIGHT for the image.
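For instance, an aspect-preserving target size could be computed like this (a sketch; maxDimension is just a value you pick):
CGFloat maxDimension = 640.0f; // hypothetical limit for the longer side
CGFloat scale = MIN(maxDimension / image.size.width, maxDimension / image.size.height);
if (scale > 1.0f) scale = 1.0f; // never upscale
int newWidth = (int)(image.size.width * scale);
int newHeight = (int)(image.size.height * scale);
Then pass newWidth and newHeight to a resize method such as the one below.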
Here is something that worked for me:
Try this:
-(UIImage*) resizeImage:(CGImageRef)image toWidth:(int)width andHeight:(int)height
{
// create context, keeping original image properties
CGColorSpaceRef colorspace = CGImageGetColorSpace(image);
CGContextRef context = CGBitmapContextCreate(NULL, width, height, CGImageGetBitsPerComponent(image), CGImageGetBytesPerRow(image),
colorspace,
CGImageGetAlphaInfo(image));
if(context == NULL)
{
NSLog(@"Could not re-size");
return nil;
}
// draw image to context (resizing it)
CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
// extract resulting image from context
CGImageRef imgRef = CGBitmapContextCreateImage(context);
CGContextRelease(context);
UIImage* resizedImage = [UIImage imageWithCGImage:imgRef];
CGImageRelease(imgRef);
return resizedImage;
}
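A hypothetical call site, using the width and height computed earlier:
UIImage *scaled = [self resizeImage:image.CGImage toWidth:newWidth andHeight:newHeight];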
I have used the code below to scale the image taken from the camera. Please try it if you find it helpful.
- (void)imagePickerController:(UIImagePickerController *)picker
didFinishPickingImage:(UIImage *)image
editingInfo:(NSDictionary *)editingInfo
{
// ADD THIS LINE WHEN YOU HAVE CAPTURED THE IMAGE
image = [self scaleAndRotateImage:image];
[self useImage:image];
[[picker parentViewController] dismissModalViewControllerAnimated:YES];
}
/// THIS METHOD SCALES AND ROTATES THE CAPTURED IMAGE TO THE CORRECT ORIENTATION
- (UIImage *)scaleAndRotateImage:(UIImage *)image
{
int kMaxResolution = 320; // Or whatever
CGImageRef imgRef = image.CGImage;
CGFloat width = CGImageGetWidth(imgRef);
CGFloat height = CGImageGetHeight(imgRef);
CGAffineTransform transform = CGAffineTransformIdentity;
CGRect bounds = CGRectMake(0, 0, width, height);
if (width > kMaxResolution || height > kMaxResolution) {
CGFloat ratio = width/height;
if (ratio > 1) {
bounds.size.width = kMaxResolution;
bounds.size.height = bounds.size.width / ratio;
}
else {
bounds.size.height = kMaxResolution;
bounds.size.width = bounds.size.height * ratio;
}
}
CGFloat scaleRatio = bounds.size.width / width;
CGSize imageSize = CGSizeMake(CGImageGetWidth(imgRef), CGImageGetHeight(imgRef));
CGFloat boundHeight;
UIImageOrientation orient = image.imageOrientation;
switch(orient) {
case UIImageOrientationUp: //EXIF = 1
transform = CGAffineTransformIdentity;
break;
case UIImageOrientationUpMirrored: //EXIF = 2
transform = CGAffineTransformMakeTranslation(imageSize.width, 0.0);
transform = CGAffineTransformScale(transform, -1.0, 1.0);
break;
case UIImageOrientationDown: //EXIF = 3
transform = CGAffineTransformMakeTranslation(imageSize.width, imageSize.height);
transform = CGAffineTransformRotate(transform, M_PI);
break;
case UIImageOrientationDownMirrored: //EXIF = 4
transform = CGAffineTransformMakeTranslation(0.0, imageSize.height);
transform = CGAffineTransformScale(transform, 1.0, -1.0);
break;
case UIImageOrientationLeftMirrored: //EXIF = 5
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeTranslation(imageSize.height, imageSize.width);
transform = CGAffineTransformScale(transform, -1.0, 1.0);
transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
break;
case UIImageOrientationLeft: //EXIF = 6
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeTranslation(0.0, imageSize.width);
transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
break;
case UIImageOrientationRightMirrored: //EXIF = 7
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeScale(-1.0, 1.0);
transform = CGAffineTransformRotate(transform, M_PI / 2.0);
break;
case UIImageOrientationRight: //EXIF = 8
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeTranslation(imageSize.height, 0.0);
transform = CGAffineTransformRotate(transform, M_PI / 2.0);
break;
default:
[NSException raise:NSInternalInconsistencyException format:@"Invalid image orientation"];
}
UIGraphicsBeginImageContext(bounds.size);
CGContextRef context = UIGraphicsGetCurrentContext();
if (orient == UIImageOrientationRight || orient == UIImageOrientationLeft) {
CGContextScaleCTM(context, -scaleRatio, scaleRatio);
CGContextTranslateCTM(context, -height, 0);
}
else {
CGContextScaleCTM(context, scaleRatio, -scaleRatio);
CGContextTranslateCTM(context, 0, -height);
}
CGContextConcatCTM(context, transform);
CGContextDrawImage(UIGraphicsGetCurrentContext(), CGRectMake(0, 0, width, height), imgRef);
UIImage *imageCopy = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
[self setRotatedImage:imageCopy];
return imageCopy;
}

matchTemplate opencv not working as shown in opencv document

I am struggling with this problem: in an image captured from the camera, I need to detect whether certain patterns exist and where they are located.
For this I found OpenCV's matchTemplate method. I used the images from the OpenCV sample and wrote the code below, but my result is different from the one shown in the tutorial:
http://opencv.itseez.com/doc/tutorials/imgproc/histograms/template_matching/template_matching.html
My code is below:
-(void)matchPatchNet
{
IplImage *res;
CvPoint minloc, maxloc;
double minval, maxval;
int img_width, img_height;
int tpl_width, tpl_height;
int res_width, res_height;
NSString *pathPatron = [[NSBundle mainBundle] pathForResource:@"timage" ofType:@"jpg"];
UIImage *tim = [UIImage imageWithContentsOfFile:pathPatron];
NSString *pathPatron2 = [[NSBundle mainBundle] pathForResource:@"simage" ofType:@"jpg"];
UIImage *tim2 = [UIImage imageWithContentsOfFile:pathPatron2];
IplImage *img = [self CreateIplImageFromUIImage:tim2];//
IplImage *tpl = [self CreateIplImageFromUIImage:tim];
cv::Mat forground1 = [tim2 CVMat];
cv::Mat forground2 = [tim CVMat];
img_width = img->width;
img_height = img->height;
tpl_width = tpl->width;
tpl_height = tpl->height;
res_width = img_width - tpl_width + 1;
res_height = img_height - tpl_height + 1;
res = cvCreateImage( cvSize( res_width, res_height ), IPL_DEPTH_32F, 1 );
cvMatchTemplate( img, tpl, res, CV_TM_CCOEFF_NORMED );
UIImage *ipala=[self UIImageFromIplImage:res];
cv::Mat forground3 = [ipala CVMat];
cv::normalize(forground3, forground3, 0, 1, cv::NORM_MINMAX, CV_8UC1);
cvMinMaxLoc( res, &minval, &maxval, &minloc, &maxloc, 0 );
cvRectangle( img,
cvPoint( maxloc.x, maxloc.y ),
cvPoint( maxloc.x + tpl_width, maxloc.y + tpl_height ),
cvScalar( 0, 255, 0, 0 ), 1, 0, 0 );
/* display images */
self.imageView.image = [self UIImageFromIplImage:img];
cvReleaseImage(&img);
cvReleaseImage(&tpl);
cvReleaseImage(&res);
}
Please tell me what I am doing wrong. Please help me.
Thanks in advance
I strongly suggest you use the C++ interface and the current docs, which you'll find here: OpenCV v2.4.2 documentation
Get the latest version of OpenCV for iOS here: OpenCV for iOS, drop it into your project, and include this in your project prefix header:
ExampleApp-Prefix.pch:
#ifdef __cplusplus
#import <opencv2/opencv.hpp>
#endif
Use this to "convert" UIImages to cv::Mats:
UIImageCVMatConverter.h:
//
// UIImageCVMatConverter.h
//
#import <Foundation/Foundation.h>
@interface UIImageCVMatConverter : NSObject {
}
+ (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat;
+ (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat withUIImage:(UIImage*)image;
+ (cv::Mat)cvMatFromUIImage:(UIImage *)image;
+ (cv::Mat)cvMatGrayFromUIImage:(UIImage *)image;
+ (UIImage *)scaleAndRotateImageFrontCamera:(UIImage *)image;
+ (UIImage *)scaleAndRotateImageBackCamera:(UIImage *)image;
@end
UIImageCVMatConverter.mm:
//
// UIImageCVMatConverter.m
//
#import "UIImageCVMatConverter.h"
@implementation UIImageCVMatConverter
+ (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat withUIImage:(UIImage*)image;
{
CGColorSpaceRef colorSpace = CGImageGetColorSpace( image.CGImage );
CGFloat cols = image.size.width;
CGFloat rows = image.size.height;
CGFloat widthStep = image.size.width;
CGContextRef contextRef = CGBitmapContextCreate( NULL, cols, rows, 8, widthStep*4, colorSpace, kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault );
CGContextDrawImage( contextRef, CGRectMake(0, 0, cols, rows), image.CGImage );
CGContextSetRGBStrokeColor( contextRef, 1, 0, 0, 1 );
CGImageRef cgImage = CGBitmapContextCreateImage( contextRef );
UIImage* result = [UIImage imageWithCGImage:cgImage];
CGImageRelease( cgImage );
CGContextRelease( contextRef );
CGColorSpaceRelease( colorSpace );
return result;
}
+(UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize()*cvMat.total()];
CGColorSpaceRef colorSpace;
if ( cvMat.elemSize() == 1 ) {
colorSpace = CGColorSpaceCreateDeviceGray();
}
else {
colorSpace = CGColorSpaceCreateDeviceRGB();
}
CGDataProviderRef provider = CGDataProviderCreateWithCFData( (__bridge CFDataRef)data );
CGImageRef imageRef = CGImageCreate( cvMat.cols, cvMat.rows, 8, 8 * cvMat.elemSize(), cvMat.step[0], colorSpace, kCGImageAlphaNone|kCGBitmapByteOrderDefault, provider, NULL, false, kCGRenderingIntentDefault );
UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease( imageRef );
CGDataProviderRelease( provider );
CGColorSpaceRelease( colorSpace );
return finalImage;
}
+ (cv::Mat)cvMatFromUIImage:(UIImage *)image
{
CGColorSpaceRef colorSpace = CGImageGetColorSpace( image.CGImage );
CGFloat cols = image.size.width;
CGFloat rows = image.size.height;
cv::Mat cvMat( rows, cols, CV_8UC4 );
CGContextRef contextRef = CGBitmapContextCreate( cvMat.data, cols, rows, 8, cvMat.step[0], colorSpace, kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault );
CGContextDrawImage( contextRef, CGRectMake(0, 0, cols, rows), image.CGImage );
CGContextRelease( contextRef );
CGColorSpaceRelease( colorSpace );
return cvMat;
}
+ (cv::Mat)cvMatGrayFromUIImage:(UIImage *)image
{
cv::Mat cvMat = [UIImageCVMatConverter cvMatFromUIImage:image];
cv::Mat grayMat;
if ( cvMat.channels() == 1 ) {
grayMat = cvMat;
}
else {
grayMat = cv::Mat(cvMat.rows, cvMat.cols, CV_8UC1);
cv::cvtColor( cvMat, grayMat, CV_BGR2GRAY );
}
return grayMat;
}
+ (UIImage *)scaleAndRotateImageBackCamera:(UIImage *)image
{
static int kMaxResolution = 640;
CGImageRef imgRef = image.CGImage;
CGFloat width = CGImageGetWidth( imgRef );
CGFloat height = CGImageGetHeight( imgRef );
CGAffineTransform transform = CGAffineTransformIdentity;
CGRect bounds = CGRectMake( 0, 0, width, height );
if ( width > kMaxResolution || height > kMaxResolution ) {
CGFloat ratio = width/height;
if ( ratio > 1 ) {
bounds.size.width = kMaxResolution;
bounds.size.height = bounds.size.width / ratio;
}
else {
bounds.size.height = kMaxResolution;
bounds.size.width = bounds.size.height * ratio;
}
}
CGFloat scaleRatio = bounds.size.width / width;
CGSize imageSize = CGSizeMake( CGImageGetWidth(imgRef), CGImageGetHeight(imgRef) );
CGFloat boundHeight;
UIImageOrientation orient = image.imageOrientation;
switch( orient ) {
case UIImageOrientationUp:
transform = CGAffineTransformIdentity;
break;
case UIImageOrientationUpMirrored:
transform = CGAffineTransformMakeTranslation(imageSize.width, 0.0);
transform = CGAffineTransformScale(transform, -1.0, 1.0);
break;
case UIImageOrientationDown:
transform = CGAffineTransformMakeTranslation(imageSize.width, imageSize.height);
transform = CGAffineTransformRotate(transform, M_PI);
break;
case UIImageOrientationDownMirrored:
transform = CGAffineTransformMakeTranslation(0.0, imageSize.height);
transform = CGAffineTransformScale(transform, 1.0, -1.0);
break;
case UIImageOrientationLeftMirrored:
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeTranslation(imageSize.height, imageSize.width);
transform = CGAffineTransformScale(transform, -1.0, 1.0);
transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
break;
case UIImageOrientationLeft:
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeTranslation(0.0, imageSize.width);
transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
break;
case UIImageOrientationRightMirrored:
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeScale(-1.0, 1.0);
transform = CGAffineTransformRotate(transform, M_PI / 2.0);
break;
case UIImageOrientationRight:
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeTranslation(imageSize.height, 0.0);
transform = CGAffineTransformRotate(transform, M_PI / 2.0);
break;
default:
[NSException raise:NSInternalInconsistencyException format:@"Invalid image orientation"];
}
UIGraphicsBeginImageContext( bounds.size );
CGContextRef context = UIGraphicsGetCurrentContext();
if ( orient == UIImageOrientationRight || orient == UIImageOrientationLeft ) {
CGContextScaleCTM( context, -scaleRatio, scaleRatio );
CGContextTranslateCTM( context, -height, 0 );
}
else {
CGContextScaleCTM( context, scaleRatio, -scaleRatio );
CGContextTranslateCTM( context, 0, -height );
}
CGContextConcatCTM( context, transform );
CGContextDrawImage( UIGraphicsGetCurrentContext(), CGRectMake(0, 0, width, height), imgRef );
UIImage *returnImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return returnImage;
}
+ (UIImage *)scaleAndRotateImageFrontCamera:(UIImage *)image
{
static int kMaxResolution = 640;
CGImageRef imgRef = image.CGImage;
CGFloat width = CGImageGetWidth(imgRef);
CGFloat height = CGImageGetHeight(imgRef);
CGAffineTransform transform = CGAffineTransformIdentity;
CGRect bounds = CGRectMake( 0, 0, width, height);
if (width > kMaxResolution || height > kMaxResolution) {
CGFloat ratio = width/height;
if (ratio > 1) {
bounds.size.width = kMaxResolution;
bounds.size.height = bounds.size.width / ratio;
} else {
bounds.size.height = kMaxResolution;
bounds.size.width = bounds.size.height * ratio;
}
}
CGFloat scaleRatio = bounds.size.width / width;
CGSize imageSize = CGSizeMake(CGImageGetWidth(imgRef), CGImageGetHeight(imgRef));
CGFloat boundHeight;
UIImageOrientation orient = image.imageOrientation;
switch(orient) {
case UIImageOrientationUp:
transform = CGAffineTransformIdentity;
break;
case UIImageOrientationUpMirrored:
transform = CGAffineTransformMakeTranslation(imageSize.width, 0.0);
transform = CGAffineTransformScale(transform, -1.0, 1.0);
break;
case UIImageOrientationDown:
transform = CGAffineTransformMakeTranslation(imageSize.width, imageSize.height);
transform = CGAffineTransformRotate(transform, M_PI);
break;
case UIImageOrientationDownMirrored:
transform = CGAffineTransformMakeTranslation(0.0, imageSize.height);
transform = CGAffineTransformScale(transform, 1.0, -1.0);
break;
case UIImageOrientationLeftMirrored:
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeTranslation(imageSize.height, imageSize.width);
transform = CGAffineTransformScale(transform, -1.0, 1.0);
transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
break;
case UIImageOrientationLeft:
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeTranslation(0.0, imageSize.width);
transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
break;
case UIImageOrientationRight:
case UIImageOrientationRightMirrored:
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeScale(-1.0, 1.0);
transform = CGAffineTransformRotate(transform, M_PI / 2.0);
break;
default:
[NSException raise:NSInternalInconsistencyException format:@"Invalid image orientation"];
}
UIGraphicsBeginImageContext( bounds.size );
CGContextRef context = UIGraphicsGetCurrentContext();
if ( orient == UIImageOrientationRight || orient == UIImageOrientationLeft ) {
CGContextScaleCTM(context, -scaleRatio, scaleRatio);
CGContextTranslateCTM(context, -height, 0);
}
else {
CGContextScaleCTM(context, scaleRatio, -scaleRatio);
CGContextTranslateCTM(context, 0, -height);
}
CGContextConcatCTM( context, transform );
CGContextDrawImage( UIGraphicsGetCurrentContext(), CGRectMake(0, 0, width, height), imgRef );
UIImage *returnImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return returnImage;
}
@end
This is code I used to find several markers inside an image and push their center points into a std::vector:
#import "UIImageCVMatConverter.h"
...
cv::Mat src_img;
cv::Mat result_mat;
cv::Mat debug_img;
cv::Mat template_img;
NSArray *markerImages = [NSArray arrayWithObjects:@"marker-1.png", nil];
std::vector<cv::Point> markerPoints;
// input image
src_img = [UIImageCVMatConverter cvMatFromUIImage:cameriaInputImage];
cv::cvtColor(src_img, debug_img, CV_GRAY2BGR);
for (NSString *marker in markerImages) {
template_img = [UIImageCVMatConverter cvMatFromUIImage:[UIImage imageNamed:marker]];
cv::cvtColor(template_img, template_img, CV_GRAY2BGR);
int match_method = CV_TM_CCORR_NORMED;
cv::matchTemplate(src_img, template_img, result_mat, match_method);
cv::normalize(result_mat, result_mat, 0, 1, cv::NORM_MINMAX, -1, cv::Mat());
double minVal;
double maxVal;
cv::Point minLoc, maxLoc, matchLoc;
cv::minMaxLoc(result_mat, &minVal, &maxVal, &minLoc, &maxLoc, cv::Mat() );
if ( match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED ) {
matchLoc = minLoc;
}
else {
matchLoc = maxLoc;
}
cv::Point top_left = matchLoc;
cv::Point bottom_right = cv::Point(matchLoc.x + template_img.cols , matchLoc.y + template_img.rows);
cv::Point center = cv::Point(0,0);
center.x = (bottom_right.x + top_left.x) / 2;
center.y = (bottom_right.y + top_left.y) / 2;
markerPoints.push_back(center);
}
I hope that helps …

How to crop the image in iPhone

I want to do the same thing as asked in this question.
In my app I want to crop an image the way Facebook does. Can anyone point me to a good tutorial or some sample code? The link I have provided completely describes my requirement.
You can create a new image with whatever properties you need. Here is my function, which does that; you just need to use your own parameters for the new image. In my case the image is not cropped: I just apply an effect, moving pixels from their original places to other places. But if you initialize the new image with a different width and height, you can copy any range of pixels you need from the old image into the new one:
-(UIImage *)Color:(UIImage *)img
{
int R;
float m_width = img.size.width;
float m_height = img.size.height;
if (m_width>m_height) R = m_height*0.9;
else R = m_width*0.9;
int m_wint = (int)m_width; // we will need these parameters as both float and int later; alternatively, just cast with (int)/(float) where needed instead of keeping separate variables
int m_hint = (int)m_height;
CGRect imageRect;
//checking the image orientation. We will work with the image pixel by pixel, so the top side needs to be at the top.
if(img.imageOrientation==UIImageOrientationUp
|| img.imageOrientation==UIImageOrientationDown)
{
imageRect = CGRectMake(0, 0, m_wint, m_hint);
}
else
{
imageRect = CGRectMake(0, 0, m_hint, m_wint);
}
uint32_t *rgbImage = (uint32_t *) malloc(m_wint * m_hint * sizeof(uint32_t));
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(rgbImage, m_wint, m_hint, 8, m_wint *sizeof(uint32_t), colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipLast);
CGContextSetInterpolationQuality(context, kCGInterpolationHigh);
CGContextSetShouldAntialias(context, NO);
CGContextTranslateCTM(context, 0, m_hint);
CGContextScaleCTM(context, 1.0, -1.0);
switch (img.imageOrientation) {
case UIImageOrientationRight:
{
CGContextRotateCTM(context, M_PI / 2);
CGContextTranslateCTM(context, 0, -m_wint);
}break;
case UIImageOrientationLeft:
{
CGContextRotateCTM(context, - M_PI / 2);
CGContextTranslateCTM(context, -m_hint, 0);
}break;
case UIImageOrientationUp:
{
CGContextTranslateCTM(context, m_wint, m_hint);
CGContextRotateCTM(context, M_PI);
}
default:
break;
}
CGContextDrawImage(context, imageRect, img.CGImage);
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
//here is the new image. You can change m_wint and m_hint as you want
uint8_t *result = (uint8_t *) calloc(m_wint * m_hint * sizeof(uint32_t), 1);
for(int y = 0; y < m_hint; y++) //new m_hint here
{
float fy=y;
double yy = (m_height*( asinf(m_height/(2*R))-asin(((m_height/2)-fy)/R) )) /
(2*asin(m_height/(2*R))); // (xx, yy) - coordinates of pixel of OLD image
for(int x = 0; x < m_wint; x++) //new m_wint here
{
float fx=x;
double xx = (m_width*( asin(m_width/(2*R))-asin(((m_width/2)-fx)/R) )) /
(2*asin(m_width/(2*R)));
uint32_t rgbPixel=rgbImage[(int)yy * m_wint + (int)xx];
int intRedSource = (rgbPixel>>24)&255;
int intGreenSource = (rgbPixel>>16)&255;
int intBlueSource = (rgbPixel>>8)&255;
result[(y * (int)m_wint + x) * 4] = 0;
result[(y * (int)m_wint + x) * 4 + 1] = intBlueSource;
result[(y * (int)m_wint + x) * 4 + 2] = intGreenSource;
result[(y * (int)m_wint + x) * 4 + 3] = intRedSource;
}
}
free(rgbImage);
colorSpace = CGColorSpaceCreateDeviceRGB();
context = CGBitmapContextCreate(result, m_wint, m_hint, 8, m_wint * sizeof(uint32_t), colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipLast ); //new m_wint and m_hint as well
CGImageRef image1 = CGBitmapContextCreateImage(context);
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
UIImage *resultUIImage = [UIImage imageWithCGImage:image1];
CGImageRelease(image1);
@try {
free(result);
}
@catch (NSException * e) {
NSLog(@"proc. Exception: %@", e);
}
return resultUIImage;
}
CGRect rectImage = CGRectMake(p1.x,p1.y, p2.x - p1.x, p4.y - p1.y);
//Create bitmap image from original image data,
//using rectangle to specify desired crop area
CGImageRef imageRef = CGImageCreateWithImageInRect([imageForCropping CGImage], rectImage);
UIImage *croppedImage = [UIImage imageWithCGImage:imageRef];
imageView1 = [[UIImageView alloc] initWithFrame:CGRectMake(p1.x, p1.y, p2.x - p1.x, p4.y - p1.y)];
imageView1.image = croppedImage;
[self.view addSubview:imageView1];
CGImageRelease(imageRef);

Iphone Converting IplImage to UIImage and back causes rotation

I am using some code to convert between IplImage and UIImage. I take a photo from the camera (a UIImage), convert it to an IplImage, and convert it back using the code posted below. Unfortunately this causes the image to be rotated and stretched by 90 degrees.
So if I pass in a 320x480 image, it comes back as a 320x480 image, but it has been rotated and rescaled so that it looks as if it were rotated 90 degrees (i.e. a 480x320 image) and then scaled non-uniformly... I cannot figure it out -- everything seems right (byte ordering etc.).
+(IplImage *)CreateIplImageFromUIImage:(UIImage *)image {
CGImageRef imageRef = image.CGImage;
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
IplImage *iplimage = cvCreateImage(cvSize(image.size.width, image.size.height), IPL_DEPTH_8U, 4);
CGContextRef contextRef = CGBitmapContextCreate(iplimage->imageData, iplimage->width, iplimage->height,
iplimage->depth, iplimage->widthStep,
colorSpace, kCGImageAlphaPremultipliedLast|kCGBitmapByteOrderDefault);
CGContextDrawImage(contextRef, CGRectMake(0, 0, image.size.width, image.size.height), imageRef);
CGContextRelease(contextRef);
CGColorSpaceRelease(colorSpace);
return iplimage;
}
+(UIImage *)UIImageFromIplImage:(IplImage *)image {
NSLog(#"IplImage (%d, %d) %d bits by %d channels, %d bytes/row %s", image->width, image->height, image->depth, image->nChannels, image->widthStep, image->channelSeq);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
NSData *data = [NSData dataWithBytes:image->imageData length:image->imageSize];
CGDataProviderRef provider = CGDataProviderCreateWithCFData((CFDataRef)data);
CGImageRef imageRef = CGImageCreate(image->width, image->height,
image->depth, image->depth * image->nChannels, image->widthStep,
colorSpace, kCGImageAlphaPremultipliedLast|kCGBitmapByteOrderDefault,
provider, NULL, false, kCGRenderingIntentDefault);
UIImage *ret = [UIImage imageWithCGImage:imageRef scale:1.0 orientation:UIImageOrientationRight];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace);
return ret;
}
I had the same problem too, and I still haven't found out how to change the two methods so that they do not rotate the picture.
Instead I rotated the UIImage; e.g. my code looks like this:
//Change the rotation here
UIImage *imageCam= [UIImage imageWithCGImage:newImage scale:1.0 orientation:UIImageOrientationUp];
//Convert UIImage
IplImage *image = [self CreateIplImageFromUIImage:imageCam];
Just change
UIImage *ret = [UIImage imageWithCGImage:imageRef scale:1.0 orientation:UIImageOrientationRight];
into:
UIImage *ret = [UIImage imageWithCGImage:imageRef scale:1.0 orientation:UIImageOrientationUp];
I had the same problem and I found the solution - change CreateIplImageFromUIImage like this:
- (IplImage *)CreateIplImageFromUIImage:(UIImage *)image {
CGImageRef imageRef = [self rotateImage:image].CGImage;
...........
}
- (UIImage* )rotateImage:(UIImage *)image {
int kMaxResolution = 320;
// Or whatever
CGImageRef imgRef = image.CGImage;
CGFloat width = CGImageGetWidth(imgRef);
CGFloat height = CGImageGetHeight(imgRef);
CGAffineTransform transform = CGAffineTransformIdentity;
CGRect bounds = CGRectMake(0, 0, width, height);
if (width > kMaxResolution || height > kMaxResolution) {
CGFloat ratio = width / height;
if (ratio > 1 ) {
bounds.size.width = kMaxResolution;
bounds.size.height = bounds.size.width / ratio;
}
else {
bounds.size.height = kMaxResolution;
bounds.size.width = bounds.size.height * ratio;
}
}
CGFloat scaleRatio = bounds.size.width / width;
CGSize imageSize = CGSizeMake(CGImageGetWidth(imgRef), CGImageGetHeight(imgRef));
CGFloat boundHeight;
UIImageOrientation orient = image.imageOrientation;
switch (orient) {
case UIImageOrientationUp:
//EXIF = 1
transform = CGAffineTransformIdentity;
break;
case UIImageOrientationUpMirrored:
//EXIF = 2
transform = CGAffineTransformMakeTranslation(imageSize.width, 0.0);
transform = CGAffineTransformScale(transform, -1.0, 1.0 );
break;
case UIImageOrientationDown:
//EXIF = 3
transform = CGAffineTransformMakeTranslation(imageSize.width, imageSize.height);
transform = CGAffineTransformRotate(transform, M_PI);
break;
case UIImageOrientationDownMirrored:
//EXIF = 4
transform = CGAffineTransformMakeTranslation(0.0, imageSize.height);
transform = CGAffineTransformScale(transform, 1.0, -1.0);
break;
case UIImageOrientationLeftMirrored:
//EXIF = 5
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeTranslation(imageSize.height, imageSize.width );
transform = CGAffineTransformScale(transform, -1.0, 1.0);
transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0 );
break;
case UIImageOrientationLeft:
//EXIF = 6
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeTranslation(0.0, imageSize.width);
transform = CGAffineTransformRotate( transform, 3.0 * M_PI / 2.0 );
break;
case UIImageOrientationRightMirrored:
//EXIF = 7
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeScale(-1.0, 1.0);
transform = CGAffineTransformRotate( transform, M_PI / 2.0);
break;
case UIImageOrientationRight:
//EXIF = 8
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeTranslation(imageSize.height, 0.0);
transform = CGAffineTransformRotate(transform, M_PI / 2.0 );
break;
default:
[NSException raise:NSInternalInconsistencyException format:@"Invalid image orientation"];
}
UIGraphicsBeginImageContext(bounds.size);
CGContextRef context = UIGraphicsGetCurrentContext();
if (orient == UIImageOrientationRight || orient == UIImageOrientationLeft) {
CGContextScaleCTM(context, -scaleRatio, scaleRatio);
CGContextTranslateCTM(context, -height, 0);
}
else {
CGContextScaleCTM(context, scaleRatio, -scaleRatio);
CGContextTranslateCTM(context, 0, -height);
}
CGContextConcatCTM(context, transform );
CGContextDrawImage(UIGraphicsGetCurrentContext(), CGRectMake(0, 0, width, height), imgRef);
UIImage *imageCopy = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return imageCopy;
}

iPhone: AVCaptureSession capture output crashing (AVCaptureVideoDataOutput)

I'm capturing video and converting it to a CGImage to do processing on it. It works for about 10 seconds, then I get a memory warning and it crashes (usually it says data formatters were temporarily unavailable). Can someone help me solve the problem?
- (void) captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
// CONVERT CMSAMPLEBUFFER INTO A CGIMAGE
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(imageBuffer,0);
uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
CGImageRef cgimage = CGBitmapContextCreateImage(newContext);
UIImage *sourceImage= [UIImage imageWithCGImage:cgimage scale:1.0f orientation:UIImageOrientationLeftMirrored];
CGImageRelease(cgimage);
CGContextRelease(newContext);
CGColorSpaceRelease(colorSpace);
CVPixelBufferUnlockBaseAddress(imageBuffer,0);
// ***
// Adding code after this point to do image transformation usually causes it to crash
UIImage *rot = [self scaleAndRotateImage:sourceImage];
self.detectImage = rot;
}
Code to transform the image....
- (UIImage*)scaleAndRotateImage:(UIImage *)image{
int kMaxResolution = 320; // Or whatever
CGImageRef imgRef = image.CGImage;
CGFloat width = CGImageGetWidth(imgRef);
CGFloat height = CGImageGetHeight(imgRef);
CGAffineTransform transform = CGAffineTransformIdentity;
CGRect bounds = CGRectMake(0, 0, width, height);
if (width > kMaxResolution || height > kMaxResolution) {
CGFloat ratio = width/height;
if (ratio > 1) {
bounds.size.width = kMaxResolution;
bounds.size.height = bounds.size.width / ratio;
}
else {
bounds.size.height = kMaxResolution;
bounds.size.width = bounds.size.height * ratio;
}
}
CGFloat scaleRatio = bounds.size.width / width;
CGSize imageSize = CGSizeMake(CGImageGetWidth(imgRef), CGImageGetHeight(imgRef));
CGFloat boundHeight;
UIImageOrientation orient = image.imageOrientation;
switch(orient) {
case UIImageOrientationUp: //EXIF = 1
transform = CGAffineTransformIdentity;
break;
case UIImageOrientationUpMirrored: //EXIF = 2
transform = CGAffineTransformMakeTranslation(imageSize.width, 0.0);
transform = CGAffineTransformScale(transform, -1.0, 1.0);
break;
case UIImageOrientationDown: //EXIF = 3
transform = CGAffineTransformMakeTranslation(imageSize.width, imageSize.height);
transform = CGAffineTransformRotate(transform, M_PI);
break;
case UIImageOrientationDownMirrored: //EXIF = 4
transform = CGAffineTransformMakeTranslation(0.0, imageSize.height);
transform = CGAffineTransformScale(transform, 1.0, -1.0);
break;
case UIImageOrientationLeftMirrored: //EXIF = 5
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeTranslation(imageSize.height, imageSize.height);
transform = CGAffineTransformScale(transform, -1.0, 1.0);
transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
break;
case UIImageOrientationLeft: //EXIF = 6
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeTranslation(0.0, imageSize.width);
transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
break;
case UIImageOrientationRightMirrored: //EXIF = 7
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeScale(-1.0, 1.0);
transform = CGAffineTransformRotate(transform, M_PI / 2.0);
break;
case UIImageOrientationRight: //EXIF = 8
boundHeight = bounds.size.height;
bounds.size.height = bounds.size.width;
bounds.size.width = boundHeight;
transform = CGAffineTransformMakeTranslation(imageSize.height, 0.0);
transform = CGAffineTransformRotate(transform, M_PI / 2.0);
break;
default:
[NSException raise:NSInternalInconsistencyException format:@"Invalid image orientation"];
}
UIGraphicsBeginImageContext(bounds.size);
CGContextRef context = UIGraphicsGetCurrentContext();
if (orient == UIImageOrientationRight || orient == UIImageOrientationLeft) {
CGContextScaleCTM(context, -scaleRatio, scaleRatio);
CGContextTranslateCTM(context, -height, 0);
}
else {
CGContextScaleCTM(context, scaleRatio, -scaleRatio);
CGContextTranslateCTM(context, 0, -height);
}
CGContextConcatCTM(context, transform);
CGContextDrawImage(UIGraphicsGetCurrentContext(), CGRectMake(0, 0, width, height), imgRef);
UIImage *imageCopy = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
//[self setRotatedImage:imageCopy];
return imageCopy;
}
This code is just background to show how I set up the video output...
AVCaptureVideoDataOutput *videoOut = [[AVCaptureVideoDataOutput alloc] init];
[videoOut setAlwaysDiscardsLateVideoFrames:YES];
[videoOut setVideoSettings:[NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_32BGRA] forKey:(id)kCVPixelBufferPixelFormatTypeKey]]; // BGRA is necessary for manual preview
dispatch_queue_t my_queue = dispatch_queue_create("com.example.subsystem.taskXYZ", NULL);
[videoOut setSampleBufferDelegate:self queue:my_queue];
if ([self.captureSession canAddOutput:videoOut]) [self.captureSession addOutput:videoOut];
else NSLog(@"Couldn't add video output");
[videoOut release];
I had a similar issue. What ended up happening was that the queue was filling up with unprocessed frames, because I wasn't processing fast enough in the delegate object.
My solution was to do (once per processed frame):
proctr++;
if ((proctr % 20) == 0) {
deferImageProcessing = true;
dispatch_sync(queue, ^{
[self queueFlushed];
});
}
- (void)queueFlushed {
deferImageProcessing = false;
}
Then, in the actual image processing code
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection
{
if (deferImageProcessing)
return;
// do whatever else I'm doing...
}
Essentially, we occasionally suspend image processing until the queue empties.
I hope this is useful.
In setting up the video output, the newly created dispatch queue is not released. You can release it with
dispatch_release(queue);
But I don't believe this function gets called too often, so the leak probably originates elsewhere. I browsed your code several times and couldn't find any other culprits...
Have you tried searching the leak with the Leaks instrument tool?
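One more thing worth trying (just a guess from the code shown): imageWithCGImage: returns autoreleased objects, and on a background dispatch queue those can pile up between pool drains. Wrapping the per-frame work in its own autorelease pool is a cheap experiment:
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
// ... existing conversion and scaleAndRotateImage: work ...
[pool drain];
}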