How to blur an image on the iPhone quickly and efficiently?

If I have a UIImage, a CGContextRef, or the raw bitmap data (direct access to decompressed ARGB-8 pixels), what's my best option for blurring an image with a radius of 10 pixels as fast as possible?

I've implemented a stack blur algorithm for iOS; it's close to a Gaussian blur but very fast:
https://github.com/tomsoft1/StackBluriOS
For an example of its use, see:
Blur an UIImage on change of slider

Use either a stack blur, a box blur, or an OpenGL texture blur (Google the first two; check the Apple developer samples for the latter).

The following approach, from RNBlurModalView (https://github.com/rnystrom/RNBlurModalView), uses Accelerate's vImage box convolution:
- (UIImage *)boxblurImageWithBlur:(CGFloat)blur bluringImage:(UIImage *)image
{
    // Map the 0..1 blur amount to an odd box-filter size (vImage requires odd kernel sizes).
    int boxSize = (int)(blur * 40);
    boxSize = boxSize - (boxSize % 2) + 1;

    CGImageRef img = image.CGImage;

    vImage_Buffer inBuffer, outBuffer;
    vImage_Error error;
    void *pixelBuffer;

    // Create a vImage_Buffer backed by the CGImage's bitmap data.
    CGDataProviderRef inProvider = CGImageGetDataProvider(img);
    CFDataRef inBitmapData = CGDataProviderCopyData(inProvider);

    inBuffer.width = CGImageGetWidth(img);
    inBuffer.height = CGImageGetHeight(img);
    inBuffer.rowBytes = CGImageGetBytesPerRow(img);
    inBuffer.data = (void *)CFDataGetBytePtr(inBitmapData);

    // Create a vImage_Buffer for the output.
    pixelBuffer = malloc(CGImageGetBytesPerRow(img) * CGImageGetHeight(img));
    if (pixelBuffer == NULL) {
        NSLog(@"No pixelbuffer");
    }

    outBuffer.data = pixelBuffer;
    outBuffer.width = CGImageGetWidth(img);
    outBuffer.height = CGImageGetHeight(img);
    outBuffer.rowBytes = CGImageGetBytesPerRow(img);

    // Create a third buffer for intermediate processing.
    void *pixelBuffer2 = malloc(CGImageGetBytesPerRow(img) * CGImageGetHeight(img));
    vImage_Buffer outBuffer2;
    outBuffer2.data = pixelBuffer2;
    outBuffer2.width = CGImageGetWidth(img);
    outBuffer2.height = CGImageGetHeight(img);
    outBuffer2.rowBytes = CGImageGetBytesPerRow(img);

    // Three box-blur passes approximate a Gaussian blur.
    error = vImageBoxConvolve_ARGB8888(&inBuffer, &outBuffer2, NULL, 0, 0, boxSize, boxSize, NULL, kvImageEdgeExtend);
    error = vImageBoxConvolve_ARGB8888(&outBuffer2, &inBuffer, NULL, 0, 0, boxSize, boxSize, NULL, kvImageEdgeExtend);
    error = vImageBoxConvolve_ARGB8888(&inBuffer, &outBuffer, NULL, 0, 0, boxSize, boxSize, NULL, kvImageEdgeExtend);
    if (error) {
        NSLog(@"error from convolution %ld", error);
    }

    // Wrap the blurred pixels in a new CGImage/UIImage.
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef ctx = CGBitmapContextCreate(outBuffer.data,
                                             outBuffer.width,
                                             outBuffer.height,
                                             8,
                                             outBuffer.rowBytes,
                                             colorSpace,
                                             kCGImageAlphaNoneSkipLast);
    CGImageRef imageRef = CGBitmapContextCreateImage(ctx);
    UIImage *returnImage = [UIImage imageWithCGImage:imageRef];

    // Clean up.
    CGContextRelease(ctx);
    CGColorSpaceRelease(colorSpace);
    free(pixelBuffer);
    free(pixelBuffer2);
    CFRelease(inBitmapData);
    CGImageRelease(imageRef);

    return returnImage;
}
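For reference, a minimal usage sketch (this assumes the method above lives in a class with UIKit and Accelerate imported; the asset name and blur amount are illustrative):

UIImage *sourceImage = [UIImage imageNamed:@"photo.png"]; // hypothetical asset name
// A blur of 0.25 maps to an 11-pixel box via the boxSize computation above.
UIImage *blurredImage = [self boxblurImageWithBlur:0.25f bluringImage:sourceImage];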

Related

OpenCV iOS - Not the same result on simulator and on iPhone

I have a problem with cropping an image using the OpenCV library on a real iPhone.
I have an image with a selected area, and I'd like to apply a perspective transform to the image using this area. This works fine on the simulator, but on the iPhone the new image isn't mapped to the rectangle and it also has a blue tint.
Here is my code:
+ (IplImage *)CreateIplImageFromUIImage:(UIImage *)image {
    // Getting CGImage from UIImage
    CGImageRef imageRef = image.CGImage;
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();

    // Creating temporal IplImage for drawing
    IplImage *iplimage = cvCreateImage(
        cvSize(image.size.width, image.size.height), IPL_DEPTH_8U, 4
    );

    // Creating CGContext for temporal IplImage
    CGContextRef contextRef = CGBitmapContextCreate(
        iplimage->imageData, iplimage->width, iplimage->height,
        iplimage->depth, iplimage->widthStep,
        colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrderDefault
    );

    // Drawing CGImage to CGContext
    CGContextDrawImage(
        contextRef,
        CGRectMake(0, 0, image.size.width, image.size.height),
        imageRef
    );
    CGContextRelease(contextRef);
    CGColorSpaceRelease(colorSpace);

    // Creating result IplImage
    IplImage *ret = cvCreateImage(cvGetSize(iplimage), IPL_DEPTH_8U, 3);
    cvCvtColor(iplimage, ret, CV_RGBA2BGR);
    cvReleaseImage(&iplimage);

    return ret;
}

+ (UIImage *)UIImageFromIplImage:(IplImage *)img {
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();

    // Creating result IplImage
    IplImage *image = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 4);
    cvCvtColor(img, image, CV_BGR2RGBA);

    // Allocating the buffer for CGImage
    NSData *data =
        [NSData dataWithBytes:image->imageData length:image->imageSize];
    CGDataProviderRef provider =
        CGDataProviderCreateWithCFData((CFDataRef)data);

    // Creating CGImage from chunk of IplImage
    CGImageRef imageRef = CGImageCreate(
        image->width, image->height,
        image->depth, image->depth * image->nChannels, image->widthStep,
        colorSpace, kCGImageAlphaNone | kCGBitmapByteOrderDefault,
        provider, NULL, false, kCGRenderingIntentDefault
    );

    // Getting UIImage from CGImage
    UIImage *ret = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);

    return ret;
}
+ (UIImage *)perspectiveTransform:(UIImage *)originalImage :(CGFloat)h :(CGFloat)w :(CGPoint)point1 :(CGPoint)point2 :(CGPoint)point3 :(CGPoint)point4
{
    CvPoint2D32f srcQuad[4];
    srcQuad[0].x = point1.x;
    srcQuad[0].y = point1.y;
    srcQuad[1].x = point2.x;
    srcQuad[1].y = point2.y;
    srcQuad[2].x = point3.x;
    srcQuad[2].y = point3.y;
    srcQuad[3].x = point4.x;
    srcQuad[3].y = point4.y;

    IplImage *src = [self CreateIplImageFromUIImage:originalImage];
    IplImage *dst = cvCreateImage(cvGetSize(src),
                                  src->depth,
                                  src->nChannels);
    cvZero(dst);

    CGFloat width = src->width;
    CGFloat height = src->height;

    CvMat *mmat = cvCreateMat(3, 3, CV_32FC1);
    CvPoint2D32f *c1 = (CvPoint2D32f *)malloc(4 * sizeof(CvPoint2D32f));
    CvPoint2D32f *c2 = (CvPoint2D32f *)malloc(4 * sizeof(CvPoint2D32f));

    c1[0].x = round((width/w)*srcQuad[0].x); c1[0].y = round((height/h)*srcQuad[0].y);
    c1[1].x = round((width/w)*srcQuad[1].x); c1[1].y = round((height/h)*srcQuad[1].y);
    c1[2].x = round((width/w)*srcQuad[2].x); c1[2].y = round((height/h)*srcQuad[2].y);
    c1[3].x = round((width/w)*srcQuad[3].x); c1[3].y = round((height/h)*srcQuad[3].y);

    c2[0].x = 0;         c2[0].y = 0;
    c2[1].x = width - 1; c2[1].y = 0;
    c2[2].x = 0;         c2[2].y = height - 1;
    c2[3].x = width - 1; c2[3].y = height - 1;

    mmat = cvGetPerspectiveTransform(c1, c2, mmat);
    free(c1);
    free(c2);

    cvWarpPerspective(src, dst, mmat, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
    cvReleaseImage(&src);
    cvReleaseMat(&mmat);

    UIImage *newImage = [self UIImageFromIplImage:dst];
    cvReleaseImage(&dst);
    return newImage;
}
Thanks for your help!

Does something like cvShowImage exist for iOS?

I'm processing images (using AVFoundation and OpenCV on iOS) and I simply want to display the contents of a CMSampleBufferRef (or IplImage) on screen.
Simply put: I just want to display the unconverted image (as with OpenCV's cvShowImage()) to check that I'm not dealing with a corrupted or otherwise deformed image.
Sadly, no; the bitmap representations are different.
Perhaps you want a category? I use something along the lines of this:
// NSImage+OpenCV.h (an AppKit/NSImage category; the same pattern can be adapted to UIImage on iOS)
#import <AppKit/AppKit.h>
#import <opencv2/opencv.hpp>

@interface NSImage (OpenCV)
+ (NSImage *)imageWithCVMat:(const cv::Mat &)cvMat;
- (id)initWithCVMat:(const cv::Mat &)cvMat;
- (cv::Ptr<cv::Mat>)cvMat;
@end

// NSImage+OpenCV.mm (Objective-C++)
#import "NSImage+OpenCV.h"

using namespace cv;

@implementation NSImage (OpenCV)

- (id)initWithCVMat:(const cv::Mat &)cvMat {
    // Wrap the Mat's pixel data in a CGImage and build an NSImage from it.
    NSData *data = [NSData dataWithBytes:cvMat.data
                                  length:cvMat.total() * cvMat.elemSize()];
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
    CGColorSpaceRef colourSpace = CGColorSpaceCreateDeviceRGB();
    CGImageRef imageRef = CGImageCreate(cvMat.cols,
                                        cvMat.rows,
                                        8,
                                        8 * cvMat.elemSize(),
                                        cvMat.step[0],
                                        colourSpace,
                                        kCGImageAlphaNone |
                                        kCGBitmapByteOrderDefault,
                                        provider,
                                        NULL,
                                        false,
                                        kCGRenderingIntentDefault);
    NSImage *image = [[NSImage alloc] initWithCGImage:imageRef size:CGSizeMake(cvMat.cols, cvMat.rows)];
    CGColorSpaceRelease(colourSpace);
    CGDataProviderRelease(provider);
    CGImageRelease(imageRef);
    return image;
}

+ (NSImage *)imageWithCVMat:(const cv::Mat &)cvMat {
    return [[NSImage alloc] initWithCVMat:cvMat];
}

- (cv::Ptr<cv::Mat>)cvMat {
    // Render the image into an RGBA cv::Mat via a bitmap context.
    CGImageSourceRef source = CGImageSourceCreateWithData((__bridge CFDataRef)[self TIFFRepresentation], NULL);
    CGImageRef imageRef = CGImageSourceCreateImageAtIndex(source, 0, NULL);
    cv::Ptr<cv::Mat> cvMat = new cv::Mat(self.size.height, self.size.width, CV_8UC4);
    CGContextRef contextRef = CGBitmapContextCreate(cvMat->data,
                                                    cvMat->cols,
                                                    cvMat->rows,
                                                    8,
                                                    cvMat->step[0],
                                                    CGImageGetColorSpace(imageRef),
                                                    kCGImageAlphaNoneSkipLast |
                                                    kCGBitmapByteOrderDefault);
    CGContextDrawImage(contextRef,
                       CGRectMake(0, 0, cvMat->cols, cvMat->rows),
                       imageRef);
    CGContextRelease(contextRef);
    CGImageRelease(imageRef);
    CFRelease(source); // release the image source created above
    return cvMat;
}

@end
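A minimal usage sketch of the category (from an Objective-C++ file; the Mat contents here are only illustrative):

#import "NSImage+OpenCV.h"

cv::Mat frame(480, 640, CV_8UC4, cv::Scalar(0, 0, 0, 255)); // e.g. a processed RGBA frame
NSImage *preview = [NSImage imageWithCVMat:frame];          // hand this to an NSImageView to display it
cv::Ptr<cv::Mat> roundTrip = [preview cvMat];               // and convert back if needed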

Create UIImage from CMSampleBufferRef using video format kCVPixelFormatType_420YpCbCr8BiPlanarFullRange

I'm capturing from the iPhone camera using kCVPixelFormatType_420YpCbCr8BiPlanarFullRange for faster processing of the grayscale plane (plane 0).
I am storing a number of frames in memory for later video creation. Previously I was creating the video in grayscale, so I stored only the plane that contains the luminance (plane 0).
Now I have to store both planes and also create a color video from them. To store the frames I use something like this:
bytesInFrame = width * height * 2; // 2 bytes per pixel, is that correct?
frameData = (unsigned char *)malloc(bytesInFrame * numberOfFrames);
The function I was using to create the image from the grayscale buffer:
- (UIImage *)convertBitmapGrayScaleToUIImage:(unsigned char *)buffer
                                   withWidth:(int)width
                                  withHeight:(int)height {
    size_t bufferLength = width * height * 1;
    CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer, bufferLength, NULL);
    size_t bitsPerComponent = 8;
    size_t bitsPerPixel = 8;
    size_t bytesPerRow = 1 * width;

    CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceGray();
    if (colorSpaceRef == NULL) {
        DLog(@"Error allocating color space");
        CGDataProviderRelease(provider);
        return nil;
    }

    CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
    CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;

    CGImageRef iref = CGImageCreate(width,
                                    height,
                                    bitsPerComponent,
                                    bitsPerPixel,
                                    bytesPerRow,
                                    colorSpaceRef,
                                    bitmapInfo,
                                    provider,   // data provider
                                    NULL,       // decode
                                    YES,        // should interpolate
                                    renderingIntent);

    uint32_t *pixels = (uint32_t *)malloc(bufferLength);
    if (pixels == NULL) {
        DLog(@"Error: Memory not allocated for bitmap");
        CGDataProviderRelease(provider);
        CGColorSpaceRelease(colorSpaceRef);
        CGImageRelease(iref);
        return nil;
    }

    CGContextRef context = CGBitmapContextCreate(pixels,
                                                 width,
                                                 height,
                                                 bitsPerComponent,
                                                 bytesPerRow,
                                                 colorSpaceRef,
                                                 bitmapInfo);
    if (context == NULL) {
        DLog(@"Error context not created");
        free(pixels);
    }

    UIImage *image = nil;
    if (context) {
        CGContextDrawImage(context, CGRectMake(0.0f, 0.0f, width, height), iref);
        CGImageRef imageRef = CGBitmapContextCreateImage(context);
        image = [UIImage imageWithCGImage:imageRef];
        CGImageRelease(imageRef);
        CGContextRelease(context);
    }

    CGColorSpaceRelease(colorSpaceRef);
    CGImageRelease(iref);
    CGDataProviderRelease(provider);
    if (pixels) {
        free(pixels);
    }
    return image;
}
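For completeness, I call it on plane 0 of the captured buffer roughly like this (this assumes the luma plane has no row padding, since the method only takes a width):

CVImageBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);
unsigned char *lumaPlane = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
int lumaWidth  = (int)CVPixelBufferGetWidthOfPlane(pixelBuffer, 0);
int lumaHeight = (int)CVPixelBufferGetHeightOfPlane(pixelBuffer, 0);
UIImage *grayFrame = [self convertBitmapGrayScaleToUIImage:lumaPlane withWidth:lumaWidth withHeight:lumaHeight];
CVPixelBufferUnlockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);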
I have seen that this question is similar to what I want to achieve:
kCVPixelFormatType_420YpCbCr8BiPlanarFullRange frame to UIImage conversion
But I think this would add an extra step to my conversion.
Is there any way of creating a color UIImage from the buffer directly?
I would appreciate some pointers.

iOS: How to perform rotation on a CGImage efficiently (up to 125 frames)?

I have two images, let's call them image A and image B. I'm trying to rotate and scale image A and draw it on top of image B at a specific location.
I use the function and method below to:
1. rotate and scale my image;
2. draw image A into image B at a specific location.
This code works fine but is rather slow; is there a way to improve it?
Function to rotate and scale:
CGImageRef rotateAndScaleImageCreate(const CGImageRef cgImage, const CGFloat radians, const CGFloat scalefactor)
{
    CGImageRef rotatedImageRef = NULL;
    const CGFloat originalWidth = CGImageGetWidth(cgImage) * scalefactor;
    const CGFloat originalHeight = CGImageGetHeight(cgImage) * scalefactor;
    const CGRect imgRect = (CGRect){.origin.x = 0.0f, .origin.y = 0.0f, .size.width = originalWidth, .size.height = originalHeight};
    const CGRect rotatedRect = CGRectApplyAffineTransform(imgRect, CGAffineTransformMakeRotation(radians));

    /// Create an ARGB bitmap context
    CGContextRef bmContext = NYXImageCreateARGBBitmapContext(rotatedRect.size.width, rotatedRect.size.height, 0);
    if (!bmContext)
        return NULL;

    /// Rotation happens here
    CGContextTranslateCTM(bmContext, +(rotatedRect.size.width * 0.5f), +(rotatedRect.size.height * 0.5f));
    CGContextRotateCTM(bmContext, radians);

    /// Draw the image in the bitmap context
    CGContextDrawImage(bmContext, (CGRect){.origin.x = -originalWidth * 0.5f, .origin.y = -originalHeight * 0.5f, .size.width = originalWidth, .size.height = originalHeight}, cgImage);

    /// Create an image object from the context (the caller is responsible for releasing it)
    rotatedImageRef = CGBitmapContextCreateImage(bmContext);

    /// Cleanup
    CGContextRelease(bmContext);

    return rotatedImageRef;
}
Method to draw an image at a specific location within another image:
- (UIImage *)PositionImage:(CGImageRef)image backgroundImage:(CGImageRef)backgroundImage atPointX:(float)X pointY:(float)Y withScale:(float)scale andRotation:(float)rotation
{
    CGLayerRef layer;
    CGImageRef resultImage;
    CGContextRef context, layerContext;
    void *bitmapData;
    CGColorSpaceRef colorSpace;
    CGSize canvasSize;
    int bitmapByteCount;
    int bitmapBytesPerRow;

    // Get the background image
    // UIImage *backgroundImg = [UIImage imageNamed:@"Mask-Inverted.png"];

    // Initialize the canvas size
    canvasSize = CGSizeMake(480, 640); // [backgroundImg size];
    bitmapBytesPerRow = (canvasSize.width * 4);
    bitmapByteCount = (bitmapBytesPerRow * canvasSize.height);

    // Create the color space and the backing buffer
    colorSpace = CGColorSpaceCreateDeviceRGB();
    bitmapData = malloc(bitmapByteCount);
    // Check that the buffer was allocated
    if (bitmapData == NULL) {
        NSLog(@"Buffer could not be alloc'd");
    }

    // Create the context
    context = CGBitmapContextCreate(bitmapData, canvasSize.width, canvasSize.height, 8, bitmapBytesPerRow, colorSpace, kCGImageAlphaPremultipliedLast);
    if (context == NULL) {
        NSLog(@"Context could not be created");
    }

    // Create the layer
    layer = CGLayerCreateWithContext(context, canvasSize, NULL);
    if (layer == NULL) {
        NSLog(@"Layer could not be created");
    }

    // Create the layer context
    layerContext = CGLayerGetContext(layer);
    if (layerContext == NULL) {
        NSLog(@"No layer context");
    }

    // Draw the background image into the bitmap context
    CGContextDrawImage(context, CGRectMake(0.0f, 0.0f, 480, 640), backgroundImage);

    // Rotate and scale the image
    CGImageRef rotatedImage = rotateAndScaleImageCreate(image, rotation, scale);

    // Draw the image on the layer
    CGContextDrawImage(layerContext, CGRectMake(0, 0, CGImageGetWidth(rotatedImage), CGImageGetHeight(rotatedImage)), rotatedImage);

    // Draw the layer into the context at the coordinate
    CGContextDrawLayerAtPoint(context, CGPointMake(X - CGImageGetWidth(rotatedImage) * 0.5f, Y - CGImageGetHeight(rotatedImage) * 0.5f), layer);
    CGImageRelease(rotatedImage); // released only after its size is no longer needed
    CGLayerRelease(layer);

    // Get the result image
    resultImage = CGBitmapContextCreateImage(context);
    CGContextRelease(context);

    // Cleanup
    free(bitmapData);
    CGColorSpaceRelease(colorSpace);

    // Create UIImage
    UIImage *imageReturned = [UIImage imageWithCGImage:resultImage];
    CGImageRelease(resultImage);

    return imageReturned;
}
If you're creating lots of images at the same time, reuse the same CGContext. Also, don't create a new rotated image; rotate the image within the existing context.
Basically, you should only have to create a CGContext once, no matter how many images you end up making with it. That should speed things up significantly.
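As a rough sketch of that suggestion (names such as reusableContext are illustrative, and this assumes the 480x640 canvas size used above stays constant across frames):

// Create the canvas context once, not once per frame.
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef reusableContext = CGBitmapContextCreate(NULL, 480, 640, 8, 0, colorSpace, kCGImageAlphaPremultipliedLast);
CGColorSpaceRelease(colorSpace);

// Then, for each of the 125 frames: clear, draw the background, and draw image A rotated/scaled in place.
CGContextClearRect(reusableContext, CGRectMake(0, 0, 480, 640));
CGContextDrawImage(reusableContext, CGRectMake(0, 0, 480, 640), backgroundImage);
CGContextSaveGState(reusableContext);
CGContextTranslateCTM(reusableContext, X, Y);
CGContextRotateCTM(reusableContext, rotation);
CGContextScaleCTM(reusableContext, scale, scale);
CGFloat w = CGImageGetWidth(image), h = CGImageGetHeight(image);
CGContextDrawImage(reusableContext, CGRectMake(-w * 0.5f, -h * 0.5f, w, h), image);
CGContextRestoreGState(reusableContext);
CGImageRef frame = CGBitmapContextCreateImage(reusableContext);
// ...use frame, then CGImageRelease(frame); release reusableContext only after the last frame.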

Image processing: Glamour filter on the iPhone

I want to create an app that does some image processing, so I would like to know if there is any open-source image processing library available. I would also like to create a filter like this Glamour Filter; any help regarding this would be much appreciated. If someone already has source code for sepia, black-and-white, rotate, and scale effects, please share it. Thanks.
Here is the code for the sepia effect:
- (UIImage *)makeSepiaScale:(UIImage *)image
{
    // Grab the raw bitmap data (assumes a tightly packed 32-bit RGBA image).
    CGImageRef cgImage = [image CGImage];
    CGDataProviderRef provider = CGImageGetDataProvider(cgImage);
    CFDataRef bitmapData = CGDataProviderCopyData(provider);
    UInt8 *data = (UInt8 *)CFDataGetBytePtr(bitmapData);

    int width = image.size.width;
    int height = image.size.height;
    NSInteger myDataLength = width * height * 4;

    // Apply the sepia weights to each pixel, clamping to 255.
    for (int i = 0; i < myDataLength; i += 4)
    {
        UInt8 r_pixel = data[i];
        UInt8 g_pixel = data[i+1];
        UInt8 b_pixel = data[i+2];

        int outputRed   = (r_pixel * .393) + (g_pixel * .769) + (b_pixel * .189);
        int outputGreen = (r_pixel * .349) + (g_pixel * .686) + (b_pixel * .168);
        int outputBlue  = (r_pixel * .272) + (g_pixel * .534) + (b_pixel * .131);

        if (outputRed > 255)   outputRed = 255;
        if (outputGreen > 255) outputGreen = 255;
        if (outputBlue > 255)  outputBlue = 255;

        data[i]   = outputRed;
        data[i+1] = outputGreen;
        data[i+2] = outputBlue;
    }

    // Rebuild a CGImage/UIImage from the modified pixels.
    CGDataProviderRef provider2 = CGDataProviderCreateWithData(NULL, data, myDataLength, NULL);
    int bitsPerComponent = 8;
    int bitsPerPixel = 32;
    int bytesPerRow = 4 * width;
    CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
    CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
    CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
    CGImageRef imageRef = CGImageCreate(width, height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider2, NULL, NO, renderingIntent);

    // Release what is no longer needed.
    CGColorSpaceRelease(colorSpaceRef);
    CGDataProviderRelease(provider2);
    CFRelease(bitmapData);

    UIImage *sepiaImage = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);
    return sepiaImage;
}
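For reference, the 0.393/0.769/0.189 (and related) coefficients are the commonly used sepia-tone matrix. A minimal usage sketch, assuming the source UIImage is backed by a tightly packed 32-bit RGBA bitmap as the method expects (the asset name is illustrative):

UIImage *original = [UIImage imageNamed:@"portrait.png"]; // hypothetical asset name
UIImage *sepia = [self makeSepiaScale:original];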
Here is the code for the black & white effect:
- (UIImage *)createGrayCopy:(UIImage *)source {
    int width = source.size.width;
    int height = source.size.height;

    // Draw the source into a grayscale bitmap context.
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
    CGContextRef context = CGBitmapContextCreate(nil, width,
                                                 height,
                                                 8,   // bits per component
                                                 0,   // let CG compute bytes per row
                                                 colorSpace,
                                                 kCGImageAlphaNone);
    CGColorSpaceRelease(colorSpace);
    if (context == NULL) {
        return nil;
    }

    CGContextDrawImage(context,
                       CGRectMake(0, 0, width, height), source.CGImage);

    CGImageRef grayImageRef = CGBitmapContextCreateImage(context);
    UIImage *grayImage = [UIImage imageWithCGImage:grayImageRef];
    CGImageRelease(grayImageRef);
    CGContextRelease(context);
    return grayImage;
}
Also, search for OpenCV; a short sketch of that route follows.
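As a hedged sketch of the OpenCV route (OpenCV 2.x, matching the C/C++ API used above; rgbaMat, angleDegrees, and scaleFactor are illustrative names for an RGBA cv::Mat and the desired rotation and scale):

#include <opencv2/imgproc/imgproc.hpp>

// Black & white: convert the RGBA frame to a single-channel grayscale image.
cv::Mat gray;
cv::cvtColor(rgbaMat, gray, CV_RGBA2GRAY);

// Rotate about the image centre and scale uniformly in one warp.
cv::Mat M = cv::getRotationMatrix2D(cv::Point2f(rgbaMat.cols / 2.0f, rgbaMat.rows / 2.0f),
                                    angleDegrees, scaleFactor);
cv::Mat rotated;
cv::warpAffine(rgbaMat, rotated, M, rgbaMat.size());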