I have a problem cropping an image with the OpenCV library on a real iPhone.
I have an image with a selected area, and I'd like to apply a perspective transform to the image using this area. This works fine on the simulator, but on the iPhone the new image isn't mapped to a rectangle and it also has a blue tint.
Here is my code:
+ (IplImage *)CreateIplImageFromUIImage:(UIImage *)image {
    // Get the CGImage from the UIImage
    CGImageRef imageRef = image.CGImage;
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    // Create a temporary 4-channel IplImage to draw into
    IplImage *iplimage = cvCreateImage(
        cvSize(image.size.width, image.size.height), IPL_DEPTH_8U, 4
    );
    // Create a CGContext backed by the temporary IplImage
    CGContextRef contextRef = CGBitmapContextCreate(
        iplimage->imageData, iplimage->width, iplimage->height,
        iplimage->depth, iplimage->widthStep,
        colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrderDefault
    );
    // Draw the CGImage into the CGContext
    CGContextDrawImage(
        contextRef,
        CGRectMake(0, 0, image.size.width, image.size.height),
        imageRef
    );
    CGContextRelease(contextRef);
    CGColorSpaceRelease(colorSpace);
    // Create the 3-channel result IplImage
    IplImage *ret = cvCreateImage(cvGetSize(iplimage), IPL_DEPTH_8U, 3);
    cvCvtColor(iplimage, ret, CV_RGBA2BGR);
    cvReleaseImage(&iplimage);
    return ret;
}

+ (UIImage *)UIImageFromIplImage:(IplImage *)img {
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    // Create a 4-channel result IplImage
    IplImage *image = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 4);
    cvCvtColor(img, image, CV_BGR2RGBA);
    // Allocate the buffer for the CGImage
    NSData *data =
        [NSData dataWithBytes:image->imageData length:image->imageSize];
    CGDataProviderRef provider =
        CGDataProviderCreateWithCFData((CFDataRef)data);
    // Create a CGImage from the IplImage's pixel data
    CGImageRef imageRef = CGImageCreate(
        image->width, image->height,
        image->depth, image->depth * image->nChannels, image->widthStep,
        colorSpace, kCGImageAlphaNone | kCGBitmapByteOrderDefault,
        provider, NULL, false, kCGRenderingIntentDefault
    );
    // Wrap the CGImage in a UIImage
    UIImage *ret = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);
    return ret;
}
+ (UIImage *)perspectiveTransform:(UIImage *)originalImage :(CGFloat)h :(CGFloat)w :(CGPoint)point1 :(CGPoint)point2 :(CGPoint)point3 :(CGPoint)point4
{
    CvPoint2D32f srcQuad[4];
    srcQuad[0].x = point1.x;
    srcQuad[0].y = point1.y;
    srcQuad[1].x = point2.x;
    srcQuad[1].y = point2.y;
    srcQuad[2].x = point3.x;
    srcQuad[2].y = point3.y;
    srcQuad[3].x = point4.x;
    srcQuad[3].y = point4.y;
    IplImage *src = [self CreateIplImageFromUIImage:originalImage];
    IplImage *dst = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
    cvZero(dst);
    CGFloat width = src->width;
    CGFloat height = src->height;
    CvMat *mmat = cvCreateMat(3, 3, CV_32FC1);
    CvPoint2D32f *c1 = (CvPoint2D32f *)malloc(4 * sizeof(CvPoint2D32f));
    CvPoint2D32f *c2 = (CvPoint2D32f *)malloc(4 * sizeof(CvPoint2D32f));
    // Scale the selection points from view coordinates (w x h) to image pixels
    c1[0].x = round((width / w) * srcQuad[0].x); c1[0].y = round((height / h) * srcQuad[0].y);
    c1[1].x = round((width / w) * srcQuad[1].x); c1[1].y = round((height / h) * srcQuad[1].y);
    c1[2].x = round((width / w) * srcQuad[2].x); c1[2].y = round((height / h) * srcQuad[2].y);
    c1[3].x = round((width / w) * srcQuad[3].x); c1[3].y = round((height / h) * srcQuad[3].y);
    // Destination quad: the full image rectangle (TL, TR, BL, BR)
    c2[0].x = 0;         c2[0].y = 0;
    c2[1].x = width - 1; c2[1].y = 0;
    c2[2].x = 0;         c2[2].y = height - 1;
    c2[3].x = width - 1; c2[3].y = height - 1;
    mmat = cvGetPerspectiveTransform(c1, c2, mmat);
    free(c1);
    free(c2);
    cvWarpPerspective(src, dst, mmat, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
    cvReleaseImage(&src);
    cvReleaseMat(&mmat);
    UIImage *newImage = [self UIImageFromIplImage:dst];
    cvReleaseImage(&dst);
    return newImage;
}
Thanks for your help!
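One likely culprit for the blue tint (a guess, not confirmed in this thread): kCGImageAlphaNone in UIImageFromIplImage contradicts the 32-bits-per-pixel RGBA buffer that cvCvtColor produced, and Core Graphics may then interpret the channel order differently on the device than on the simulator. A minimal sketch of the kind of change that often fixes this, keeping everything else the same:

// Sketch only: describe the buffer as 4 bytes per pixel with the last byte
// ignored, matching the RGBA data from cvCvtColor(img, image, CV_BGR2RGBA).
CGImageRef imageRef = CGImageCreate(
    image->width, image->height,
    image->depth, image->depth * image->nChannels, image->widthStep,
    colorSpace, kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault,
    provider, NULL, false, kCGRenderingIntentDefault
);

If the tint persists, converting with CV_BGR2BGRA instead (so red and blue trade places) is a quick way to test whether the channels are simply reversed.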
After using this function to crop an image:
CGImageRef imageRef = CGImageCreateWithImageInRect([self.imageInput CGImage], rect);
UIImage *result = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
I bring "result" to use for process with pixel like that:
......
CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
CGContextRelease(context);
// Now rawData contains the image data in the RGBA8888 pixel format.
int byteIndex = 0;
for (NSInteger i = 0; i < width * height; i++) {
    int R = rawData[byteIndex];
    int G = rawData[byteIndex + 1];
    int B = rawData[byteIndex + 2];
    // Weighted (Rec. 601) luma, clamped to 0...255
    int test = [self getValueof:0.3 * R + 0.59 * G + 0.11 * B inRange:0 to:255];
    rawData[byteIndex]     = (char)test;
    rawData[byteIndex + 1] = (char)test;
    rawData[byteIndex + 2] = (char)test;
    byteIndex += 4;
}
ctx = CGBitmapContextCreate(rawData,
                            CGImageGetWidth(imageRef),
                            CGImageGetHeight(imageRef),
                            8,
                            CGImageGetBytesPerRow(imageRef),
                            CGImageGetColorSpace(imageRef),
                            kCGImageAlphaPremultipliedLast);
imageRef = CGBitmapContextCreateImage(ctx);
UIImage *rawImage = [UIImage imageWithCGImage:imageRef];
CGContextRelease(ctx);
CGImageRelease(imageRef);
free(rawData);
Then I get a crash. Does anyone know about this? Please help me.
Update, crash report: copy_read_only: vm_copy failed: status 1.
After a long time searching for a solution to this crash, here is what I learned:
- When processing bitmaps on iOS, if you don't take the image's orientation into account, you may get strange crashes.
For my crash above, the fix was very simple: change
UIImage *result = [UIImage imageWithCGImage:imageRef];
to
UIImage *result = [UIImage imageWithCGImage:imageRef scale:self.imageInput.scale orientation:self.imageInput.imageOrientation];
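For anyone hitting the same thing, here is a minimal sketch of the same idea as a reusable helper that redraws the image upright before any pixel work; the method name is mine, not from the original post:

// Redraw an image so its bitmap matches what the user sees, discarding
// any EXIF-style orientation. drawInRect: applies the orientation transform.
- (UIImage *)normalizedImage:(UIImage *)image {
    if (image.imageOrientation == UIImageOrientationUp)
        return image; // already upright, nothing to do
    UIGraphicsBeginImageContextWithOptions(image.size, NO, image.scale);
    [image drawInRect:CGRectMake(0, 0, image.size.width, image.size.height)];
    UIImage *normalized = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return normalized;
}

Running the cropped image through a helper like this before the pixel loop avoids orientation-dependent strides entirely.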
I'm using the UIImagePickerController class. I have a button, and when I tap that button I want the image to brighten. I have searched online, but the code I found is not working.
Here is the code I found and used:
- (UIImage *)brightness:(UIImage *)image
{
    int level = 200; // an int value from -255 to 255
    CGImageRef inImage = image.CGImage; // input CGImage
    CGContextRef ctx;
    CFDataRef m_DataRef;
    m_DataRef = CGDataProviderCopyData(CGImageGetDataProvider(inImage));
    UInt8 *m_PixelBuf = (UInt8 *)CFDataGetBytePtr(m_DataRef);
    int length = CFDataGetLength(m_DataRef);
    CGImageGetBitsPerComponent(inImage), CGImageGetBitsPerPixel(inImage), CGImageGetBytesPerRow(inImage); // (this statement has no effect)
    for (int index = 0; index < length; index += 4)
    {
        Byte tempR = m_PixelBuf[index + 1];
        Byte tempG = m_PixelBuf[index + 2];
        Byte tempB = m_PixelBuf[index + 3];
        int outputRed   = level + tempR;
        int outputGreen = level + tempG;
        int outputBlue  = level + tempB;
        if (outputRed > 255)   outputRed = 255;
        if (outputGreen > 255) outputGreen = 255;
        if (outputBlue > 255)  outputBlue = 255;
        if (outputRed < 0)     outputRed = 0;
        if (outputGreen < 0)   outputGreen = 0;
        if (outputBlue < 0)    outputBlue = 0;
        m_PixelBuf[index + 1] = outputRed;
        m_PixelBuf[index + 2] = outputGreen;
        m_PixelBuf[index + 3] = outputBlue;
    }
    ctx = CGBitmapContextCreate(m_PixelBuf,
                                CGImageGetWidth(inImage),
                                CGImageGetHeight(inImage),
                                8,
                                CGImageGetBytesPerRow(inImage),
                                CGImageGetColorSpace(inImage),
                                kCGImageAlphaPremultipliedFirst);
    CGImageRef imageRef = CGBitmapContextCreateImage(ctx);
    UIImage *rawImage = [UIImage imageWithCGImage:imageRef];
    CGContextRelease(ctx);
    CGImageRelease(imageRef);
    CFRelease(m_DataRef);
    return rawImage;
}
The image stays at its original brightness; it is not changing.
In general the idea looks good; there is probably an error in how the pixel data is updated. But to change brightness I suggest using Core Graphics instead of per-pixel processing:
CGFloat brightness = 0.5;
UIGraphicsBeginImageContext(image.size);
CGRect imageRect = CGRectMake(0, 0, image.size.width, image.size.height);
CGContextRef context = UIGraphicsGetCurrentContext();
// Original image
[image drawInRect:imageRect];
// Brightness overlay
CGContextSetFillColorWithColor(context, [UIColor colorWithRed:1.0 green:1.0 blue:1.0 alpha:brightness].CGColor);
CGContextAddRect(context, imageRect);
CGContextFillPath(context);
UIImage* resultImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
You can change the brightness overlay color to tune the result.
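One design note on this approach (my addition, not from the original answer): a white overlay can only brighten. A sketch that also darkens, by switching to a black overlay when the brightness value is negative:

CGFloat brightness = -0.3; // illustrative; range -1.0 ... 1.0
UIColor *overlay = (brightness >= 0)
    ? [UIColor colorWithWhite:1.0 alpha:brightness]   // brighten
    : [UIColor colorWithWhite:0.0 alpha:-brightness]; // darken
UIGraphicsBeginImageContext(image.size);
CGRect imageRect = CGRectMake(0, 0, image.size.width, image.size.height);
[image drawInRect:imageRect];
CGContextRef ctx = UIGraphicsGetCurrentContext();
CGContextSetFillColorWithColor(ctx, overlay.CGColor);
CGContextFillRect(ctx, imageRect);
UIImage *adjusted = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();

Overlaying also washes out contrast, not just brightness; for precise control a Core Image filter such as CIColorControls is the better tool.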
I am having an issue with some code that changes a UIImage to grayscale. It works correctly on iPhone/iPod, but on iPad whatever is already drawn gets stretched and skewed in the process.
It also sometimes crashes, only on iPad, on the line
imageRef = CGBitmapContextCreateImage (ctx);
Here is the code:
CGContextRef ctx;
CGImageRef imageRef = [self.drawImage.image CGImage];
NSUInteger width = CGImageGetWidth(imageRef);
NSUInteger height = CGImageGetHeight(imageRef);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
unsigned char *rawData = malloc(height * width * 4);
NSUInteger bytesPerPixel = 4;
NSUInteger bytesPerRow = bytesPerPixel * width;
NSUInteger bitsPerComponent = 8;
CGContextRef context = CGBitmapContextCreate(rawData, width, height,
bitsPerComponent, bytesPerRow, colorSpace,
kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGColorSpaceRelease(colorSpace);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
CGContextRelease(context);
int byteIndex = 0;
int grayScale;
for (int ii = 0; ii < width * height; ++ii)
{
    grayScale = (rawData[byteIndex] + rawData[byteIndex + 1] + rawData[byteIndex + 2]) / 3;
    rawData[byteIndex]     = (char)grayScale;
    rawData[byteIndex + 1] = (char)grayScale;
    rawData[byteIndex + 2] = (char)grayScale;
    //rawData[byteIndex+3] = 255;
    byteIndex += 4;
}
ctx = CGBitmapContextCreate(rawData,
                            CGImageGetWidth(imageRef),
                            CGImageGetHeight(imageRef),
                            8,
                            CGImageGetBytesPerRow(imageRef),
                            CGImageGetColorSpace(imageRef),
                            kCGImageAlphaPremultipliedLast);
imageRef = CGBitmapContextCreateImage(ctx);
UIImage *rawImage = [UIImage imageWithCGImage:imageRef];
CGContextRelease(ctx);
self.drawImage.image = rawImage;
free(rawData);
Consider leveraging the CIColorMonochrome filter available in iOS 6 (the sample below is C#, using Xamarin/MonoTouch):
void ToMonochrome()
{
    var mono = new CIColorMonochrome();
    mono.Color = CIColor.FromRgb(1, 1, 1);
    mono.Intensity = 1.0f;
    var uiImage = new UIImage("Images/photo.jpg");
    mono.Image = CIImage.FromCGImage(uiImage.CGImage);
    DisplayFilterOutput(mono, ImageView);
}

static void DisplayFilterOutput(CIFilter filter, UIImageView imageView)
{
    CIImage output = filter.OutputImage;
    if (output == null)
        return;
    var context = CIContext.FromOptions(null);
    var renderedImage = context.CreateCGImage(output, output.Extent);
    var finalImage = new UIImage(renderedImage);
    imageView.Image = finalImage;
}
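For readers following along in Objective-C, a rough equivalent sketch of the same filter; the image name and image view are placeholders:

// Sketch: CIColorMonochrome via the string-keyed CIFilter API.
UIImage *uiImage = [UIImage imageNamed:@"photo.jpg"]; // placeholder asset
CIFilter *mono = [CIFilter filterWithName:@"CIColorMonochrome"];
[mono setValue:[CIImage imageWithCGImage:uiImage.CGImage] forKey:kCIInputImageKey];
[mono setValue:[CIColor colorWithRed:1 green:1 blue:1] forKey:kCIInputColorKey];
[mono setValue:@1.0 forKey:kCIInputIntensityKey];
CIImage *output = mono.outputImage;
if (output != nil) {
    CIContext *context = [CIContext contextWithOptions:nil];
    CGImageRef rendered = [context createCGImage:output fromRect:output.extent];
    imageView.image = [UIImage imageWithCGImage:rendered]; // imageView assumed
    CGImageRelease(rendered);
}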
Figured it out with some help elsewhere. I got rid of the extra context that was being used and changed the bitmap format:
CGContextRef ctx;
CGImageRef imageRef = [self.drawImage.image CGImage];
NSUInteger width = CGImageGetWidth(imageRef);
NSUInteger height = CGImageGetHeight(imageRef);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
unsigned char *rawData = malloc(height * width * 4);
NSUInteger bytesPerPixel = 4;
NSUInteger bytesPerRow = bytesPerPixel * width;
NSUInteger bitsPerComponent = CGImageGetBitsPerComponent(imageRef);
ctx = CGBitmapContextCreate(rawData,
width,
height,
bitsPerComponent,
bytesPerRow,
colorSpace,
kCGImageAlphaPremultipliedFirst | kCGBitmapByteOrder32Host);
CGContextDrawImage(ctx, CGRectMake(0, 0, width, height), imageRef);
int byteIndex = 0;
int grayScale;
for (int ii = 0; ii < width * height; ++ii)
{
    grayScale = (rawData[byteIndex] + rawData[byteIndex + 1] + rawData[byteIndex + 2]) / 3;
    rawData[byteIndex]     = (char)grayScale;
    rawData[byteIndex + 1] = (char)grayScale;
    rawData[byteIndex + 2] = (char)grayScale;
    //rawData[byteIndex+3] = 255;
    byteIndex += 4;
}
imageRef = CGBitmapContextCreateImage(ctx);
UIImage* rawImage = [UIImage imageWithCGImage:imageRef];
CGColorSpaceRelease(colorSpace);
CGContextRelease(ctx);
self.drawImage.image = rawImage;
free(rawData);
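A plausible explanation for why the original version stretched and crashed (my reading of the fix, not spelled out above): the second context was built over rawData using CGImageGetBytesPerRow(imageRef), the source image's row stride, while rawData was laid out with exactly width * 4 bytes per row. CGImage rows are often padded, so when the two strides differ every row shears sideways and reads can run past the malloc'd buffer. A cheap guard to keep near this kind of code, using the variables already defined above:

// rawData was allocated assuming a tightly packed stride:
NSUInteger bufferBytesPerRow = bytesPerPixel * width; // == bytesPerRow above
// The source CGImage may use a padded stride that is larger:
NSUInteger sourceBytesPerRow = CGImageGetBytesPerRow(imageRef);
if (sourceBytesPerRow != bufferBytesPerRow) {
    // Using sourceBytesPerRow for a context over rawData (as the broken
    // version did) would shear rows and read past the buffer.
    NSLog(@"Note: source stride %lu != buffer stride %lu",
          (unsigned long)sourceBytesPerRow, (unsigned long)bufferBytesPerRow);
}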
I'm using the GLPaint example for my paint app, and I need to take a screenshot of the OpenGL ES [CAEAGLLayer] rendered content. I am using this function:
- (UIImage *)snapUIImage
{
    int s = 2;
    const int w = self.frame.size.width;
    const int h = self.frame.size.height;
    const NSInteger myDataLength = w * h * 4 * s * s;
    // allocate array and read pixels into it
    GLubyte *buffer = (GLubyte *)malloc(myDataLength);
    glReadPixels(0, 0, w * s, h * s, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
    // GL renders "upside down", so swap top to bottom into a new array;
    // there's gotta be a better way, but this works
    GLubyte *buffer2 = (GLubyte *)malloc(myDataLength);
    for (int y = 0; y < h * s; y++)
    {
        memcpy(buffer2 + (h * s - 1 - y) * w * 4 * s, buffer + (y * 4 * w * s), w * 4 * s);
    }
    free(buffer); // work with the flipped buffer, so get rid of the original one
    // make a data provider from the flipped buffer
    CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer2, myDataLength, NULL);
    // prep the ingredients
    int bitsPerComponent = 8;
    int bitsPerPixel = 32;
    int bytesPerRow = 4 * w * s;
    CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
    CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault | kCGImageAlphaLast;
    CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
    // make the CGImage
    CGImageRef imageRef = CGImageCreate(w * s, h * s, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);
    // then make the UIImage from that
    UIImage *myImage = [UIImage imageWithCGImage:imageRef scale:s orientation:UIImageOrientationUp];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpaceRef);
    // NOTE: with a NULL release callback above, buffer2 is never freed;
    // pass a release callback to CGDataProviderCreateWithData to avoid the leak
    return myImage;
}

- (void)captureToPhotoAlbum {
    UIImage *image = [self snapUIImage];
    UIImageWriteToSavedPhotosAlbum(image, self, nil, nil);
}
The above code works and I am getting the image, but it is not a high-resolution [Retina display] image. Please help.
Thanks in advance.
Working code solution
- (UIImage *)getGLScreenshot
{
    int myWidth = self.frame.size.width * 2;
    int myHeight = self.frame.size.height * 2;
    int myY = 0;
    int myX = 0;
    int bufferLength = myWidth * myHeight * 4;
    unsigned char *buffer = (unsigned char *)malloc(bufferLength);
    glReadPixels(myX, myY, myWidth, myHeight, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
    CGDataProviderRef ref = CGDataProviderCreateWithData(NULL, buffer, bufferLength, NULL);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGImageRef iref = CGImageCreate(myWidth, myHeight, 8, 32, myWidth * 4, colorSpace,
                                    kCGBitmapByteOrderDefault, ref, NULL, true, kCGRenderingIntentDefault);
    uint32_t *pixels = (uint32_t *)malloc(bufferLength);
    CGContextRef context = CGBitmapContextCreate(pixels, myWidth, myHeight, 8, myWidth * 4, CGImageGetColorSpace(iref),
                                                 kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Big);
    // Flip vertically: GL's origin is bottom-left, Core Graphics' is top-left
    CGContextTranslateCTM(context, 0.0, myHeight);
    CGContextScaleCTM(context, 1.0, -1.0);
    CGContextDrawImage(context, CGRectMake(0.0, 0.0, myWidth, myHeight), iref);
    CGImageRef outputRef = CGBitmapContextCreateImage(context);
    UIImage *image = nil;
    // regardOrientation is assumed to be a BOOL configured elsewhere
    if (regardOrientation) {
        UIDeviceOrientation deviceOrientation = [UIDevice currentDevice].orientation;
        if (deviceOrientation == UIDeviceOrientationPortraitUpsideDown) {
            image = [UIImage imageWithCGImage:outputRef scale:1 orientation:UIImageOrientationDown];
        } else if (deviceOrientation == UIDeviceOrientationLandscapeLeft) {
            image = [UIImage imageWithCGImage:outputRef scale:1 orientation:UIImageOrientationLeft];
        } else if (deviceOrientation == UIDeviceOrientationLandscapeRight) {
            image = [UIImage imageWithCGImage:outputRef scale:1 orientation:UIImageOrientationRight];
        } else {
            image = [UIImage imageWithCGImage:outputRef scale:1 orientation:UIImageOrientationUp];
        }
    } else {
        image = [UIImage imageWithCGImage:outputRef scale:1 orientation:UIImageOrientationUp];
    }
    CGImageRelease(iref);
    CGImageRelease(outputRef);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    CGDataProviderRelease(ref);
    free(buffer);
    free(pixels);
    UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil);
    return image;
}
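One caveat on this version (my note, not the original poster's): the hardcoded * 2 assumes a Retina scale factor of exactly 2. A sketch that derives the factor from the view instead, so the same code works on non-Retina screens and other scales:

// Derive the backing-store scale rather than hardcoding 2.
CGFloat scale = self.contentScaleFactor; // or [UIScreen mainScreen].scale
int myWidth  = (int)(self.frame.size.width  * scale);
int myHeight = (int)(self.frame.size.height * scale);

For CAEAGLLayer-backed views this only captures at that resolution if the layer's contentsScale was also set to match when the framebuffer was created.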
I want to create an app in which I want to do some image processing, so I would like to know whether there is any open-source image processing library available. I would also like to create a filter like this Glamour Filter; any help regarding this would be very much appreciated. If someone already has source code for sepia, black-and-white, rotate, or scale effects, please send it. Thanks
Here is the code for a sepia effect:
- (UIImage *)makeSepiaScale:(UIImage *)image
{
    CGImageRef cgImage = [image CGImage];
    CGDataProviderRef provider = CGImageGetDataProvider(cgImage);
    CFDataRef bitmapData = CGDataProviderCopyData(provider);
    UInt8 *data = (UInt8 *)CFDataGetBytePtr(bitmapData);
    int width = image.size.width;
    int height = image.size.height;
    NSInteger myDataLength = width * height * 4;
    for (int i = 0; i < myDataLength; i += 4)
    {
        UInt8 r_pixel = data[i];
        UInt8 g_pixel = data[i + 1];
        UInt8 b_pixel = data[i + 2];
        // standard sepia-tone matrix, clamped to 255
        int outputRed   = (r_pixel * .393) + (g_pixel * .769) + (b_pixel * .189);
        int outputGreen = (r_pixel * .349) + (g_pixel * .686) + (b_pixel * .168);
        int outputBlue  = (r_pixel * .272) + (g_pixel * .534) + (b_pixel * .131);
        if (outputRed > 255)   outputRed = 255;
        if (outputGreen > 255) outputGreen = 255;
        if (outputBlue > 255)  outputBlue = 255;
        data[i]     = outputRed;
        data[i + 1] = outputGreen;
        data[i + 2] = outputBlue;
    }
    CGDataProviderRef provider2 = CGDataProviderCreateWithData(NULL, data, myDataLength, NULL);
    int bitsPerComponent = 8;
    int bitsPerPixel = 32;
    int bytesPerRow = 4 * width;
    CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
    CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
    CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
    CGImageRef imageRef = CGImageCreate(width, height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider2, NULL, NO, renderingIntent);
    CGColorSpaceRelease(colorSpaceRef); // you can release this now
    CGDataProviderRelease(provider2);   // you can release this now
    CFRelease(bitmapData);
    UIImage *sepiaImage = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef); // you can release this now
    return sepiaImage;
}
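For completeness (my addition): on iOS 5 and later, Core Image ships a ready-made sepia filter, which avoids the manual pixel loop and its byte-layout pitfalls. A short sketch:

// Sketch: CISepiaTone applies a similar sepia matrix for you.
CIImage *input = [CIImage imageWithCGImage:image.CGImage];
CIFilter *sepia = [CIFilter filterWithName:@"CISepiaTone"];
[sepia setValue:input forKey:kCIInputImageKey];
[sepia setValue:@0.8 forKey:kCIInputIntensityKey]; // 0.0 ... 1.0
CIContext *context = [CIContext contextWithOptions:nil];
CGImageRef rendered = [context createCGImage:sepia.outputImage fromRect:input.extent];
UIImage *sepiaImage = [UIImage imageWithCGImage:rendered];
CGImageRelease(rendered);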
Here is the code for a black & white effect:
- (UIImage *)createGrayCopy:(UIImage *)source {
    int width = source.size.width;
    int height = source.size.height;
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
    CGContextRef context = CGBitmapContextCreate(nil, width, height,
                                                 8, // bits per component
                                                 0, // let Core Graphics pick the row stride
                                                 colorSpace,
                                                 kCGImageAlphaNone);
    CGColorSpaceRelease(colorSpace);
    if (context == NULL) {
        return nil;
    }
    // Drawing an RGB image into a grayscale context performs the conversion
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), source.CGImage);
    CGImageRef grayImageRef = CGBitmapContextCreateImage(context);
    UIImage *grayImage = [UIImage imageWithCGImage:grayImageRef];
    CGImageRelease(grayImageRef); // avoid leaking the intermediate CGImage
    CGContextRelease(context);
    return grayImage;
}
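A quick usage sketch for the two helpers above (the image name is a placeholder):

UIImage *original = [UIImage imageNamed:@"photo.jpg"]; // placeholder asset
UIImage *sepia = [self makeSepiaScale:original];
UIImage *gray  = [self createGrayCopy:original];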
Search for OpenCV, the open-source computer vision and image processing library.