How do I create/render a UIImage from a 3D transformed UIImageView? (iPhone)

After applying a 3D transform to a UIImageView's layer, I need to save the resulting "view" as a new UIImage. It seemed like a simple task at first :-) but no luck so far, and searching hasn't turned up any clues :-( so I'm hoping someone will be kind enough to point me in the right direction.
A very simple iPhone project is available here.
Thanks.
- (void)transformImage {
float degrees = 12.0;
float zDistance = 250;
CATransform3D transform3D = CATransform3DIdentity;
transform3D.m34 = 1.0 / zDistance; // the m34 cell of the matrix controls perspective, and zDistance affects the "sharpness" of the transform
transform3D = CATransform3DRotate(transform3D, DEGREES_TO_RADIANS(degrees), 1, 0, 0); // perspective rotation around the x-axis
imageView.layer.transform = transform3D;
}
/* FAIL : capturing layer contents doesn't get the transformed image -- just the original
CGImageRef newImageRef = (CGImageRef)imageView.layer.contents;
UIImage *image = [UIImage imageWithCGImage:newImageRef];
*/
/* FAIL : the docs for renderInContext state that it does not render 3D transforms
UIGraphicsBeginImageContext(imageView.image.size);
[imageView.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
*/
//
// header
//
#import <QuartzCore/QuartzCore.h>
#define DEGREES_TO_RADIANS(x) ((x) * M_PI / 180.0)
UIImageView *imageView;
@property (nonatomic, retain) IBOutlet UIImageView *imageView;
//
// code
//
@synthesize imageView;
- (void)transformImage {
float degrees = 12.0;
float zDistance = 250;
CATransform3D transform3D = CATransform3DIdentity;
transform3D.m34 = 1.0 / zDistance; // the m34 cell of the matrix controls perspective, and zDistance affects the "sharpness" of the transform
transform3D = CATransform3DRotate(transform3D, DEGREES_TO_RADIANS(degrees), 1, 0, 0); // perspective rotation around the x-axis
imageView.layer.transform = transform3D;
}
- (UIImage *)captureView:(UIImageView *)view {
UIGraphicsBeginImageContext(view.frame.size);
[view.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return newImage;
}
- (void)imageSavedToPhotosAlbum:(UIImage *)image didFinishSavingWithError:(NSError *)error contextInfo:(void *)contextInfo {
NSString *title = @"Save to Photo Album";
NSString *message = (error ? [error description] : @"Success!");
UIAlertView *alert = [[UIAlertView alloc] initWithTitle:title message:message delegate:nil cancelButtonTitle:@"OK" otherButtonTitles:nil];
[alert show];
[alert release];
}
- (IBAction)saveButtonClicked:(id)sender {
UIImage *newImage = [self captureView:imageView];
UIImageWriteToSavedPhotosAlbum(newImage, self, @selector(imageSavedToPhotosAlbum:didFinishSavingWithError:contextInfo:), nil);
}

I ended up writing a pixel-by-pixel render method that runs on the CPU, using the inverse of the view transform.
Basically, it first renders the original UIImageView into a UIImage. Then every pixel of the output is mapped back through the inverse transform matrix to find its source pixel, which generates the transformed UIImage.
RenderUIImageView.h
#import <UIKit/UIKit.h>
#import <QuartzCore/CATransform3D.h>
#import <QuartzCore/CALayer.h>
@interface RenderUIImageView : UIImageView
- (UIImage *)generateImage;
@end
RenderUIImageView.m
#import "RenderUIImageView.h"
@interface RenderUIImageView()
@property (assign) CATransform3D transform;
@property (assign) CGRect rect;
@property (assign) float denominatorx;
@property (assign) float denominatory;
@property (assign) float denominatorw;
@property (assign) float factor;
@end
@implementation RenderUIImageView
- (UIImage *)generateImage
{
_transform = self.layer.transform;
_denominatorx = _transform.m12 * _transform.m21 - _transform.m11 * _transform.m22 + _transform.m14 * _transform.m22 * _transform.m41 - _transform.m12 * _transform.m24 * _transform.m41 - _transform.m14 * _transform.m21 * _transform.m42 + _transform.m11 * _transform.m24 * _transform.m42;
_denominatory = -_transform.m12 * _transform.m21 + _transform.m11 * _transform.m22 - _transform.m14 * _transform.m22 * _transform.m41 + _transform.m12 * _transform.m24 * _transform.m41 + _transform.m14 * _transform.m21 * _transform.m42 - _transform.m11 * _transform.m24 * _transform.m42;
_denominatorw = _transform.m12 * _transform.m21 - _transform.m11 * _transform.m22 + _transform.m14 * _transform.m22 * _transform.m41 - _transform.m12 * _transform.m24 * _transform.m41 - _transform.m14 * _transform.m21 * _transform.m42 + _transform.m11 * _transform.m24 * _transform.m42;
_rect = self.bounds;
if (UIGraphicsBeginImageContextWithOptions != NULL) {
UIGraphicsBeginImageContextWithOptions(_rect.size, NO, 0.0);
} else {
UIGraphicsBeginImageContext(_rect.size);
}
if ([[UIScreen mainScreen] respondsToSelector:@selector(displayLinkWithTarget:selector:)] &&
([UIScreen mainScreen].scale == 2.0)) {
_factor = 2.0f;
} else {
_factor = 1.0f;
}
UIImageView *img = [[UIImageView alloc] initWithFrame:_rect];
img.image = self.image;
[img.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage *source = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
CGContextRef ctx;
CGImageRef imageRef = [source CGImage];
NSUInteger width = CGImageGetWidth(imageRef);
NSUInteger height = CGImageGetHeight(imageRef);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
unsigned char *inputData = malloc(height * width * 4);
unsigned char *outputData = malloc(height * width * 4);
NSUInteger bytesPerPixel = 4;
NSUInteger bytesPerRow = bytesPerPixel * width;
NSUInteger bitsPerComponent = 8;
CGContextRef context = CGBitmapContextCreate(inputData, width, height,
bitsPerComponent, bytesPerRow, colorSpace,
kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGColorSpaceRelease(colorSpace);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
CGContextRelease(context);
context = CGBitmapContextCreate(outputData, width, height,
bitsPerComponent, bytesPerRow, colorSpace,
kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGColorSpaceRelease(colorSpace);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
CGContextRelease(context);
for (int ii = 0 ; ii < width * height ; ++ii)
{
int x = ii % width;
int y = ii / width;
int indexOutput = 4 * x + 4 * width * y;
CGPoint p = [self modelToScreen:(x*2/_factor - _rect.size.width)/2.0 :(y*2/_factor - _rect.size.height)/2.0];
p.x *= _factor;
p.y *= _factor;
int indexInput = 4*(int)p.x + (4*width*(int)p.y);
if (p.x >= width || p.x < 0 || p.y >= height || p.y < 0 || indexInput >= width * height * 4)
{
outputData[indexOutput] = 0.0;
outputData[indexOutput+1] = 0.0;
outputData[indexOutput+2] = 0.0;
outputData[indexOutput+3] = 0.0;
}
else
{
outputData[indexOutput] = inputData[indexInput];
outputData[indexOutput+1] = inputData[indexInput + 1];
outputData[indexOutput+2] = inputData[indexInput + 2];
outputData[indexOutput+3] = 255.0;
}
}
ctx = CGBitmapContextCreate(outputData, CGImageGetWidth(imageRef), CGImageGetHeight(imageRef), 8, CGImageGetBytesPerRow(imageRef), CGImageGetColorSpace(imageRef), kCGImageAlphaPremultipliedLast);
CGImageRef outputRef = CGBitmapContextCreateImage(ctx);
UIImage *rawImage = [UIImage imageWithCGImage:outputRef];
CGImageRelease(outputRef); // the UIImage retains it; release our reference to avoid a leak
CGContextRelease(ctx);
free(inputData);
free(outputData);
return rawImage;
}
- (CGPoint)modelToScreen:(float)x :(float)y
{
float xp = (_transform.m22 * _transform.m41 - _transform.m21 * _transform.m42 - _transform.m22 * x + _transform.m24 * _transform.m42 * x + _transform.m21 * y - _transform.m24 * _transform.m41 * y) / _denominatorx;
float yp = (-_transform.m11 * _transform.m42 + _transform.m12 * (_transform.m41 - x) + _transform.m14 * _transform.m42 * x + _transform.m11 * y - _transform.m14 * _transform.m41 * y) / _denominatory;
float wp = (_transform.m12 * _transform.m21 - _transform.m11 * _transform.m22 + _transform.m14 * _transform.m22 * x - _transform.m12 * _transform.m24 * x - _transform.m14 * _transform.m21 * y + _transform.m11 * _transform.m24 * y) / _denominatorw;
CGPoint result = CGPointMake(xp/wp, yp/wp);
return result;
}
@end
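Usage is then a one-liner; this sketch assumes the image view in the nib is typed as RenderUIImageView and its layer already carries the 3D transform (my addition, not from the original answer):
UIImage *flattened = [(RenderUIImageView *)imageView generateImage];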

Theoretically, you could use the (now-allowed) undocumented call UIGetScreenImage() after quickly rendering the view to the screen on a black background, but in practice this would be slow and ugly, so don't use it ;P.

I had the same problem as you, and I found the solution!
I wanted to rotate the UIImageView, because I would have an animation.
And to save the image, I use this method:
void CGContextConcatCTM(CGContextRef c, CGAffineTransform transform)
The transform param is the transform of your UIImageView, so anything you have done to the imageView will be applied to the image as well.
And I have written a category method on UIImage.
-(UIImage *)imageRotateByTransform:(CGAffineTransform)transform{
// calculate the size of the rotated view's containing box for our drawing space
UIView *rotatedViewBox = [[UIView alloc] initWithFrame:CGRectMake(0,0,self.size.width, self.size.height)];
rotatedViewBox.transform = transform;
CGSize rotatedSize = rotatedViewBox.frame.size;
[rotatedViewBox release];
// Create the bitmap context
UIGraphicsBeginImageContext(rotatedSize);
CGContextRef bitmap = UIGraphicsGetCurrentContext();
// Move the origin to the middle of the image so we will rotate and scale around the center.
CGContextTranslateCTM(bitmap, rotatedSize.width/2, rotatedSize.height/2);
//Rotate the image context using the transform
CGContextConcatCTM(bitmap, transform);
// Now, draw the rotated/scaled image into the context
CGContextScaleCTM(bitmap, 1.0, -1.0);
CGContextDrawImage(bitmap, CGRectMake(-self.size.width / 2, -self.size.height / 2, self.size.width, self.size.height), [self CGImage]);
UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return newImage;
}
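Hypothetical usage, flattening the image view's current affine transform into its image:
UIImage *transformedImage = [imageView.image imageRotateByTransform:imageView.transform];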
Hope this will help you.

Have you had a look at this? UIImage from UIView

I had the same problem; I was able to use UIView's drawViewHierarchyInRect:afterScreenUpdates: method, available from iOS 7.0 (Documentation).
It draws the whole tree as it appears on the screen.
UIGraphicsBeginImageContextWithOptions(viewToRender.bounds.size, YES, 0);
[viewToRender drawViewHierarchyInRect:viewToRender.bounds afterScreenUpdates:YES];
UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();

Let's say you have a UIImageView called imageView.
If you apply a 3D transform and try to render this view with UIGraphicsImageRenderer, the transform is ignored.
imageView.layer.transform = someTransform3d
But if you convert the CATransform3D to a CGAffineTransform using CATransform3DGetAffineTransform and apply it to the transform property of the image view, it works.
imageView.transform = CATransform3DGetAffineTransform(someTransform3d)
And then, you can use the extension below to save it as UIImage
extension UIView {
func asImage() -> UIImage {
let renderer = UIGraphicsImageRenderer(bounds: bounds)
return renderer.image { rendererContext in
layer.render(in: rendererContext.cgContext)
}
}
}
And just call
let image = imageView.asImage()

In your captureView: method, try replacing this line:
[view.layer renderInContext:UIGraphicsGetCurrentContext()];
with this:
[view.layer.superlayer renderInContext:UIGraphicsGetCurrentContext()];
You may have to adjust the size you use to create the image context.
I don't see anything in the API doc that says renderInContext: ignores 3D transformations. However, the transformations apply to the layer, not its contents, which is why you need to render the superlayer to see the transformation applied.
Note that calling drawRect: on the superview definitely won't work, as drawRect: does not draw subviews.
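For reference, a minimal sketch of the adjusted capture method described above, assuming the transformed image view is the only interesting content in its superview (names are mine):
- (UIImage *)captureTransformedView:(UIView *)view {
UIView *container = view.superview;
UIGraphicsBeginImageContext(container.bounds.size);
// Render the superlayer, so the sublayer's transform is applied during compositing
[container.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return newImage;
}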

3D transform on UIImage / CGImageRef
I've improved on Marcos Fuentes's answer. You should be able to calculate the mapping of each pixel yourself. It's not perfect, but it does the trick...
It is available in this repository: http://github.com/hfossli/AGGeometryKit/
The interesting files are:
https://github.com/hfossli/AGGeometryKit/blob/master/Source/AGTransformPixelMapper.m
https://github.com/hfossli/AGGeometryKit/blob/master/Source/CGImageRef%2BCATransform3D.m
https://github.com/hfossli/AGGeometryKit/blob/master/Source/UIImage%2BCATransform3D.m
3D transform on UIView / UIImageView
https://stackoverflow.com/a/12820877/202451
Then you will have full control over each point in the quadrilateral. :)

A solution I found that at least worked in my case was to subclass CALayer. When a renderInContext: message is sent to a layer, that layer automatically forwards that message to all its sublayers. So all I had to do was to subclass CALayer and override the renderInContext: method and render what I needed to be rendered in the provided context.
For example, in my code I had a layer for which I was setting its contents to an image of an arrow:
UIImage *image = [UIImage imageNamed:@"arrow.png"];
MYLayer *myLayer = [[MYLayer alloc] init];
[myLayer setContents:(__bridge id)[image CGImage]];
[self.mainLayer addSublayer:myLayer];
Now when I applied a 3D 180-degree rotation over the Y-axis on the arrow and tried to do a [self.mainLayer renderInContext:context] afterwards, I was still getting the un-rotated image.
So in my subclass MYLayer I overrode renderInContext: and used an already-rotated image to draw in the provided context:
- (void)renderInContext:(CGContextRef)ctx
{
NSLog(@"Rendered in context");
UIImage *image = [UIImage imageNamed:@"arrow_rotated.png"];
CGContextDrawImage(ctx, self.bounds, image.CGImage);
}
This worked in my case; however, I can see that if you are doing lots of 3D transforms, you may not be able to have an image ready for every possible scenario. In many other cases, though, it should be possible to render the result of the 3D transform using 2D transforms in the passed context. For example, in my case, instead of using a different image arrow_rotated.png, I could use the arrow.png image, mirror it, and draw it in the context.
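For instance, a sketch of that mirroring variant of the override (assuming a horizontal flip approximates the 180-degree Y-axis rotation for this asset):
- (void)renderInContext:(CGContextRef)ctx
{
UIImage *image = [UIImage imageNamed:@"arrow.png"];
CGContextSaveGState(ctx);
// Flip horizontally around the layer's vertical midline
CGContextTranslateCTM(ctx, self.bounds.size.width, 0);
CGContextScaleCTM(ctx, -1.0, 1.0);
CGContextDrawImage(ctx, self.bounds, image.CGImage);
CGContextRestoreGState(ctx);
}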

Related

How to fit image in irregular shape frame

I have a problem regarding irregular shapes. I searched a lot but nothing was useful for me. I have a number of frames of irregular shape, and each frame is again divided into sub-areas. I want to fit images from the photo library into each sub-area of a frame. But I am unable to get the location of each sub-area, and since the shape is irregular, fitting an image into that area is another problem. Can anyone help me? This is an example of that frame.
You can never have irregular-shaped frames. Frames will always be rectangular.
You can get this done by detecting transparent areas.
Refer to this link. It will give you an idea of how to do that :)
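As a sketch of the transparent-area idea (the function name and the premise that each sub-area is a transparent "hole" in the frame image are my assumptions): read the alpha of the pixel under a tap to decide whether the tap landed inside a hole.
static BOOL pointIsTransparent(UIImage *image, CGPoint point) {
unsigned char pixel[4] = {0};
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef ctx = CGBitmapContextCreate(pixel, 1, 1, 8, 4, colorSpace,
kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGColorSpaceRelease(colorSpace);
// Shift the drawing so the tapped pixel lands on the 1x1 context
// (the y term accounts for Core Graphics' bottom-left origin)
CGContextTranslateCTM(ctx, -point.x, point.y - image.size.height);
CGContextDrawImage(ctx, CGRectMake(0, 0, image.size.width, image.size.height), image.CGImage);
CGContextRelease(ctx);
return pixel[3] == 0; // alpha of the sampled pixel
}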
Do you want to clip the various images by the arc of the circle? For example, here is a screen snapshot with four images (just random images I got from a search for dogs on http://images.google.com):
And here are the same four images cropped by a circle (or more accurately, each of the four images were separately cropped by the same circle path):
Here is the code that does that
- (UIImage *)cropImage:(UIImage *)image locatedAt:(CGRect)imageFrame byCircleAt:(CGPoint)center withRadius:(float)radius
{
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(NULL, imageFrame.size.width, imageFrame.size.height, 8, 4 * imageFrame.size.width, colorSpace, kCGImageAlphaPremultipliedFirst);
CGContextBeginPath(context);
CGRect ellipseFrame = CGRectMake(center.x - imageFrame.origin.x - radius, imageFrame.size.height - (center.y - imageFrame.origin.y - radius) - radius * 2.0, radius * 2.0, radius * 2.0);
CGContextAddEllipseInRect(context, ellipseFrame);
CGContextClosePath(context);
CGContextClip(context);
CGContextDrawImage(context, CGRectMake(0, 0, imageFrame.size.width, imageFrame.size.height), image.CGImage);
CGImageRef imageMasked = CGBitmapContextCreateImage(context);
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
UIImage *newImage = [UIImage imageWithCGImage:imageMasked];
CGImageRelease(imageMasked);
return newImage;
}
- (void)addSingleCroppedImage:(UIImage *)image at:(CGRect)imageFrame byCircleAt:(CGPoint)center withRadius:(float)radius
{
UIImage *newImage = [self cropImage:image locatedAt:imageFrame byCircleAt:center withRadius:radius];
UIImageView *imageView = [[UIImageView alloc] initWithFrame:imageFrame];
imageView.image = newImage;
[self.view addSubview:imageView];
}
- (void)addCroppedImages
{
NSString *bundlePath = [[NSBundle mainBundle] resourcePath];
CGPoint center = CGPointMake(self.view.frame.size.width / 2.0, self.view.frame.size.width / 2.0);
float radius = 150.0;
UIImage *dog1 = [UIImage imageWithContentsOfFile:[bundlePath stringByAppendingPathComponent:@"imgres-1.jpg"]];
UIImage *dog2 = [UIImage imageWithContentsOfFile:[bundlePath stringByAppendingPathComponent:@"imgres-2.jpg"]];
UIImage *dog3 = [UIImage imageWithContentsOfFile:[bundlePath stringByAppendingPathComponent:@"imgres-3.jpg"]];
UIImage *dog4 = [UIImage imageWithContentsOfFile:[bundlePath stringByAppendingPathComponent:@"imgres-4.jpg"]];
CGRect frame;
UIImage *currentImage;
// upper left
currentImage = dog1;
frame = CGRectMake(center.x - currentImage.size.width, center.y - currentImage.size.height, currentImage.size.width, currentImage.size.height);
[self addSingleCroppedImage:currentImage at:frame byCircleAt:center withRadius:radius];
// upper right
currentImage = dog2;
frame = CGRectMake(center.x, center.y - currentImage.size.height, currentImage.size.width, currentImage.size.height);
[self addSingleCroppedImage:currentImage at:frame byCircleAt:center withRadius:radius];
// lower left
currentImage = dog3;
frame = CGRectMake(center.x - currentImage.size.width, center.y, currentImage.size.width, currentImage.size.height);
[self addSingleCroppedImage:currentImage at:frame byCircleAt:center withRadius:radius];
// lower right
currentImage = dog4;
frame = CGRectMake(center.x, center.y, currentImage.size.width, currentImage.size.height);
[self addSingleCroppedImage:currentImage at:frame byCircleAt:center withRadius:radius];
}

Save OpenGL drawn item as an image

I have downloaded the sample code GLPaint from developer.Apple website to draw pictures on a Canvas using OpenGL.
I have made many changes to the GLPaint application to meet my requirements. Now, I would like to save the drawn item into photo-library as an image.
I know the method to save an image to the photo library. So, I tried to create the corresponding image file after drawing a picture. Do you know a good way to do it? Any help on this is highly appreciated.
The code details are described below.
PaintingView.h
EAGLContext *context;
// OpenGL names for the renderbuffer and framebuffers used to render to this view
GLuint viewRenderbuffer, viewFramebuffer;
// OpenGL name for the depth buffer that is attached to viewFramebuffer, if it exists (0 if it does not exist)
GLuint depthRenderbuffer;
GLuint brushTexture;
CGPoint location;
CGPoint previousLocation;
PaintingView.m
// Handles the start of a touch
- (void)touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event
{
CGRect bounds = [self bounds];
UITouch* touch = [[event touchesForView:self] anyObject];
firstTouch = YES;
// Convert touch point from UIView referential to OpenGL one (upside-down flip)
location = [touch locationInView:self];
location.y = bounds.size.height - location.y;
}
// Handles the continuation of a touch.
- (void)touchesMoved:(NSSet *)touches withEvent:(UIEvent *)event
{
CGRect bounds = [self bounds];
UITouch* touch = [[event touchesForView:self] anyObject];
// Convert touch point from UIView referential to OpenGL one (upside-down flip)
if (firstTouch) {
firstTouch = NO;
previousLocation = [touch previousLocationInView:self];
previousLocation.y = bounds.size.height - previousLocation.y;
} else {
location = [touch locationInView:self];
location.y = bounds.size.height - location.y;
previousLocation = [touch previousLocationInView:self];
previousLocation.y = bounds.size.height - previousLocation.y;
}
// Render the stroke
[self renderLineFromPoint:previousLocation toPoint:location];
}
// Handles the end of a touch event when the touch is a tap.
- (void)touchesEnded:(NSSet *)touches withEvent:(UIEvent *)event
{
CGRect bounds = [self bounds];
UITouch* touch = [[event touchesForView:self] anyObject];
if (firstTouch) {
firstTouch = NO;
previousLocation = [touch previousLocationInView:self];
previousLocation.y = bounds.size.height - previousLocation.y;
[self renderLineFromPoint:previousLocation toPoint:location];
}
}
// Draws a line onscreen based on where the user touches
- (void) renderLineFromPoint:(CGPoint)start toPoint:(CGPoint)end
{
static GLfloat* vertexBuffer = NULL;
static NSUInteger vertexMax = 64;
NSUInteger vertexCount = 0,
count,
i;
[EAGLContext setCurrentContext:context];
glBindFramebufferOES(GL_FRAMEBUFFER_OES, viewFramebuffer);
// Convert locations from Points to Pixels
CGFloat scale = self.contentScaleFactor;
start.x *= scale;
start.y *= scale;
end.x *= scale;
end.y *= scale;
// Allocate vertex array buffer
if(vertexBuffer == NULL)
vertexBuffer = malloc(vertexMax * 2 * sizeof(GLfloat));
// Add points to the buffer so there are drawing points every X pixels
count = MAX(ceilf(sqrtf((end.x - start.x) * (end.x - start.x) + (end.y - start.y) * (end.y - start.y)) / kBrushPixelStep), 1);
for(i = 0; i < count; ++i) {
if(vertexCount == vertexMax) {
vertexMax = 2 * vertexMax;
vertexBuffer = realloc(vertexBuffer, vertexMax * 2 * sizeof(GLfloat));
}
vertexBuffer[2 * vertexCount + 0] = start.x + (end.x - start.x) * ((GLfloat)i / (GLfloat)count);
vertexBuffer[2 * vertexCount + 1] = start.y + (end.y - start.y) * ((GLfloat)i / (GLfloat)count);
vertexCount += 1;
}
// Render the vertex array
glVertexPointer(2, GL_FLOAT, 0, vertexBuffer);
glDrawArrays(GL_POINTS, 0, vertexCount);
// Display the buffer
glBindRenderbufferOES(GL_RENDERBUFFER_OES, viewRenderbuffer);
[context presentRenderbuffer:GL_RENDERBUFFER_OES];
}
// Erases the screen
- (void) erase
{
[EAGLContext setCurrentContext:context];
// Clear the buffer
glBindFramebufferOES(GL_FRAMEBUFFER_OES, viewFramebuffer);
glClearColor(0.0, 0.0, 0.0, 0.0);
glClear(GL_COLOR_BUFFER_BIT);
// Display the buffer
glBindRenderbufferOES(GL_RENDERBUFFER_OES, viewRenderbuffer);
[context presentRenderbuffer:GL_RENDERBUFFER_OES];
}
// The GL view is stored in the nib file. When it's unarchived it's sent -initWithCoder:
- (id)initWithCoder:(NSCoder*)coder {
CGImageRef brushImage;
CGContextRef brushContext;
GLubyte *brushData;
size_t width, height;
if ((self = [super initWithCoder:coder])) {
CAEAGLLayer *eaglLayer = (CAEAGLLayer *)self.layer;
eaglLayer.opaque = YES;
// In this application, we want to retain the EAGLDrawable contents after a call to presentRenderbuffer.
eaglLayer.drawableProperties = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithBool:YES], kEAGLDrawablePropertyRetainedBacking, kEAGLColorFormatRGBA8, kEAGLDrawablePropertyColorFormat, nil];
context = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES1];
if (!context || ![EAGLContext setCurrentContext:context]) {
[self release];
return nil;
}
// Create a texture from an image
// First create a UIImage object from the data in a image file, and then extract the Core Graphics image
brushImage = [UIImage imageNamed:@"Particle.png"].CGImage;
// Get the width and height of the image
width = CGImageGetWidth(brushImage);
height = CGImageGetHeight(brushImage);
// Texture dimensions must be a power of 2. If you write an application that allows users to supply an image,
// you'll want to add code that checks the dimensions and takes appropriate action if they are not a power of 2.
// Make sure the image exists
if(brushImage) {
// Allocate memory needed for the bitmap context
brushData = (GLubyte *) calloc(width * height * 4, sizeof(GLubyte));
// Use the bitmap creation function provided by the Core Graphics framework.
brushContext = CGBitmapContextCreate(brushData, width, height, 8, width * 4, CGImageGetColorSpace(brushImage), kCGImageAlphaPremultipliedLast);
// After you create the context, you can draw the image to the context.
CGContextDrawImage(brushContext, CGRectMake(0.0, 0.0, (CGFloat)width, (CGFloat)height), brushImage);
// You don't need the context at this point, so you need to release it to avoid memory leaks.
CGContextRelease(brushContext);
// Use OpenGL ES to generate a name for the texture.
glGenTextures(1, &brushTexture);
// Bind the texture name.
glBindTexture(GL_TEXTURE_2D, brushTexture);
// Set the texture parameters to use a minifying filter and a linear filter (weighted average)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// Specify a 2D texture image, providing a pointer to the image data in memory
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, brushData);
// Release the image data; it's no longer needed
free(brushData);
}
// Set the view's scale factor
self.contentScaleFactor = 1.0;
// Setup OpenGL states
glMatrixMode(GL_PROJECTION);
CGRect frame = self.bounds;
CGFloat scale = self.contentScaleFactor;
// Setup the view port in Pixels
glOrthof(0, frame.size.width * scale, 0, frame.size.height * scale, -1, 1);
glViewport(0, 0, frame.size.width * scale, frame.size.height * scale);
glMatrixMode(GL_MODELVIEW);
glDisable(GL_DITHER);
glEnable(GL_TEXTURE_2D);
glEnableClientState(GL_VERTEX_ARRAY);
glEnable(GL_BLEND);
// Set a blending function appropriate for premultiplied alpha pixel data
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_POINT_SPRITE_OES);
glTexEnvf(GL_POINT_SPRITE_OES, GL_COORD_REPLACE_OES, GL_TRUE);
glPointSize(width / kBrushScale);
// Make sure to start with a cleared buffer
needsErase = YES;
}
return self;
}
AppDelegate.h
PaintingWindow *window; // a window subclass (inherits from UIWindow)
PaintingView *drawingView;
@property (nonatomic, retain) IBOutlet PaintingWindow *window;
@property (nonatomic, retain) IBOutlet PaintingView *drawingView;
@synthesize window;
@synthesize drawingView;
AppDelegate.m
- (void) applicationDidFinishLaunching:(UIApplication*)application
{
CGRect rect = [[UIScreen mainScreen] applicationFrame];
CGFloat components[3];
// Create a segmented control so that the user can choose the brush color.
UISegmentedControl *segmentedControl = [[UISegmentedControl alloc] initWithItems:
[NSArray arrayWithObjects:
[UIImage imageNamed:@"Red.png"],
[UIImage imageNamed:@"Yellow.png"],
[UIImage imageNamed:@"Green.png"],
[UIImage imageNamed:@"Blue.png"],
[UIImage imageNamed:@"Purple.png"],
nil]];
// Compute a rectangle that is positioned correctly for the segmented control you'll use as a brush color palette
//CGRect frame = CGRectMake(rect.origin.x + kLeftMargin, rect.size.height - kPaletteHeight - kTopMargin, rect.size.width - (kLeftMargin + kRightMargin), kPaletteHeight);
CGRect frame = CGRectMake(50, 22, (rect.size.width - (kLeftMargin + kRightMargin)) - 20, kPaletteHeight);
segmentedControl.frame = frame;
// When the user chooses a color, the method changeBrushColor: is called.
[segmentedControl addTarget:self action:@selector(changeBrushColor:) forControlEvents:UIControlEventValueChanged];
segmentedControl.segmentedControlStyle = UISegmentedControlStyleBar;
// Make sure the tint color of the control complements the black background
segmentedControl.tintColor = [UIColor darkGrayColor];
// Set the third color (index values start at 0)
segmentedControl.selectedSegmentIndex = 2;
// Add the control to the window
[window addSubview:segmentedControl];
// Now that the control is added, you can release it
[segmentedControl release];
[self addBackgroundSegmentControll];
// Define a starting color
HSL2RGB((CGFloat) 2.0 / (CGFloat)kPaletteSize, kSaturation, kLuminosity, &components[0], &components[1], &components[2]);
// Defer to the OpenGL view to set the brush color
[drawingView setBrushColorWithRed:components[0] green:components[1] blue:components[2]];
// Look in the Info.plist file and you'll see the status bar is hidden
// Set the style to black so it matches the background of the application
[application setStatusBarStyle:UIStatusBarStyleBlackTranslucent animated:NO];
// Now show the status bar, but animate to the style.
[application setStatusBarHidden:NO withAnimation:YES];
// Load the sounds
NSBundle *mainBundle = [NSBundle mainBundle];
erasingSound = [[SoundEffect alloc] initWithContentsOfFile:[mainBundle pathForResource:@"Erase" ofType:@"caf"]];
selectSound = [[SoundEffect alloc] initWithContentsOfFile:[mainBundle pathForResource:@"Select" ofType:@"caf"]];
[window setFrame:CGRectMake(0, 0, 768, 1024)];
drawingView.frame = CGRectMake(0, 0, 768, 1024);
// Erase the view when receiving a notification named "shake" from the NSNotificationCenter object
// The "shake" notification is posted by the PaintingWindow object when the user shakes the device
[[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(eraseView) name:@"shake" object:nil];
}
An improved version of Ramshad's answer:
This one has no memory leaks and works with new versions of iOS and for different view sizes and displays (retina and non-retina).
// Hypothetical wrapper added for completeness; `view` is the OpenGL-backed view being captured.
- (UIImage *)snapshotOfGLView:(UIView *)view {
CGFloat scale = [[UIScreen mainScreen] scale]; // use nativeScale on iOS 8.0+
CGSize imageSize = CGSizeMake((scale * view.frame.size.width), (scale * view.frame.size.height));
NSUInteger length = imageSize.width * imageSize.height * 4;
GLubyte * buffer = (GLubyte *)malloc(length * sizeof(GLubyte));
if(buffer == NULL)
return nil;
glReadPixels(0, 0, imageSize.width, imageSize.height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer, length, NULL);
int bitsPerComponent = 8;
int bitsPerPixel = 32;
int bytesPerRow = 4 * imageSize.width;
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
CGImageRef imageRef = CGImageCreate(imageSize.width, imageSize.height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);
UIGraphicsBeginImageContext(imageSize);
CGContextDrawImage(UIGraphicsGetCurrentContext(), CGRectMake(0.0, 0.0, imageSize.width, imageSize.height), imageRef);
UIImage * image = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
CGImageRelease(imageRef);
CGColorSpaceRelease(colorSpaceRef);
CGDataProviderRelease(provider);
free(buffer);
return image;
}
Please refer to the link below to save an OpenGL drawn item as an image in the photo library.
Save an OpenGL drawn item as an image
Code details:
Call [self captureToPhotoAlbum]; after adding the code below.
-(void)captureToPhotoAlbum {
UIImage *image = [self glToUIImage];
UIImageWriteToSavedPhotosAlbum(image, self, nil, nil);
}
// Free the pixel buffer once Core Graphics is done with it
// (added so the buffers below don't leak; see the memory note further down).
static void releaseGLPixels(void *info, const void *data, size_t size) {
free((void *)data);
}
- (UIImage *)glToUIImage {
NSInteger myDataLength = 320 * 480 * 4;
// allocate array and read pixels into it.
GLubyte *buffer = (GLubyte *) malloc(myDataLength);
glReadPixels(0, 0, 320, 480, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
// gl renders "upside down" so swap top to bottom into new array.
// there's gotta be a better way, but this works.
GLubyte *buffer2 = (GLubyte *) malloc(myDataLength);
for(int y = 0; y < 480; y++)
{
for(int x = 0; x < 320 * 4; x++)
{
buffer2[(479 - y) * 320 * 4 + x] = buffer[y * 4 * 320 + x];
}
}
free(buffer); // the unflipped copy is no longer needed
// make data provider with data; the release callback frees buffer2 when the image is done with it.
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer2, myDataLength, releaseGLPixels);
// prep the ingredients
int bitsPerComponent = 8;
int bitsPerPixel = 32;
int bytesPerRow = 4 * 320;
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
// make the cgimage
CGImageRef imageRef = CGImageCreate(320, 480, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);
// then make the uiimage from that, releasing what we created along the way
UIImage *myImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpaceRef);
return myImage;
}
For iPad, or to fix the scaling issue, change all the widths to 640 instead of 320 and the heights to 960 instead of 480. Adjust the width and height values to match your scale factor.
Manage the memory (free the buffers, as done above).
Thanks.
If the iPhone supports it, you can read from an OpenGL context using glReadPixels. After that's done, you should be able to create something like a UIImage from the pixel data you have read and save it to the photo library like you would for any other image created by an application.
The code works perfectly on iOS 5.0 when built in Xcode 4.0.
However, in Xcode 4.5 with the developer preview of iOS 6.0, the code does not work as it should: the image that is saved is totally black! It is saved in "My Photos", with the resolution we chose, but it is a black image!
I guess that something has changed in the behavior of
glReadPixels(0, 0, 320, 480, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
or in
CGImageRef imageRef = CGImageCreate(320, 480, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);
UIImage *myImage = [UIImage imageWithCGImage:imageRef];
It is not a matter of 320, 480, or any other resolution variables. In Xcode 4.0 it worked perfectly, and fast, even at Retina resolutions.
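A hedged guess at the cause (my assumption, not confirmed by the poster): on iOS 6 the drawable's contents may not survive -presentRenderbuffer: unless retained backing is in effect, and glReadPixels only reads the currently bound framebuffer. Binding the framebuffer and reading before presenting is the usual workaround:
// Sketch: ensure the right framebuffer is bound, read the pixels, then present.
glBindFramebufferOES(GL_FRAMEBUFFER_OES, viewFramebuffer);
glReadPixels(0, 0, 320, 480, GL_RGBA, GL_UNSIGNED_BYTE, buffer); // buffer as in glToUIImage above
glBindRenderbufferOES(GL_RENDERBUFFER_OES, viewRenderbuffer);
[context presentRenderbuffer:GL_RENDERBUFFER_OES];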

How do you crop an image in iOS

I have a photo app where you can add stickers in one section. When you're finished I want to save the image. Here is the code that I have to do that.
if(UIGraphicsBeginImageContextWithOptions != NULL)
{
UIGraphicsBeginImageContextWithOptions(self.view.frame.size, YES, 2.5);
} else {
UIGraphicsBeginImageContext(self.view.frame.size);
}
CGContextRef contextNew=UIGraphicsGetCurrentContext();
[self.view.layer renderInContext:contextNew];
UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
Now the image that gets saved is the full screen of the image, which is fine, but now I need to crop the image and I don't know how. You can see the image at the link below:
http://dl.dropbox.com/u/19130454/Photo%202012-04-09%201%2036%2018%20PM.png
I need to crop:
91px from the left and right
220px from the bottom
Any help would be greatly appreciated. If I haven't explained things clearly, please let me know and I'll do my best to re-explain.
How about something like this
CGRect clippedRect = CGRectMake(self.view.frame.origin.x+91, self.view.frame.origin.y, self.view.frame.size.width-91*2, self.view.frame.size.height-220);
CGImageRef imageRef = CGImageCreateWithImageInRect([image CGImage], clippedRect);
UIImage *newImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
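One caveat (my addition; variable names follow the snippet above): CGImageCreateWithImageInRect takes a rect in pixel coordinates, while the frame values above are in points. Since the context in the question was created with a scale of 2.5, the rect likely needs to be multiplied by the image's scale first:
CGFloat s = image.scale;
CGRect pixelRect = CGRectMake(clippedRect.origin.x * s, clippedRect.origin.y * s,
clippedRect.size.width * s, clippedRect.size.height * s);
CGImageRef croppedRef = CGImageCreateWithImageInRect([image CGImage], pixelRect);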
The following code may help you.
You should first get the correct crop frame with the method below:
-(CGRect)cropRectForFrame:(CGRect)frame
{
// NSAssert(self.contentMode == UIViewContentModeScaleAspectFit, @"content mode must be aspect fit");
CGFloat widthScale = imageview.superview.bounds.size.width / imageview.image.size.width;
CGFloat heightScale = imageview.superview.bounds.size.height / imageview.image.size.height;
float x, y, w, h, offset;
if (widthScale<heightScale) {
offset = (imageview.superview.bounds.size.height - (imageview.image.size.height*widthScale))/2;
x = frame.origin.x / widthScale;
y = (frame.origin.y-offset) / widthScale;
w = frame.size.width / widthScale;
h = frame.size.height / widthScale;
} else {
offset = (imageview.superview.bounds.size.width - (imageview.image.size.width*heightScale))/2;
x = (frame.origin.x-offset) / heightScale;
y = frame.origin.y / heightScale;
w = frame.size.width / heightScale;
h = frame.size.height / heightScale;
}
return CGRectMake(x, y, w, h);
}
Then you need to call this method to get the cropped image:
- (UIImage *)imageByCropping:(UIImage *)image toRect:(CGRect)rect
{
// you need to update the scaling factor value if the device does not have a retina display
UIGraphicsBeginImageContextWithOptions(rect.size,
/* your view opaque */ NO,
/* scaling factor */ 2.0);
// stick to methods on UIImage so that orientation etc. are automatically
// dealt with for us
[image drawAtPoint:CGPointMake(-rect.origin.x, -rect.origin.y)];
UIImage *result = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return result;
}
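Hypothetical glue code tying the two helpers together, where overlayFrame is the crop rectangle in the image view's superview coordinates (names assumed):
CGRect cropRect = [self cropRectForFrame:overlayFrame];
UIImage *croppedImage = [self imageByCropping:imageview.image toRect:cropRect];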
- (UIImage*)imageByCropping:(CGRect)rect
{
//create a context to do our clipping in
UIGraphicsBeginImageContext(rect.size);
CGContextRef currentContext = UIGraphicsGetCurrentContext();
//create a rect with the size we want to crop the image to
//the X and Y here are zero so we start at the beginning of our
//newly created context
CGRect clippedRect = CGRectMake(0, 0, rect.size.width, rect.size.height);
CGContextClipToRect( currentContext, clippedRect);
//create a rect equivalent to the full size of the image
//offset the rect by the X and Y we want to start the crop
//from in order to cut off anything before them
CGRect drawRect = CGRectMake(rect.origin.x * -1,
rect.origin.y * -1,
self.size.width,
self.size.height);
//draw the image to our clipped context using our offset rect
// CGContextDrawImage(currentContext, drawRect, self.CGImage);
[self drawInRect:drawRect]; // This will fix getting inverted image from context.
//pull the image from our cropped context
UIImage *cropped = UIGraphicsGetImageFromCurrentImageContext();
//pop the context to get back to the default
UIGraphicsEndImageContext();
//Note: this is autoreleased
return cropped;
}
Refer to the link below for an image crop view:
https://github.com/myang-git/iOS-Image-Crop-View
How to use
Very easy! It is created to be a drop-in component, so there is no static library and no extra dependencies. Just copy ImageCropView.h and ImageCropView.m to your project, and implement the ImageCropViewControllerDelegate protocol.
Use it like UIImagePicker:
- (void)cropImage:(UIImage *)image{
ImageCropViewController *controller = [[ImageCropViewController alloc] initWithImage:image];
controller.delegate = self;
[[self navigationController] pushViewController:controller animated:YES];
}
- (void)ImageCropViewController:(ImageCropViewController *)controller didFinishCroppingImage:(UIImage *)croppedImage{
image = croppedImage;
imageView.image = croppedImage;
[[self navigationController] popViewControllerAnimated:YES];
}
- (void)ImageCropViewControllerDidCancel:(ImageCropViewController *)controller{
imageView.image = image;
[[self navigationController] popViewControllerAnimated:YES];
}

How to resize the image programmatically in objective-c in iphone

I have an application where I am displaying large images in a small space.
The images are quite large, but I am only displaying them in 100x100 pixel frames.
My app is responding slowly because of the size of the images I am using.
To improve performance, how can I resize the images programmatically using Objective-C?
Please find the following code.
- (UIImage *)imageWithImage:(UIImage *)image convertToSize:(CGSize)size {
UIGraphicsBeginImageContext(size);
[image drawInRect:CGRectMake(0, 0, size.width, size.height)];
UIImage *destImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return destImage;
}
The code above just redraws the image at the new size, so it can stretch the image. The version below instead computes a scale-to-fill rect from your image's width and height, so the image does not stretch and is centered within the target size:
- (UIImage *)imageWithImage:(UIImage *)image scaledToFillSize:(CGSize)size
{
CGFloat scale = MAX(size.width/image.size.width, size.height/image.size.height);
CGFloat width = image.size.width * scale;
CGFloat height = image.size.height * scale;
CGRect imageRect = CGRectMake((size.width - width)/2.0f,
(size.height - height)/2.0f,
width,
height);
UIGraphicsBeginImageContextWithOptions(size, NO, 0);
[image drawInRect:imageRect];
UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return newImage;
}
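An example call (identifiers assumed), producing a 100x100 thumbnail that fills the frame:
UIImage *thumb = [self imageWithImage:largeImage scaledToFillSize:CGSizeMake(100, 100)];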
My favorite way to do this is with CGImageSourceCreateThumbnailAtIndex (in the ImageIO framework). The name is a bit misleading.
Here's an excerpt of some code from a recent app of mine.
CGFloat maxw = 100; // whatever max width you want (placeholder value)
CGFloat maxh = 100; // whatever max height you want (placeholder value)
CGImageSourceRef src = NULL;
if ([imageSource isKindOfClass:[NSURL class]])
src = CGImageSourceCreateWithURL((__bridge CFURLRef)imageSource, nil);
else if ([imageSource isKindOfClass:[NSData class]])
src = CGImageSourceCreateWithData((__bridge CFDataRef)imageSource, nil);
// if at double resolution, double the thumbnail size and use double-resolution image
CGFloat scale = 1;
if ([[UIScreen mainScreen] scale] > 1.0) {
scale = 2;
maxw *= 2;
maxh *= 2;
}
// load the image at the desired size
NSDictionary* d = @{
(id)kCGImageSourceShouldAllowFloat: (id)kCFBooleanTrue,
(id)kCGImageSourceCreateThumbnailWithTransform: (id)kCFBooleanTrue,
(id)kCGImageSourceCreateThumbnailFromImageAlways: (id)kCFBooleanTrue,
(id)kCGImageSourceThumbnailMaxPixelSize: @((int)(maxw > maxh ? maxw : maxh))
};
CGImageRef imref = CGImageSourceCreateThumbnailAtIndex(src, 0, (__bridge CFDictionaryRef)d);
if (NULL != src)
CFRelease(src);
UIImage* im = [UIImage imageWithCGImage:imref scale:scale orientation:UIImageOrientationUp];
if (NULL != imref)
CFRelease(imref);
If you use an image at several different sizes and resize it each time, it will degrade your app's performance. One solution is not to resize it at all: use a button in place of the image view and just set the image on the button; it will be scaled automatically and you will get great performance.
I was also resizing images while setting them on a cell and my app got slow, so I used a button in place of the image view (the button does the scaling, not my code) and it is working perfectly fine.
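A minimal sketch of that approach (identifiers are illustrative); the fill alignments let the button's image view scale the image instead of centering it:
UIButton *thumbButton = [UIButton buttonWithType:UIButtonTypeCustom];
thumbButton.frame = CGRectMake(0, 0, 100, 100);
thumbButton.contentHorizontalAlignment = UIControlContentHorizontalAlignmentFill;
thumbButton.contentVerticalAlignment = UIControlContentVerticalAlignmentFill;
thumbButton.imageView.contentMode = UIViewContentModeScaleAspectFill;
[thumbButton setImage:largeImage forState:UIControlStateNormal];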
- (UIImage *)scaleImage:(UIImage *)image toSize:(CGSize)targetSize
{
//If scaleFactor is not touched, no scaling will occur
CGFloat scaleFactor = 1.0;
//Deciding which factor to use to scale the image (factor = targetSize / imageSize)
if (image.size.width > targetSize.width ||
image.size.height > targetSize.height || image.size.width == image.size.height)
if (!((scaleFactor = (targetSize.width /
image.size.width)) > (targetSize.height /
image.size.height))) //scale to fit width, or
scaleFactor = targetSize.height / image.size.height; // scale to fit height.
// The original snippet ended here; the drawing step below is an assumed completion.
CGSize scaledSize = CGSizeMake(image.size.width * scaleFactor, image.size.height * scaleFactor);
UIGraphicsBeginImageContext(scaledSize);
[image drawInRect:CGRectMake(0, 0, scaledSize.width, scaledSize.height)];
UIImage *scaledImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return scaledImage;
}
Since the code ran perfectly fine in iOS 4, for backwards compatibility I added a check for the OS version; for anything below 5.0 the old code is used.
- (UIImage *)resizedImage:(CGSize)newSize interpolationQuality:(CGInterpolationQuality)quality {
BOOL drawTransposed;
CGAffineTransform transform = CGAffineTransformIdentity;
if ([[[UIDevice currentDevice] systemVersion] floatValue] >= 5.0) {
// Apparently in iOS 5 the image is already correctly rotated, so we don't need to rotate it manually
drawTransposed = NO;
} else {
switch (self.imageOrientation) {
case UIImageOrientationLeft:
case UIImageOrientationLeftMirrored:
case UIImageOrientationRight:
case UIImageOrientationRightMirrored:
drawTransposed = YES;
break;
default:
drawTransposed = NO;
}
transform = [self transformForOrientation:newSize];
}
return [self resizedImage:newSize
transform:transform
drawTransposed:drawTransposed
interpolationQuality:quality];
}
You can use this.
[m_Image.layer setMinificationFilter:kCAFilterTrilinear];
This thread is old, but it is what came up when I tried to solve this problem. Once the image was scaled, it was not displaying well in my container even though I turned auto layout off. The easiest way for me to solve this for display in a table row was to paint the image on a white background of a fixed size.
Helper function
+(UIImage*)scaleMaintainAspectRatio:(UIImage*)sourceImage :(float)i_width :(float)i_height
{
float newHeight = 0.0;
float newWidth = 0.0;
float oldWidth = sourceImage.size.width;
float widthScaleFactor = i_width / oldWidth;
float oldHeight = sourceImage.size.height;
float heightScaleFactor = i_height / oldHeight;
if (heightScaleFactor > widthScaleFactor) {
newHeight = oldHeight * widthScaleFactor;
newWidth = sourceImage.size.width * widthScaleFactor;
} else {
newHeight = sourceImage.size.height * heightScaleFactor;
newWidth = oldWidth * heightScaleFactor;
}
// return image in white rect
float cxPad = i_width - newWidth;
float cyPad = i_height - newHeight;
if (cyPad > 0) {
cyPad = cyPad / 2.0;
}
if (cxPad > 0) {
cxPad = cxPad / 2.0;
}
CGSize size = CGSizeMake(i_width, i_height);
UIGraphicsBeginImageContextWithOptions(CGSizeMake(size.width, size.height), YES, 0.0);
[[UIColor whiteColor] setFill];
UIRectFill(CGRectMake(0, 0, size.width, size.height));
[sourceImage drawInRect:CGRectMake((int)cxPad, (int)cyPad, newWidth, newHeight)];
UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return newImage;
// will return scaled image at actual size, not in white rect
// UIGraphicsBeginImageContext(CGSizeMake(newWidth, newHeight));
// [sourceImage drawInRect:CGRectMake(0, 0, newWidth, newHeight)];
// UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
// UIGraphicsEndImageContext();
// return newImage;
}
I called it like this from my table view's cellForRowAtIndexPath:
PFFile *childsPicture = [object objectForKey:@"picture"];
[childsPicture getDataInBackgroundWithBlock:^(NSData *imageData, NSError *error) {
if (!error) {
UIImage *largePicture = [UIImage imageWithData:imageData];
UIImage *scaledPicture = [Utility scaleMaintainAspectRatio:largePicture :70.0 :70.0 ];
PFImageView *thumbnailImageView = (PFImageView*)[cell viewWithTag:100];
thumbnailImageView.image = scaledPicture;
[self.tableView reloadData];
}
}];
Hello from the end of 2018.
Solved with the following (you only need the last line; the first and second are just for context):
NSURL *url = [NSURL URLWithString:response.json[0][@"photo_50"]];
NSData *data = [NSData dataWithContentsOfURL:url];
UIImage *image = [UIImage imageWithData:data scale:customScale];
'customScale' is the scale you want (>1 if the image must be smaller, <1 if it must be bigger). Note that this changes the image's point size, not its underlying pixel data.
This C function will resize your image with a corner radius, without affecting the image's quality:
UIImage *Resize_Image(UIImage *iImage, CGFloat iSize, CGFloat icornerRadius) {
CGFloat scale = MAX(CGSizeMake(iSize ,iSize).width/iImage.size.width, CGSizeMake(iSize ,iSize).height/iImage.size.height);
CGFloat width = iImage.size.width * scale;
CGFloat height = iImage.size.height * scale;
CGRect imageRect = CGRectMake((CGSizeMake(iSize ,iSize).width - width)/2.0f,(CGSizeMake(iSize ,iSize).height - height)/2.0f,width,height);
UIGraphicsBeginImageContextWithOptions(CGSizeMake(iSize ,iSize), NO, 0);
[[UIBezierPath bezierPathWithRoundedRect:imageRect cornerRadius:icornerRadius] addClip];
[iImage drawInRect:imageRect];
UIImage *ResizedImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return ResizedImage;
}
This is how to use it:
UIImage *ResizedImage = Resize_Image([UIImage imageNamed:@"image.png"], 64, 14.4);
I do not remember where I took the first four lines from...

UIImage resize (Scale proportion) [duplicate]

Possible Duplicate:
Resize UIImage with aspect ratio?
The following piece of code is resizing the image perfectly, but the problem is that it messes up the aspect ratio (resulting in a skewed image). Any pointers?
// Change image resolution (auto-resize to fit)
+ (UIImage *)scaleImage:(UIImage*)image toResolution:(int)resolution {
CGImageRef imgRef = [image CGImage];
CGFloat width = CGImageGetWidth(imgRef);
CGFloat height = CGImageGetHeight(imgRef);
CGRect bounds = CGRectMake(0, 0, width, height);
//if already at the minimum resolution, return the original image, otherwise scale
if (width <= resolution && height <= resolution) {
return image;
} else {
CGFloat ratio = width/height;
if (ratio > 1) {
bounds.size.width = resolution;
bounds.size.height = bounds.size.width / ratio;
} else {
bounds.size.height = resolution;
bounds.size.width = bounds.size.height * ratio;
}
}
UIGraphicsBeginImageContext(bounds.size);
[image drawInRect:CGRectMake(0.0, 0.0, bounds.size.width, bounds.size.height)];
UIImage *imageCopy = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return imageCopy;
}
I used this single line of code to create a new UIImage which is scaled. Set the scale and orientation params to achieve what you want. The first line of code just grabs the image.
// grab the original image
UIImage *originalImage = [UIImage imageNamed:#"myImage.png"];
// scaling set to 2.0 makes the image 1/2 the size.
UIImage *scaledImage =
[UIImage imageWithCGImage:[originalImage CGImage]
scale:(originalImage.scale * 2.0)
orientation:(originalImage.imageOrientation)];
That's OK, not a big problem. The thing is, you have to find the proportional width and height.
For example, if a 2048.0 x 1360.0 image has to be resized for a 320 x 480 screen, the resulting image size should be 722.0 x 480.0.
Here is the formula, where w, h are the original dimensions and x, y the resulting ones:
w/h = x/y
=>
x = (w/h) * y
Substituting w = 2048, h = 1360, y = 480 gives x = 722.0. (Here width > height; if height > width, then take x to be 320 and calculate y.)
You can plug the numbers into this web page: ARC
Confused? Alright, here is a category on UIImage which will do the thing for you.
@interface UIImage (UIImageFunctions)
- (UIImage *) scaleToSize: (CGSize)size;
- (UIImage *) scaleProportionalToSize: (CGSize)size;
@end
@implementation UIImage (UIImageFunctions)
- (UIImage *) scaleToSize: (CGSize)size
{
// Scaling the selected image to the targeted size
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(NULL, size.width, size.height, 8, 0, colorSpace, kCGImageAlphaPremultipliedLast);
CGContextClearRect(context, CGRectMake(0, 0, size.width, size.height));
if(self.imageOrientation == UIImageOrientationRight)
{
CGContextRotateCTM(context, -M_PI_2);
CGContextTranslateCTM(context, -size.height, 0.0f);
CGContextDrawImage(context, CGRectMake(0, 0, size.height, size.width), self.CGImage);
}
else
CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), self.CGImage);
CGImageRef scaledImage=CGBitmapContextCreateImage(context);
CGColorSpaceRelease(colorSpace);
CGContextRelease(context);
UIImage *image = [UIImage imageWithCGImage: scaledImage];
CGImageRelease(scaledImage);
return image;
}
- (UIImage *) scaleProportionalToSize: (CGSize)size1
{
if(self.size.width>self.size.height)
{
NSLog(@"Landscape");
size1=CGSizeMake((self.size.width/self.size.height)*size1.height,size1.height);
}
else
{
NSLog(@"Portrait");
size1=CGSizeMake(size1.width,(self.size.height/self.size.width)*size1.width);
}
return [self scaleToSize:size1];
}
@end
The following is the appropriate call to do this, if img is the UIImage instance:
img=[img scaleProportionalToSize:CGSizeMake(320, 480)];
This fixes the math to scale to the maximum size in both width and height, rather than just one dimension, depending on the width and height of the original.
- (UIImage *) scaleProportionalToSize: (CGSize)size
{
float widthRatio = size.width/self.size.width;
float heightRatio = size.height/self.size.height;
if(widthRatio > heightRatio)
{
size=CGSizeMake(self.size.width*heightRatio,self.size.height*heightRatio);
} else {
size=CGSizeMake(self.size.width*widthRatio,self.size.height*widthRatio);
}
return [self scaleToSize:size];
}
This change worked for me:
// The size returned by CGImageGetWidth(imgRef) & CGImageGetHeight(imgRef) is incorrect as it doesn't respect the image orientation!
// CGImageRef imgRef = [image CGImage];
// CGFloat width = CGImageGetWidth(imgRef);
// CGFloat height = CGImageGetHeight(imgRef);
//
// This returns the actual width and height of the photo (and hence solves the problem
CGFloat width = image.size.width;
CGFloat height = image.size.height;
CGRect bounds = CGRectMake(0, 0, width, height);
Try to make the bounds' size integral:
#include <math.h>
....
if (ratio > 1) {
bounds.size.width = resolution;
bounds.size.height = round(bounds.size.width / ratio);
} else {
bounds.size.height = resolution;
bounds.size.width = round(bounds.size.height * ratio);
}