iPhone image resolution increased when emailed

In my app I capture an image and then add a frame to it.
I also have a feature to add custom text on top of the final image (original image + frame). I am using the following code to draw the text:
-(UIImage *)addText:(UIImage *)img text:(NSString *)textInput
{
    CGFloat imageWidth = img.size.width;
    CGFloat imageHeight = img.size.height;
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(NULL, imageWidth, imageHeight, 8,
                                                 4 * imageWidth, colorSpace, kCGImageAlphaPremultipliedFirst);
    CGContextDrawImage(context, CGRectMake(0, 0, imageWidth, imageHeight), img.CGImage);
    CGContextSetCMYKFillColor(context, 0.0, 0.0, 0.0, 1.0, 1);
    // customFont, DISPLAY_FONT_SIZE and DisplayToOutputScale are defined elsewhere in the project
    CGContextSetFont(context, customFont);
    UIColor *strokeColor = [UIColor blackColor];
    CGContextSetFillColorWithColor(context, strokeColor.CGColor);
    CGContextSetFontSize(context, DISPLAY_FONT_SIZE * DisplayToOutputScale);
    // Create an array of glyphs the size of the text that will be drawn.
    CGGlyph textToPrint[[textInput length]];
    for (int i = 0; i < [textInput length]; ++i)
    {
        // Store each letter as a glyph, subtracting the magic number to map characters to glyph indices.
        textToPrint[i] = [textInput characterAtIndex:i] + 3 - 32;
    }
    // The first pass is drawn invisible; it is only used to measure the length of the text in glyphs.
    CGContextSetTextDrawingMode(context, kCGTextInvisible);
    CGContextShowGlyphsAtPoint(context, 0, 0, textToPrint, [textInput length]);
    CGPoint endPoint = CGContextGetTextPosition(context);
    // Calculate the position of the text on the white border frame.
    CGFloat xPos = (imageWidth / 2.0f) - (endPoint.x / 2.0f);
    CGFloat yPos = 30 * DisplayToOutputScale;
    // Toggle off invisible mode; we are ready to draw the text.
    CGContextSetTextDrawingMode(context, kCGTextFill);
    CGContextShowGlyphsAtPoint(context, xPos, yPos, textToPrint, [textInput length]);
    // Extract the resulting image.
    CGImageRef imageMasked = CGBitmapContextCreateImage(context);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    UIImage *result = [UIImage imageWithCGImage:imageMasked];
    CGImageRelease(imageMasked);   // balance the create call so the CGImage is not leaked
    return result;
}
I email the image using UIImageJPEGRepresentation and attach the data.
When I email the image without adding custom text, its size increases from 1050 x 1275 to 2100 x 2550, which is strange.
But when I email the image with text added, the size remains unchanged.
Can anyone explain why this happens?
I think it has something to do with converting from UIImage to NSData.
Thanks.
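
For reference, the attach step presumably looks something like this sketch (the composer setup and the framedImage/caption variables are assumptions, not code from the question; the MessageUI framework must be linked):

#import <MessageUI/MessageUI.h>   // MFMailComposeViewController

UIImage *finalImage = [self addText:framedImage text:caption];  // hypothetical inputs
NSData *jpegData = UIImageJPEGRepresentation(finalImage, 0.8);
MFMailComposeViewController *mailer = [[MFMailComposeViewController alloc] init];
[mailer addAttachmentData:jpegData mimeType:@"image/jpeg" fileName:@"photo.jpg"];
[self presentModalViewController:mailer animated:YES];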

I had the same problem and fixed it by creating the context with an explicit scale of 1:
UIGraphicsBeginImageContextWithOptions(size, YES, 1.0);
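
In context, a minimal sketch (here image stands in for the composited photo + frame): UIImageJPEGRepresentation encodes the backing pixels, i.e. point size times scale, so a scale-2 (Retina) image doubles its reported dimensions, while redrawing at scale 1 keeps them as-is.

// A scale of 1 keeps points == pixels; passing 0.0 instead would pick up the
// device's Retina scale (2.0), which is exactly the 1050 x 1275 -> 2100 x 2550
// doubling described in the question.
UIGraphicsBeginImageContextWithOptions(image.size, YES, 1.0);
[image drawInRect:CGRectMake(0, 0, image.size.width, image.size.height)];
UIImage *scaleOneImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
NSData *jpegData = UIImageJPEGRepresentation(scaleOneImage, 0.8);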

Related

How to draw an outline/stroke around a UIImage (*.png) - Example inside

I have tried many ways to draw a black outline around an image.
This is an example of the result I want:
Can someone please let me know how I should do it, or give me an example?
Edit: I am stuck here; can someone please help me finish it?
What I did was make another shape in black under the white one with the shadow, intending to fill it all in black so it acts as an outline. But I can't figure out the last and most important part: making the shadow and filling it so that it is all black.
// ROUND_UP is not defined in the snippet; a common definition is assumed here:
#define ROUND_UP(N, S) ((((N) + (S) - 1) / (S)) * (S))

- (IBAction)addStroke:(id)sender
{
    [iconStrokeTest setImage:[self makeIconStroke:icon.imageView.image]];
}

- (UIImage *)makeIconStroke:(UIImage *)image
{
    CGImageRef originalImage = [image CGImage];
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef bitmapContext = CGBitmapContextCreate(NULL,
                                                       CGImageGetWidth(originalImage),
                                                       CGImageGetHeight(originalImage),
                                                       8,
                                                       CGImageGetWidth(originalImage) * 4,
                                                       colorSpace,
                                                       kCGImageAlphaPremultipliedLast);
    CGContextDrawImage(bitmapContext, CGRectMake(0, 0, CGBitmapContextGetWidth(bitmapContext), CGBitmapContextGetHeight(bitmapContext)), originalImage);
    CGImageRef finalMaskImage = [self createMaskWithImageAlpha:bitmapContext];
    UIImage *result = [UIImage imageWithCGImage:finalMaskImage];
    CGContextRelease(bitmapContext);
    CGImageRelease(finalMaskImage);
    // Begin a new image context to draw our colored image onto.
    UIGraphicsBeginImageContext(result.size);
    // Get a reference to the context we created.
    CGContextRef context = UIGraphicsGetCurrentContext();
    // Set the fill color.
    [[UIColor blackColor] setFill];
    // Translate/flip the graphics context (to transform from CG coordinates to UIKit coordinates).
    CGContextTranslateCTM(context, 0, result.size.height);
    CGContextScaleCTM(context, 1.0, -1.0);
    // Set the blend mode to color burn and draw the original image.
    CGContextSetBlendMode(context, kCGBlendModeColorBurn);
    CGRect rect = CGRectMake(0, 0, result.size.width, result.size.height);
    CGContextDrawImage(context, rect, result.CGImage);
    // Set a mask that matches the shape of the image, then draw (color burn) a colored rectangle.
    CGContextClipToMask(context, rect, result.CGImage);
    CGContextAddRect(context, rect);
    CGContextDrawPath(context, kCGPathFill);
    // Generate a new UIImage from the graphics context we drew onto.
    UIImage *coloredImg = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    // Return the color-burned image.
    return coloredImg;
}

- (CGImageRef)createMaskWithImageAlpha:(CGContextRef)originalImageContext
{
    UInt8 *data = (UInt8 *)CGBitmapContextGetData(originalImageContext);
    float width = CGBitmapContextGetBytesPerRow(originalImageContext) / 4;
    float height = CGBitmapContextGetHeight(originalImageContext);
    // Alpha-only buffer: one byte per pixel, rows padded to a 4-byte stride.
    int strideLength = ROUND_UP(width * 1, 4);
    unsigned char *alphaData = (unsigned char *)calloc(strideLength * height, 1);
    CGContextRef alphaOnlyContext = CGBitmapContextCreate(alphaData,
                                                          width,
                                                          height,
                                                          8,
                                                          strideLength,
                                                          NULL,
                                                          kCGImageAlphaOnly);
    // Copy (and invert) the alpha channel of the source bitmap into the alpha-only buffer.
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            unsigned char val = data[y * (int)width * 4 + x * 4 + 3];
            val = 255 - val;
            alphaData[y * strideLength + x] = val;
        }
    }
    CGImageRef alphaMaskImage = CGBitmapContextCreateImage(alphaOnlyContext);
    CGContextRelease(alphaOnlyContext);
    free(alphaData);
    // Make a mask.
    CGImageRef finalMaskImage = CGImageMaskCreate(CGImageGetWidth(alphaMaskImage),
                                                  CGImageGetHeight(alphaMaskImage),
                                                  CGImageGetBitsPerComponent(alphaMaskImage),
                                                  CGImageGetBitsPerPixel(alphaMaskImage),
                                                  CGImageGetBytesPerRow(alphaMaskImage),
                                                  CGImageGetDataProvider(alphaMaskImage), NULL, false);
    CGImageRelease(alphaMaskImage);
    return finalMaskImage;
}
Well, there's no built-in API for that. You'll have to do it yourself or find a library. But you can "fake" the effect by drawing the image with a shadow; note that shadows can be any color and don't have to look like shadows. This would be the easiest way; a sketch follows.
Other than that, you could vectorize the raster image and stroke the resulting path. Core Image's edge-detection filter would help there, but it could turn out to be hard to accomplish.
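
A minimal sketch of the shadow trick (the radius and color are arbitrary choices, and real artwork may need the canvas inset so the "outline" isn't clipped at the edges):

- (UIImage *)outlinedImageFromImage:(UIImage *)image
{
    UIGraphicsBeginImageContextWithOptions(image.size, NO, image.scale);
    CGContextRef context = UIGraphicsGetCurrentContext();
    // A zero-offset black shadow bleeds evenly past the opaque edges,
    // which reads as an outline rather than a drop shadow.
    CGContextSetShadowWithColor(context, CGSizeZero, 6.0f, [UIColor blackColor].CGColor);
    [image drawAtPoint:CGPointZero];
    UIImage *result = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return result;
}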

2D Pixel-Level collision detection between UIImages

I'm trying to code a method that detects when two UIImages collide, taking into account only the non-transparent pixels. To be clear: a method that returns TRUE when a pixel with an alpha component greater than 0 in one UIImageView overlaps a pixel whose alpha component is also greater than 0 in the other UIImageView.
The method should be something like:
- (BOOL)checkCollisionBetweenImage:(UIImage *)img1 inFrame:(CGRect)frame1 andImage:(UIImage *)img2 inFrame:(CGRect)frame2;
It receives both images to be checked along with their frames, passed independently, since the coordinate positions must be converted to a common space (UIImageView.frame won't do).
[UPDATE 1]
I'll update with a piece of code I used in a previous question of mine; this code, however, doesn't always work. I guess the problem lies in the fact that the UIImages used aren't necessarily in the same superview.
Detect pixel collision/overlapping between two images
if (!CGRectIntersectsRect(frame1, frame2)) return NO;
NSLog(@"OverlapsPixelsInImage:withImage:> Images Intersect");
UIImage *img1 = imgView1.image;
UIImage *img2 = imgView2.image;
CGImageRef imgRef1 = [img1 CGImage];
CGImageRef imgRef2 = [img2 CGImage];
float minx = MIN(frame1.origin.x, frame2.origin.x);
float miny = MIN(frame1.origin.y, frame2.origin.y);
float maxx = MAX(frame1.origin.x + frame1.size.width, frame2.origin.x + frame2.size.width);
float maxy = MAX(frame1.origin.y + frame1.size.height, frame2.origin.y + frame2.size.height);
CGRect canvasRect = CGRectMake(0, 0, maxx - minx, maxy - miny);
size_t width = floorf(canvasRect.size.width);
size_t height = floorf(canvasRect.size.height);
NSUInteger bitsPerComponent = 8;
NSUInteger bytesPerRow = 4 * width;
unsigned char *rawData = malloc(canvasRect.size.width * canvasRect.size.height * 4 * bitsPerComponent);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(rawData, width, height, bitsPerComponent, bytesPerRow, colorSpace, kCGImageAlphaPremultipliedLast);
CGColorSpaceRelease(colorSpace);
CGContextTranslateCTM(context, 0, canvasRect.size.height);
CGContextScaleCTM(context, 1.0, -1.0);
CGContextClipToMask(context, CGRectMake(frame2.origin.x - minx, frame2.origin.y - miny, frame2.size.width, frame2.size.height), imgRef2);
CGContextDrawImage(context, CGRectMake(frame1.origin.x - minx, frame1.origin.y - miny, frame1.size.width, frame1.size.height), imgRef1);
CGContextRelease(context);
int byteIndex = 0;
for (int i = 0; i < width * height; i++)
{
    int8_t alpha = rawData[byteIndex + 3];
    if (alpha > 64)
    {
        NSLog(@"collided in byte: %d", i);
        free(rawData);
        return YES;
    }
    byteIndex += 4;
}
free(rawData);
return NO;
[UPDATE 2]
I added another line, supposedly required just before drawing the masked image:
CGContextSetBlendMode(context, kCGBlendModeCopy);
It still doesn't work. I've been checking the alpha values detected on collision and they are random numbers, which is odd because the images I'm using have only full opacity or full transparency, nothing in between.
Check for the actual frame intersection; that gives you a rectangle.
Check every pixel of one image inside that rectangle for alpha > 0. If a pixel is non-transparent, convert its coordinate into the other image's space; if the alpha of that pixel is also > 0, you have a hit. Repeat until the rectangle is done.
Stop early once you have found a hit. A sketch of this approach follows.
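
A minimal sketch of that approach, assuming both frames share one coordinate space and each image's pixel size matches its frame size (the helper and method names are mine, not from the question):

// Render an image's alpha channel into a one-byte-per-pixel buffer.
// Row 0 of the buffer is the image's top scanline.
static unsigned char *AlphaBufferForImage(UIImage *image, size_t width, size_t height)
{
    unsigned char *buffer = calloc(width * height, 1);
    CGContextRef ctx = CGBitmapContextCreate(buffer, width, height, 8, width,
                                             NULL, kCGImageAlphaOnly);
    CGContextDrawImage(ctx, CGRectMake(0, 0, width, height), image.CGImage);
    CGContextRelease(ctx);
    return buffer;
}

- (BOOL)imagesCollide:(UIImage *)img1 frame:(CGRect)frame1
            withImage:(UIImage *)img2 frame:(CGRect)frame2
{
    CGRect overlap = CGRectIntersection(frame1, frame2);
    if (CGRectIsNull(overlap)) return NO;

    size_t w1 = CGImageGetWidth(img1.CGImage), h1 = CGImageGetHeight(img1.CGImage);
    size_t w2 = CGImageGetWidth(img2.CGImage), h2 = CGImageGetHeight(img2.CGImage);
    unsigned char *alpha1 = AlphaBufferForImage(img1, w1, h1);
    unsigned char *alpha2 = AlphaBufferForImage(img2, w2, h2);

    BOOL hit = NO;
    for (int y = 0; y < (int)overlap.size.height && !hit; y++) {
        for (int x = 0; x < (int)overlap.size.width; x++) {
            // Convert the shared coordinate into each image's own pixel space.
            int x1 = (int)(overlap.origin.x - frame1.origin.x) + x;
            int y1 = (int)(overlap.origin.y - frame1.origin.y) + y;
            int x2 = (int)(overlap.origin.x - frame2.origin.x) + x;
            int y2 = (int)(overlap.origin.y - frame2.origin.y) + y;
            if (alpha1[y1 * w1 + x1] > 0 && alpha2[y2 * w2 + x2] > 0) {
                hit = YES;   // a non-transparent pixel in each image at the same spot
                break;
            }
        }
    }
    free(alpha1);
    free(alpha2);
    return hit;
}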
Hmm... is your problem that your images have white squares behind them, and you don't want the white boxes to collide but do want the images inside them to collide?
I don't think you can check collision based on colors. What you need to do is open the images in an image editor (such as GIMP) and crop out the white background so those areas are actually transparent.

Detect pixel collision/overlapping between two images

I have two UIImageViews that contain images with some transparent areas. Is there any way to check whether the non-transparent areas of the two images collide?
Thanks.
[UPDATE]
So this is what I have up until now; unfortunately it still isn't working and I can't figure out why.
if (!CGRectIntersectsRect(frame1, frame2)) return NO;
NSLog(@"OverlapsPixelsInImage:withImage:> Images Intersect");
UIImage *img1 = imgView1.image;
UIImage *img2 = imgView2.image;
CGImageRef imgRef1 = [img1 CGImage];
CGImageRef imgRef2 = [img2 CGImage];
float minx = MIN(frame1.origin.x, frame2.origin.x);
float miny = MIN(frame1.origin.y, frame2.origin.y);
float maxx = MAX(frame1.origin.x + frame1.size.width, frame2.origin.x + frame2.size.width);
float maxy = MAX(frame1.origin.y + frame1.size.height, frame2.origin.y + frame2.size.height);
CGRect canvasRect = CGRectMake(0, 0, maxx - minx, maxy - miny);
size_t width = floorf(canvasRect.size.width);
size_t height = floorf(canvasRect.size.height);
NSUInteger bitsPerComponent = 8;
NSUInteger bytesPerRow = 4 * width;
unsigned char *rawData = calloc(width * height, sizeof(*rawData));
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(rawData, width, height, bitsPerComponent, bytesPerRow, colorSpace, kCGImageAlphaPremultipliedLast);
CGColorSpaceRelease(colorSpace);
CGContextTranslateCTM(context, 0, canvasRect.size.height);
CGContextScaleCTM(context, 1.0, -1.0);
CGContextClipToMask(context, CGRectMake(frame2.origin.x - minx, frame2.origin.y - miny, frame2.size.width, frame2.size.height), imgRef2);
CGContextDrawImage(context, CGRectMake(frame1.origin.x - minx, frame1.origin.y - miny, frame1.size.width, frame1.size.height), imgRef1);
CGContextRelease(context);
int byteIndex = 0;
for (int i = 0; i < width * height; i++)
{
    CGFloat alpha = rawData[byteIndex + 3];
    if (alpha > 128)
    {
        NSLog(@"collided in byte: %d", i);
        free(rawData);
        return YES;
    }
    byteIndex += 4;
}
free(rawData);
return NO;
You can draw the alpha channels of both images into a single bitmap context and then look through the data for any transparent pixels. Take a look at the clipRectToPath() code in Clipping CGRect to a CGPath. It's solving a different problem, but the approach is the same: rather than using CGContextFillPath() to draw into the context, just draw both of your images.
Here's the flow:
Create an alpha-only bitmap context (kCGImageAlphaOnly)
Draw everything you want to compare into it
Walk the pixels, looking at each value. In my example, it considers < 128 to be "transparent." If you want only fully transparent pixels, use == 0.
When you find a transparent pixel, the example just notes which column it was in. In your problem, you might simply return YES, or you might use that data to form another mask. A compressed sketch of the flow is below.
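
A compressed, hedged sketch of that flow (the canvas size and the img1/img2, frame1/frame2 variables are placeholders):

size_t width = 320, height = 480;                  // assumed canvas size
unsigned char *alpha = calloc(width * height, 1);  // one byte per pixel
// 1. Alpha-only bitmap context; no color space is needed for kCGImageAlphaOnly.
CGContextRef ctx = CGBitmapContextCreate(alpha, width, height, 8, width,
                                         NULL, kCGImageAlphaOnly);
// 2. Draw everything you want to compare into it.
CGContextDrawImage(ctx, frame1, img1.CGImage);
CGContextDrawImage(ctx, frame2, img2.CGImage);
// 3. Walk the pixels; == 0 is fully transparent, < 128 "mostly" transparent.
for (size_t i = 0; i < width * height; i++) {
    if (alpha[i] == 0) {
        // Transparent pixel at column (i % width), row (i / width).
    }
}
CGContextRelease(ctx);
free(alpha);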
Not easily; you basically have to read in the raw bitmap data and walk the pixels.

How can I change a UIImage's color in the iPhone SDK (Xcode)?

I am using different images and I want to include a color-change option, but I can't work out how. Can anybody help me?
If you want to do image tinting, see UIImage+Tint.m in kballard/MGImageUtilities. If you want wholesale color replacement (e.g. treat an image as a silhouette and change the entire color to one flat color), see UIImage+Tint.m in mattgemmell/MGImageUtilities.
This is the latest and simplest way to do it:
theImageView.image = [theImageView.image imageWithRenderingMode:UIImageRenderingModeAlwaysTemplate];
[theImageView setTintColor:[UIColor redColor]];
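Note that imageWithRenderingMode: and tintColor are available only on iOS 7 and later.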
One way to accomplish this is to desaturate your image and then add a tint on top of it with the color you desire.
Desaturate
-(UIImage *)getImageWithUnsaturatedPixelsOfImage:(UIImage *)image
{
    const int RED = 1, GREEN = 2, BLUE = 3;
    CGRect imageRect = CGRectMake(0, 0, image.size.width * 2, image.size.height * 2);
    int width = imageRect.size.width, height = imageRect.size.height;
    uint32_t *pixels = (uint32_t *)malloc(width * height * sizeof(uint32_t));
    memset(pixels, 0, width * height * sizeof(uint32_t));
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(pixels, width, height, 8, width * sizeof(uint32_t), colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast);
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), [image CGImage]);
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            uint8_t *rgbaPixel = (uint8_t *)&pixels[y * width + x];
            uint32_t gray = (0.3 * rgbaPixel[RED] + 0.59 * rgbaPixel[GREEN] + 0.11 * rgbaPixel[BLUE]);
            rgbaPixel[RED] = gray;
            rgbaPixel[GREEN] = gray;
            rgbaPixel[BLUE] = gray;
        }
    }
    CGImageRef newImage = CGBitmapContextCreateImage(context);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    free(pixels);
    UIImage *resultUIImage = [UIImage imageWithCGImage:newImage scale:2 orientation:UIImageOrientationUp];
    CGImageRelease(newImage);
    return resultUIImage;
}
Overlay With Color
-(UIImage *)getImageWithTintedColor:(UIImage *)image withTint:(UIColor *)color withIntensity:(float)alpha
{
    CGSize size = image.size;
    UIGraphicsBeginImageContextWithOptions(size, FALSE, 2);
    CGContextRef context = UIGraphicsGetCurrentContext();
    [image drawAtPoint:CGPointZero blendMode:kCGBlendModeNormal alpha:1.0];
    CGContextSetFillColorWithColor(context, color.CGColor);
    CGContextSetBlendMode(context, kCGBlendModeOverlay);
    CGContextSetAlpha(context, alpha);
    CGContextFillRect(context, CGRectMake(0, 0, size.width, size.height));
    UIImage *tintedImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return tintedImage;
}
How-To
// For a UIImageView
yourImageView.image = [self getImageWithUnsaturatedPixelsOfImage:yourImageView.image];
yourImageView.image = [self getImageWithTintedColor:yourImageView.image withTint:[UIColor redColor] withIntensity:0.7];
// For a UIImage
yourImage = [self getImageWithUnsaturatedPixelsOfImage:yourImage];
yourImage = [self getImageWithTintedColor:yourImage withTint:[UIColor redColor] withIntensity:0.7];
You can change the color of the tint to whatever you desire.

iPhone: taking a grayscale screenshot?

I'm wondering if there is a SIMPLE way to take a grayscale screenshot. I know I can take a color screenshot like this:
UIGraphicsBeginImageContext(self.view.bounds.size);
[self.view.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
NSData *data = UIImagePNGRepresentation(image);
Now, what do I need to add to these lines of code to make the UIImage grayscale? Thank you for reading.
Just convert your image to grayscale.
Read this post. Good luck.
Here is the method:
#pragma mark -
#pragma mark Grayscale
- (UIImage *)convertImageToGrayScale:(UIImage *)image
{
    // Create an image rectangle with the current image's width/height.
    CGRect imageRect = CGRectMake(0, 0, image.size.width, image.size.height);
    // Grayscale color space.
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
    // Create a bitmap context with the current image's size and the grayscale colorspace.
    CGContextRef context = CGBitmapContextCreate(nil, image.size.width, image.size.height, 8, 0, colorSpace, kCGImageAlphaNone);
    // Draw the image into the context over the specified rectangle
    // (using the grayscale colorspace defined above).
    CGContextDrawImage(context, imageRect, [image CGImage]);
    // Create a bitmap image from the pixel data in the current context.
    CGImageRef imageRef = CGBitmapContextCreateImage(context);
    // Create a new UIImage object.
    UIImage *newImage = [UIImage imageWithCGImage:imageRef];
    // Release the colorspace, context, and bitmap information.
    CGColorSpaceRelease(colorSpace);
    CGContextRelease(context);
    CFRelease(imageRef);
    // Return the new grayscale image.
    return newImage;
}
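
Combined with the screenshot code from the question, usage would presumably look like this:

UIGraphicsBeginImageContext(self.view.bounds.size);
[self.view.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage *screenshot = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
// Convert the screenshot to grayscale before encoding it.
UIImage *grayscaleShot = [self convertImageToGrayScale:screenshot];
NSData *data = UIImagePNGRepresentation(grayscaleShot);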
Based on Cam's code, with the ability to deal with the scale factor for Retina displays.
- (UIImage *)toGrayscale
{
    // RED/GREEN/BLUE byte offsets are not defined in the original snippet;
    // the values below match the desaturation example earlier on this page.
    const int RED = 1, GREEN = 2, BLUE = 3;
    // Create an image rectangle from the image's size in pixels (points * scale).
    CGRect imageRect = CGRectMake(0, 0, self.size.width * self.scale, self.size.height * self.scale);
    int width = imageRect.size.width;
    int height = imageRect.size.height;
    // The pixels will be painted into this array.
    uint32_t *pixels = (uint32_t *)malloc(width * height * sizeof(uint32_t));
    // Clear the pixels so any transparency is preserved.
    memset(pixels, 0, width * height * sizeof(uint32_t));
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    // Create a context with RGBA pixels.
    CGContextRef context = CGBitmapContextCreate(pixels, width, height, 8, width * sizeof(uint32_t), colorSpace,
                                                 kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast);
    // Paint the bitmap into our context, which fills in the pixels array.
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), [self CGImage]);
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            uint8_t *rgbaPixel = (uint8_t *)&pixels[y * width + x];
            // Convert to grayscale using the recommended luminance weights:
            // http://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale
            uint32_t gray = 0.3 * rgbaPixel[RED] + 0.59 * rgbaPixel[GREEN] + 0.11 * rgbaPixel[BLUE];
            // Set all three channels to the gray value.
            rgbaPixel[RED] = gray;
            rgbaPixel[GREEN] = gray;
            rgbaPixel[BLUE] = gray;
        }
    }
    // Create a new CGImageRef from our context with the modified pixels.
    CGImageRef image = CGBitmapContextCreateImage(context);
    // We're done with the context, color space, and pixel buffer.
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    free(pixels);
    // Make a new UIImage to return, preserving the original scale.
    UIImage *resultUIImage = [UIImage imageWithCGImage:image
                                                 scale:self.scale
                                           orientation:UIImageOrientationUp];
    // We're done with the CGImage now too.
    CGImageRelease(image);
    return resultUIImage;
}
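
Since this is written as an instance method on the image itself, it presumably lives in a UIImage category, along these lines:

@interface UIImage (Grayscale)
- (UIImage *)toGrayscale;
@end

// Usage:
UIImage *gray = [someImage toGrayscale];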