Image Circular Wrap in iOS - iphone

I have a problem - I want to create a circular wrap function which will wrap an image as depicted below:
This is available in OSX however is not available on iOS.
My logic so far has been:
Split the image up into x sections and for each section:
Rotate alpha degrees
Scale the image in the x axis to create a diamond shaped 'warped' effect of the image
Rotate back 90 - atan((h / 2) / (w / 2))
Translate the offset
My problem is that this seems inaccurate and I have been unable to mathematically figure out how to do this correctly - any help would be massively appreciated.
Link to OSX docs for CICircularWrap:
https://developer.apple.com/library/archive/documentation/GraphicsImaging/Reference/CoreImageFilterReference/index.html#//apple_ref/doc/filter/ci/CICircularWrap

Since CICircularWrap is not supported on iOS (EDIT: it is now - check answer below), one has to code their own effect for now. Probably the simplest way is to compute the transformation from polar to cartesian coordinate systems and then interpolate from the source image. I've come up with this simple (and frankly quite slow - it can be much optimised) algorithm:
#import <QuartzCore/QuartzCore.h>
/// Creates a bitmap context for 32-bit premultiplied ARGB pixels
/// (8 bits per component) in the device RGB color space.
///
/// Ownership: the caller must release the returned context with
/// CGContextRelease() AND free the backing pixel buffer obtained via
/// CGBitmapContextGetData() — the context does not own the buffer.
///
/// @param pixelsWide width of the bitmap, in pixels
/// @param pixelsHigh height of the bitmap, in pixels
/// @return the new context, or NULL on failure (nothing to free then).
CGContextRef CreateARGBBitmapContext (size_t pixelsWide, size_t pixelsHigh)
{
CGContextRef context = NULL;
CGColorSpaceRef colorSpace;
void * bitmapData;
// 4 bytes per pixel: 8 bits each of alpha, red, green and blue.
// size_t (instead of int) so large images cannot overflow the
// byte-count arithmetic.
size_t bitmapBytesPerRow = pixelsWide * 4;
size_t bitmapByteCount = bitmapBytesPerRow * pixelsHigh;
// Use the device RGB color space.
colorSpace = CGColorSpaceCreateDeviceRGB();
if (colorSpace == NULL)
{
fprintf(stderr, "Error allocating color space\n");
return NULL;
}
// Allocate memory for image data. calloc (not malloc) so the context
// starts out fully transparent instead of containing stale heap data.
bitmapData = calloc( bitmapByteCount, 1 );
if (bitmapData == NULL)
{
fprintf (stderr, "Memory not allocated!");
CGColorSpaceRelease( colorSpace );
return NULL;
}
// Create the bitmap context. Regardless of what the source image format
// is (CMYK, grayscale, and so on), drawing into it converts to the
// premultiplied ARGB format specified here.
context = CGBitmapContextCreate (bitmapData,
pixelsWide,
pixelsHigh,
8, // bits per component
bitmapBytesPerRow,
colorSpace,
kCGImageAlphaPremultipliedFirst);
if (context == NULL)
{
free (bitmapData);
fprintf (stderr, "Context not created!");
}
// Release the color space before returning; the context retains what
// it needs.
CGColorSpaceRelease( colorSpace );
return context;
}
/// Wraps a rectangular image around a circle (an iOS substitute for
/// CICircularWrap). For every destination pixel the polar coordinates
/// (angle, radius) are mapped back into the source image (inverse
/// mapping), optionally bilinearly interpolating the four neighbours.
///
/// @param inImage      the source image
/// @param bottomRadius radius the bottom edge of the source maps onto
/// @param topRadius    radius the top edge of the source maps onto
/// @param startAngle   angle at which the left/right edges of the source meet
/// @param clockWise    rendering direction
/// @param interpolate  YES = bilinear interpolation (simple anti-aliasing)
/// @return a new CGImageRef the caller must release, or NULL on failure.
CGImageRef circularWrap(CGImageRef inImage,CGFloat bottomRadius, CGFloat topRadius, CGFloat startAngle, BOOL clockWise, BOOL interpolate)
{
if(topRadius < 0 || bottomRadius < 0) return NULL;
// Source image dimensions.
int w = (int)CGImageGetWidth(inImage);
int h = (int)CGImageGetHeight(inImage);
// Result image side size (always a square image, large enough for the
// bigger of the two circles).
int resultSide = 2*MAX(topRadius, bottomRadius);
CGContextRef cgctx1 = CreateARGBBitmapContext(w,h);
CGContextRef cgctx2 = CreateARGBBitmapContext(resultSide,resultSide);
if (cgctx1 == NULL || cgctx2 == NULL)
{
// BUGFIX: if only one of the two contexts was created, release it
// (and free its pixel buffer) instead of leaking on this error path.
if (cgctx1) { free(CGBitmapContextGetData(cgctx1)); CGContextRelease(cgctx1); }
if (cgctx2) { free(CGBitmapContextGetData(cgctx2)); CGContextRelease(cgctx2); }
return NULL;
}
// Draw the source into cgctx1 so its memory holds the raw ARGB data.
CGRect rect = {{0,0},{w,h}};
CGContextDrawImage(cgctx1, rect, inImage);
// Raw pixel pointers: one 32-bit ARGB word per pixel.
int *data1 = CGBitmapContextGetData (cgctx1);
int *data2 = CGBitmapContextGetData (cgctx2);
int resultImageSize = resultSide*resultSide;
double temp;
for(int *p = data2, pos = 0;pos<resultImageSize;p++,pos++)
{
*p = 0;
// Destination pixel position relative to the circle centre.
int x = pos%resultSide-resultSide/2;
int y = -pos/resultSide+resultSide/2;
// Angle normalised to [0,1), which becomes the source x coordinate.
CGFloat phi = modf(((atan2(x, y)+startAngle)/2.0/M_PI+0.5),&temp);
if(!clockWise) phi = 1-phi;
phi*=w;
// Radius mapped onto the source y coordinate. NOTE(review): when
// bottomRadius == topRadius this divides by zero; the resulting
// inf/NaN simply fails the range check below, leaving the pixel
// transparent.
CGFloat r = ((sqrtf(x*x+y*y))-topRadius)*h/(bottomRadius-topRadius);
if(phi>=0 && phi<w && r>=0 && r<h)
{
if(!interpolate || phi >= w-1 || r>=h-1)
{
// Pick the closest pixel (no interpolation at the far edges,
// where the +1 neighbours would fall outside the buffer).
*p = data1[(int)r*w+(int)phi];
}
else
{
// Bilinear interpolation between the 4 neighbouring pixels.
double dphi = modf(phi, &temp);
double dr = modf(r, &temp);
int8_t* c00 = (int8_t*)(data1+(int)r*w+(int)phi);
int8_t* c01 = (int8_t*)(data1+(int)r*w+(int)phi+1);
int8_t* c10 = (int8_t*)(data1+(int)r*w+w+(int)phi);
int8_t* c11 = (int8_t*)(data1+(int)r*w+w+(int)phi+1);
// Interpolate the 4 ARGB components separately
// (& 0xFF converts each signed byte back to 0..255).
for(int component = 0; component < 4; component++)
{
double avg = ((*c00 & 0xFF)*(1-dphi)+(*c01 & 0xFF)*dphi)*(1-dr)+((*c10 & 0xFF)*(1-dphi)+(*c11 & 0xFF)*dphi)*dr;
*p += (((int)(avg))<<(component*8));
c00++; c10++; c01++; c11++;
}
}
}
}
CGImageRef result = CGBitmapContextCreateImage(cgctx2);
// Release the contexts, then free the pixel buffers they used
// (CreateARGBBitmapContext hands buffer ownership to the caller).
CGContextRelease(cgctx1);
CGContextRelease(cgctx2);
if (data1) free(data1);
if (data2) free(data2);
return result;
}
Use the circularWrap function with parameters:
CGImageRef inImage the source image
CGFloat bottomRadius the bottom side of the source image will transform into a circle with this radius
CGFloat topRadius the same for the top side of the source image; this can be larger or smaller than the bottom radius (which results in wrapping around the top/bottom of the image)
CGFloat startAngle the angle at which the left and right sides of the source image will transform. BOOL clockWise the direction of rendering
BOOL interpolate a simple anti-aliasing algorithm. Only the inside of the image is interpolated
some samples (top left is the source image):
with code:
image1 = [UIImage imageWithCGImage:circularWrap(sourceImage.CGImage,0,300,0,YES,NO)];
image2 = [UIImage imageWithCGImage:circularWrap(sourceImage.CGImage,100,300,M_PI_2,NO,YES)];
image3 = [UIImage imageWithCGImage:circularWrap(sourceImage.CGImage,300,200,M_PI_4,NO,YES)];
image4 = [UIImage imageWithCGImage:circularWrap(sourceImage.CGImage,250,300,0,NO,NO)];
enjoy! :)

Apple have added CICircularWrap to iOS 9
https://developer.apple.com/library/mac/documentation/GraphicsImaging/Reference/CoreImageFilterReference/index.html#//apple_ref/doc/filter/ci/CICircularWrap
Wraps an image around a transparent circle.
Localized Display Name
Circular Wrap Distortion
Availability
Available in OS X v10.5 and later and in iOS 9 and later.

Related

convert image to pixels format and back to UIImage format changes image in iPhone

I first convert the image to raw pixels and then convert the pixels back to a UIImage. After the conversion the image's colors change and it also becomes somewhat transparent. I have tried a lot but have not been able to find the problem. Here is my code:
// Marks pixels along the line segments joining the given points (by
// writing alpha-channel bytes) and rebuilds a UIImage from the raw buffer.
//
// NOTE(review): this routine draws into a premultiplied-FIRST (ARGB)
// context but re-creates the image as premultiplied-LAST, big-endian
// (RGBA) below — presumably the cause of the reported color shift and
// transparency; the two formats must match.
-(UIImage*)markPixels:(NSMutableArray*)pixels OnImage:(UIImage*)image{
CGImageRef inImage = image.CGImage;
// Create off screen bitmap context to draw the image into. Format ARGB is 4 bytes for each pixel: Alpha, Red, Green, Blue
CGContextRef cgctx = [self createARGBBitmapContextFromImage:inImage];
if (cgctx == NULL) { return nil; /* error */ }
size_t w = CGImageGetWidth(inImage);
size_t h = CGImageGetHeight(inImage);
CGRect rect = {{0,0},{w,h}};
// Draw the image to the bitmap context. Once we draw, the memory
// allocated for the context for rendering will then contain the
// raw image data in the specified color space.
CGContextDrawImage(cgctx, rect, inImage);
// Now we can get a pointer to the image data associated with the bitmap
// context.
int r = 3;     // mark radius, in pixels
int p = 2*r+1; // mark diameter (upper loop bound below)
unsigned char* data = CGBitmapContextGetData (cgctx);
int i = 0;
// NOTE(review): scans until two consecutive zero bytes — looks like a
// debug probe, not part of the marking logic.
while (data[i]&&data[i+1]) {
// NSLog(@"%d",pixels[i]);
i++;
}
NSLog(@"%d %zd %zd",i,w,h);
NSLog(@"%ld",sizeof(CGBitmapContextGetData (cgctx)));
for(int i = 0; i< pixels.count-1 ; i++){
NSValue*touch1 = [pixels objectAtIndex:i];
NSValue*touch2 = [pixels objectAtIndex:i+1];
NSArray *linePoints = [self returnLinePointsBetweenPointA:[touch1 CGPointValue] pointB:[touch2 CGPointValue]];
for(NSValue *touch in linePoints){
NSLog(@"point = %@",NSStringFromCGPoint([touch CGPointValue]));
CGPoint location = [touch CGPointValue];
for(int i = -r ; i<p ;i++)
for(int j= -r; j<p;j++)
{
// NOTE(review): this bounds test can never reject anything
// (i<=0 && i>height is impossible); the edge checks should
// presumably be ||-combined per side.
if(i<=0 && j<=0 && i>image.size.height && j>image.size.width)
continue;
NSInteger index = (location.y+i) * w*4 + (location.x+j)* 4;
// NOTE(review): overwriting index with 0 makes every write hit
// byte 3 of the FIRST pixel only — debug leftover to remove?
index = 0;
data[index +3] = 125;
}
}
}
// When finished, release the context
CGContextRelease(cgctx);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGDataProviderRef dp = CGDataProviderCreateWithData(NULL, data, w*h*4, NULL);
// NOTE(review): the byte order / alpha placement here (RGBA, 32-big)
// does not match the ARGB context drawn into above — see the answer
// below this question.
CGImageRef img = CGImageCreate(w, h, 8, 32, 4*w, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big, dp, NULL, NO, kCGRenderingIntentDefault);
UIImage* ret_image = [UIImage imageWithCGImage:img];
CGImageRelease(img);
// BUGFIX: release the data provider as well; it was leaked.
CGDataProviderRelease(dp);
CGColorSpaceRelease(colorSpace);
// Free image data memory for the context.
// NOTE(review): img/dp referenced this buffer with no release callback;
// freeing it here relies on imageWithCGImage: having copied the pixels —
// verify, or free it from a CGDataProvider release callback instead.
if (data) { free(data); }
return ret_image;
}
First one is original image and second image is after applying this code.
You have to ask the CGImageRef if it uses alpha or not, and the format of the components per pixel - look at all the CGImageGet... functions. Most likely the image is not ARGB but BGRA.
I often create and render pure green images then print out the first pixel to ensure I got it right (BGRA -> 0 255 0 255) etc. It really gets confusing with host order etc. and alpha first or last (is the alpha placement applied before or after the host byte order?)
EDIT: You told the CGDataProviderCreateWithData to use 'kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big', but I don't see you asking the original image for how its configured. My guess is that changing 'kCGBitmapByteOrder32Big' to 'kCGBitmapByteOrder32Little' will fix your problem but the alpha may be wrong too.
Images can have different values for alpha and byte order so you really need to ask the original image how its configured then adapt to that (or remap the bytes in memory to whatever format you want.)

CGBitmapContextCreate and interlaced? image

I'm converting some image drawing code from Cairo to Quartz and I'm slowly making progress and learning Quartz along the way but I've run into a problem with the image format.
In the Cairo version it works like this:
// Cairo version: copies raw pixel data row by row into the image surface.
// The stride is shifted right by 1 because get_stride() returns BYTES and
// the pixels here are 16-bit (unsigned short), i.e. 2 bytes each.
unsigned short *d = (unsigned short*)imageSurface->get_data();
int stride = imageSurface->get_stride() >> 1;
int height = imageHeight;
int width = imageWidth;
do {
// NOTE(review): `d = *p++` overwrites the destination POINTER with a
// pixel value instead of storing through it — presumably this should
// read `*d++ = *p++`; verify against the working Cairo code.
d = *p++; // p = raw image data
width --;
if( width == 0 ) {
// End of a row: reset the column counter and jump the destination
// ahead by one stride (skipping any row padding).
height --;
width = imageWidth;
d += stride;
}
} while( height );
Now this produces the image as expected on the Cairo::ImageSurface. I've converted this over to how use Quartz and it is making progress but I'm not sure where I'm going wrong:
// Quartz port of the copy loop above.
// NOTE(review): 8 bytes per pixel here vs. the 2-byte pixels in the Cairo
// version, and the per-row `d += stride` jump is missing — both look
// suspect and match the "stride" discussion below.
NSInteger pixelLen = (width * height) * 8;
unsigned char *d = (unsigned char*)malloc(pixelLen);
unsigned char *rawPixels = d;
int height = imageHeight;
int width = imageWidth;
do {
// NOTE(review): as in the Cairo version, `d = *p++` replaces the
// pointer rather than writing through it (`*d++ = *p++`?).
d = *p++; // p = raw image data
width --;
if( width == 0 ) {
height --;
width = imageWidth;
}
} while( height );
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
// NOTE(review): bytesPerRow is `tileSize * sizeof(int)` — confirm this
// equals the actual row stride of the source data; a mismatch produces
// exactly the "several shifted copies" artifact described below.
CGContextRef context = CGBitmapContextCreate(rawPixels, imageWidth, imageHeight, 8, tileSize * sizeof(int), colorSpace, kCGBitmapByteOrderDefault);
CGImageRef image = CGBitmapContextCreateImage(context);
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
UIImage *resultUIImage = [UIImage imageWithCGImage:image];
CGImageRelease(image);
Now this is obviously heading in the right direction as it produces something that looks a bit like the desired image, but it creates 4 copies of the image in a row, each with different pixels filled in, so I'm assuming this is an interlaced image (I don't know a great deal about image formats) and that I need to combine them somehow to create a complete image, but I don't know how to do that with Quartz.
I think the stride has something to do with the problem but from what I understand this is the byte distance from one row of pixels to another which would not be relevant in the context of Quartz?
It sounds like stride would correspond to rowBytes or bytesPerRow. This value is important because it is not necessarily equal to width * bytesPerPixel because rows might be padded to optimized offsets.
It's not completely obvious what the Cairo code is doing, and it doesn't look quite correct either. Either way, without the stride part, your loop makes no sense because it makes an exact copy of the bytes.
The loop in the Cairo code is copying a row of bytes, then jumping over the next row of data.

split UIImage by colors and create 2 images

I have looked through replacing colors in an image but cannot get it to work how i need because I am trying to do it with every color but one, as well as transparency.
what I am looking for is a way to take in an image and split out a color (say all the pure black) from that image. Then take that split out portion and make a new image with a transparent background and the split out portion.
(here is just an example of the idea, say i want to take a screenshot of this page. make every other color but pure black be transparent, and save that new image to the library, or put it into a UIImageView)
I have looked into CGImageCreateWithMaskingColors but can't seem to do what I need with the transparent portion, and I don't really understand the colorMasking input other than that you can provide it with a {Rmin,Rmax,Gmin,Gmax,Bmin,Bmax} color mask — but when I do, it colors everything. Any ideas or input would be great.
Sounds like you're going to have to get access to the underlying bytes and write code to process them directly. You can use CGImageGetDataProvider() to get access to the data of an image, but there's no guarantee that the format will be something you know how to handle. Alternately you can create a new CGContextRef using a specific format you know how to handle, then draw the original image into your new context, then process the underlying data. Here's a quick attempt at doing what you want (uncompiled):
// Returns a copy of `image` in which every pixel that is not pure black
// (R=G=B=0) is made fully transparent; black pixels keep their alpha.
//
// BUGFIX: the original subscripted a void* buffer (non-standard C) and
// tested bytes 1..3 of each pixel. With kCGBitmapByteOrder32Host on a
// little-endian device the in-memory layout is B,G,R,A — so those bytes
// are G, R and ALPHA, and every opaque black pixel was wrongly cleared.
// Reading each pixel as a host-order uint32_t (which is 0xAARRGGBB on
// any endianness — that is the point of host order) and masking off the
// alpha byte fixes both problems.
- (UIImage *)imageWithBlackPixels:(UIImage *)image {
CGImageRef cgImage = image.CGImage;
// create a premultiplied ARGB context with 32bpp
CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceRGB();
size_t width = CGImageGetWidth(cgImage);
size_t height = CGImageGetHeight(cgImage);
size_t bpc = 8; // bits per component
size_t bpp = bpc * 4 / 8; // bytes per pixel
size_t bytesPerRow = bpp * width;
uint32_t *data = malloc(bytesPerRow * height);
CGBitmapInfo bitmapInfo = kCGImageAlphaPremultipliedFirst | kCGBitmapByteOrder32Host;
CGContextRef ctx = CGBitmapContextCreate(data, width, height, bpc, bytesPerRow, colorspace, bitmapInfo);
CGColorSpaceRelease(colorspace);
if (ctx == NULL) {
// couldn't create the context - double-check the parameters?
free(data);
return nil;
}
// draw the image into the context
CGContextDrawImage(ctx, CGRectMake(0, 0, width, height), cgImage);
// Replace all non-black pixels with transparent, preserving the
// existing transparency of black pixels.
for (size_t y = 0; y < height; y++) {
uint32_t *row = data + y * width; // bytesPerRow == width * 4 exactly
for (size_t x = 0; x < width; x++) {
// Any nonzero R, G or B component means the pixel is non-black:
// zero it out entirely (fully transparent).
if (row[x] & 0x00FFFFFF) {
row[x] = 0;
}
}
}
// create our new image and release the context data
CGImageRef newCGImage = CGBitmapContextCreateImage(ctx);
CGContextRelease(ctx);
free(data);
UIImage *newImage = [UIImage imageWithCGImage:newCGImage scale:image.scale orientation:image.imageOrientation];
CGImageRelease(newCGImage);
return newImage;
}

Is there a way to measure skin tone of an object from an iphone app

I'm trying to find a way to program (or buy) an iPhone app that detects someone's skin tone against an objective scale, using RGB from a photo they take of themselves. Anyone got any pointers?
I think the objections to this are correct - its going to be pretty hard work to calibrate it. However, any solution is going to rely on being able to get the colour of a specific pixel in an image (in this case a UIImageView...)
// Returns the color of the pixel at `point` in self.image, or nil if the
// bitmap context could not be created or the point is outside the image.
- (UIColor*) getPixelColorAtLocation:(CGPoint)point {
UIColor* color = nil;
CGImageRef inImage = self.image.CGImage;
// Create off screen bitmap context to draw the image into. Format ARGB is 4 bytes for each pixel: Alpha, Red, Green, Blue
CGContextRef cgctx = [self createARGBBitmapContextFromImage:inImage];
if (cgctx == NULL) { return nil; /* error */ }
size_t w = CGImageGetWidth(inImage);
size_t h = CGImageGetHeight(inImage);
CGRect rect = {{0,0},{w,h}};
// Draw the image to the bitmap context. Once we draw, the memory
// allocated for the context for rendering will then contain the
// raw image data in the specified color space.
CGContextDrawImage(cgctx, rect, inImage);
// Now we can get a pointer to the image data associated with the bitmap
// context.
unsigned char* data = CGBitmapContextGetData (cgctx);
// BUGFIX: reject points outside the bitmap — the original read memory
// beyond the buffer for out-of-range points.
if (data != NULL && point.x >= 0 && point.y >= 0 && round(point.x) < w && round(point.y) < h) {
//offset locates the pixel in the data from x,y.
//4 for 4 bytes of data per pixel, w is width of one row of data.
int offset = 4*((w*round(point.y))+round(point.x));
int alpha = data[offset];
int red = data[offset+1];
int green = data[offset+2];
int blue = data[offset+3];
NSLog(@"offset: %i colors: RGB A %i %i %i %i",offset,red,green,blue,alpha);
color = [UIColor colorWithRed:(red/255.0f) green:(green/255.0f) blue:(blue/255.0f) alpha:(alpha/255.0f)];
}
// When finished, release the context
CGContextRelease(cgctx);
// Free image data memory for the context
if (data) { free(data); }
return color;
}
This code is from a colourPicker class which carries the following (c)
// Created by markj on 3/6/09.
// Copyright 2009 Mark Johnson. All rights reserved.
The full article is here
http://www.markj.net/iphone-uiimage-pixel-color/

iPhone Objective C: UIImage - Region of Interest

Is there any way to get the min rectangle area which contains all the non-transparent part of an UIImage?
Reading pixel by pixel to check where alpha == 0 ...isn't a good way I believe.
Any better way?
Many thanks for reading
I don't think there's a way to do this without examining the image pixel by pixel. Where are the images coming from? If you control them, you can at least do the pixel by pixel part only once and then either cache the information or distribute it along with the images if people are downloading them.
Okay here is my ugly solution to this problem. I hope there is a better way to do this.
// Returns the region of interest: the smallest rectangle containing every
// pixel of `pImage` with nonzero alpha. Returns the whole image if it is
// entirely transparent, and a zero rect on error.
//
// BUGFIX 1: the min/max trackers were seeded with ±999 sentinels, which
// silently broke for images larger than 999 pixels on a side; they are
// now seeded from the image bounds.
// BUGFIX 2: width/height were computed as max-min, so a single opaque
// pixel produced a zero-size rect; +1 makes the box inclusive (and the
// fully-transparent fallback still yields {0,0,w,h} as before).
- (CGRect) getROIRect:(UIImage*)pImage {
CGRect roiRect = {{0,0}, {0,0}};
int x,y;
CGImageRef inImage = pImage.CGImage;
// Create off screen bitmap context to draw the image into. Format ARGB is 4 bytes for each pixel: Alpha, Red, Green, Blue
CGContextRef cgctx = [self createARGBBitmapContextFromImage:inImage];
if (cgctx == NULL) { return roiRect; /* error */ }
size_t w = CGImageGetWidth(inImage);
size_t h = CGImageGetHeight(inImage);
int vMaxX = -1;
int vMinX = (int)w;
int vMaxY = -1;
int vMinY = (int)h;
CGRect rect = {{0,0},{w,h}};
// Draw the image to the bitmap context. Once we draw, the memory
// allocated for the context for rendering will then contain the
// raw image data in the specified color space.
CGContextDrawImage(cgctx, rect, inImage);
// Now we can get a pointer to the image data associated with the bitmap
// context.
unsigned char* data ;
BOOL tSet = NO;
data= CGBitmapContextGetData (cgctx);
if (data != NULL) {
for (x=0;x<w;x++) {
for (y=0;y<h;y++) {
// Offset locates the pixel in the data from x,y:
// 4 bytes of data per pixel, w pixels per row.
size_t offset = 4*((w*y)+x);
int alpha = data[offset]; // ARGB layout: alpha byte first
if (alpha > 0) {
tSet = YES;
if (x > vMaxX) {
vMaxX = x;
}
if (x < vMinX) {
vMinX = x;
}
if (y > vMaxY) {
vMaxY = y;
}
if (y < vMinY) {
vMinY = y;
}
}
}
}
}
if (!tSet) {
// No opaque pixel found (or unreadable data): fall back to the
// whole image, matching the original behavior.
vMinX = 0;
vMaxX = (int)w - 1;
vMinY = 0;
vMaxY = (int)h - 1;
}
// When finished, release the context
CGContextRelease(cgctx);
// Free image data memory for the context
if (data) { free(data); }
CGRect roiRect2 = {{vMinX,vMinY},{vMaxX - vMinX + 1,vMaxY - vMinY + 1}};
return roiRect2;
}
// Creates a 32-bit premultiplied ARGB bitmap context sized to `inImage`.
// The caller owns BOTH the returned context (CGContextRelease) and its
// pixel buffer (free the pointer from CGBitmapContextGetData).
- (CGContextRef) createARGBBitmapContextFromImage:(CGImageRef) inImage {
CGContextRef context = NULL;
CGColorSpaceRef colorSpace;
void * bitmapData;
// Get image width, height. We'll use the entire image.
size_t pixelsWide = CGImageGetWidth(inImage);
size_t pixelsHigh = CGImageGetHeight(inImage);
// 4 bytes per pixel: 8 bits each of alpha, red, green and blue.
// size_t (instead of int) so large images cannot overflow the
// byte-count arithmetic.
size_t bitmapBytesPerRow = pixelsWide * 4;
size_t bitmapByteCount = bitmapBytesPerRow * pixelsHigh;
// Use the device RGB color space.
//colorSpace = CGColorSpaceCreateWithName(kCGColorSpaceGenericRGB);
colorSpace = CGColorSpaceCreateDeviceRGB();
if (colorSpace == NULL)
{
fprintf(stderr, "Error allocating color space\n");
return NULL;
}
// Allocate memory for image data. calloc (not malloc) so the context
// starts out fully transparent instead of containing stale heap data.
bitmapData = calloc( bitmapByteCount, 1 );
if (bitmapData == NULL)
{
fprintf (stderr, "Memory not allocated!");
CGColorSpaceRelease( colorSpace );
return NULL;
}
// Create the bitmap context. Regardless of what the source image format
// is (CMYK, grayscale, and so on), drawing into it converts to the
// premultiplied ARGB format specified here.
context = CGBitmapContextCreate (bitmapData, pixelsWide, pixelsHigh, 8, bitmapBytesPerRow, colorSpace, kCGImageAlphaPremultipliedFirst);
if (context == NULL)
{
free (bitmapData);
fprintf (stderr, "Context not created!");
}
// Make sure and release colorspace before returning
CGColorSpaceRelease( colorSpace );
return context;
}