CTFrameGetLineOrigins Got Incorrect Origins - iphone

I use CTFrameGetLineOrigins to get the origin of each line, but it always returns the wrong line origins.
The code:
// Question snippet: lay out attrString with Core Text, compute a screen rect
// for every glyph run, and record the rects of runs that carry a font URL.
//
// NOTE(review): the "wrong" y values logged below are actually correct —
// CTFrameGetLineOrigins reports origins in Core Graphics coordinates
// (origin at bottom-left, y increasing upward) relative to the frame's path,
// so they must be flipped against the view height for UIKit use (see the
// accepted fix further down).
CGContextRef context = UIGraphicsGetCurrentContext();
CGContextSetTextMatrix(context, CGAffineTransformIdentity);
// Flip the context so the frame draws right-side up in a UIKit view.
CGContextTranslateCTM(context, 0, self.bounds.size.height);
CGContextScaleCTM(context, 1.0, -1.0);
CTFontRef font = CTFontCreateUIFontForLanguage(kCTFontUserFontType, self.font.pointSize, NULL);
CFAttributedStringSetAttribute(attrString, CFRangeMake(0, CFAttributedStringGetLength(attrString)), kCTForegroundColorAttributeName, self.textColor.CGColor);
CFAttributedStringSetAttribute(attrString, CFRangeMake(0, CFAttributedStringGetLength(attrString)), kCTFontAttributeName, font);
CTFramesetterRef framesetter = CTFramesetterCreateWithAttributedString(attrString);
CGPathRef path = CGPathCreateWithRect(CGRectInset(rect, TEXT_MARGIN, TEXT_MARGIN), NULL);
CTFrameRef frame = CTFramesetterCreateFrame(framesetter, CFRangeMake(0, 0), path, NULL);
CFArrayRef lines = CTFrameGetLines(frame);
size_t numOfLines = CFArrayGetCount(lines);
CGPoint lineOrigins[numOfLines];
CTFrameGetLineOrigins(frame, CFRangeMake(0, 0), lineOrigins);
for (CFIndex i = 0; i < numOfLines; i++) {
    CTLineRef line = CFArrayGetValueAtIndex(lines, i);
    CFArrayRef runs = CTLineGetGlyphRuns(line);
    size_t numOfRuns = CFArrayGetCount(runs);
    for (CFIndex j = 0; j < numOfRuns; j++) {
        CTRunRef run = CFArrayGetValueAtIndex(runs, j);
        CGRect strFrame = CGRectZero;
        CGFloat width = 0;
        CGFloat height = 0;
        CGFloat leading = 0;
        CGFloat ascent = 0;
        CGFloat descent = 0;
        CFRange strRange = CTRunGetStringRange(run);
        // x offset of the run's first character within its line.
        CGFloat offsetX = CTLineGetOffsetForStringIndex(line, strRange.location, NULL);
        width = (CGFloat)CTRunGetTypographicBounds(run, CFRangeMake(0, 0), &ascent, &descent, &leading);
        width += leading;
        height = ascent + descent;
        NSLog(@"Line %ld : Offset Y: %f", i+1, lineOrigins[i].y);
        strFrame = CGRectMake(lineOrigins[i].x + offsetX + TEXT_MARGIN, lineOrigins[i].y + TEXT_MARGIN - descent, width, height);
        strFrame = CGRectIntegral(strFrame);
        CFDictionaryRef attr = CTRunGetAttributes(run);
        if (attr != NULL) {
            CFStringRef url = CFDictionaryGetValue(attr, kCTFontURLAttribute);
            if (url != NULL) {
                [self.wordsFrame addObject:[NSDictionary dictionaryWithObjectsAndKeys:(__bridge NSString *)url, @"URL", NSStringFromCGRect(strFrame), @"frame", nil]];
            }
        }
    }
}
CTFrameDraw(frame, context);
CFRelease(font);
CGPathRelease(path);
CFRelease(frame);
CFRelease(framesetter);
Results:
2012-05-30 12:58:21.464 hjday[9362:707] Line 1 : Offset Y: 97.000000
2012-05-30 12:58:21.466 hjday[9362:707] Line 2 : Offset Y: 68.000000
2012-05-30 12:58:21.472 hjday[9362:707] Line 3 : Offset Y: 39.000000
Can anyone help me figure out what's wrong with my code? Or is there another way to get the origin of each CTRun?

I got it:
strFrame = CGRectMake(lineOrigins[i].x + offsetX + TEXT_MARGIN, self.bounds.size.height - (lineOrigins[i].y + TEXT_MARGIN - descent) - height, width, height);
You need to flip the y coordinate (convert from Core Graphics' bottom-left origin to UIKit's top-left origin).

Related

adding ellipsis to NSString

I have the following code, which I am trying to draw using Core Text, and that's why I can't clip the text the way UILabel does. In other words, I have to figure out the ellipsis ('...') myself.
CGSize commentSize = [[self.sizeDictionary_ valueForKey:commentSizeKey] CGSizeValue];
CGSize actualSize = [[self.sizeDictionary_ valueForKey:actualCommentSizeKey] CGSizeValue];
NSString *actualComment = self.highlightItem_.comment;
if (actualSize.height > commentSize.height){
actualComment = [self.highlightItem_.comment stringByReplacingCharactersInRange:NSMakeRange(68, 3) withString:#"..."];
}
I am having a difficult time finding the range where the '...' should go based on a CGSize. What would be the best way to figure this out?
Here's how I am drawing it:
// Builds an attributed string where the leading [name length] characters are
// blue/bold and the remainder gray/regular, then lays it out and draws it
// with Core Text.
// NOTE(review): CFBridgingRetain transfers ownership to CF and `string` is
// never CFReleased below, so it leaks on every call.
CFStringRef string = CFBridgingRetain(actualComment);
CFMutableAttributedStringRef comment = CFAttributedStringCreateMutable(kCFAllocatorDefault, 0);
CFAttributedStringReplaceString (comment ,CFRangeMake(0, 0), string);
CGColorRef blue = CGColorRetain([UIColor colorWithRed:131/255.f green:204/255.f blue:253/255.f alpha:1.0].CGColor);
CGColorRef gray = CGColorRetain([UIColor colorWithWhite:165/255.f alpha:1.0].CGColor);
// `name` is defined elsewhere in the class — presumably the commenter-name
// prefix of the comment string; TODO confirm.
CFAttributedStringSetAttribute(comment, CFRangeMake(0, [name length]),kCTForegroundColorAttributeName, blue);
CFAttributedStringSetAttribute(comment, CFRangeMake([name length], [self.highlightItem_.comment length] - [name length]),kCTForegroundColorAttributeName, gray);
CGColorRelease (blue);
CGColorRelease (gray);
// NOTE(review): CFBridgingRetain on the font-name constants also leaks; a
// plain __bridge cast would be correct here.
CTFontRef nameFont = CTFontCreateWithName(CFBridgingRetain(kProximaNovaBold), 13.0f, nil);
CFAttributedStringSetAttribute(comment,CFRangeMake(0, [name length]),kCTFontAttributeName,nameFont);
CTFontRef commentFont = CTFontCreateWithName(CFBridgingRetain(kProximaNovaRegular), 13.0f, nil);
CFAttributedStringSetAttribute(comment, CFRangeMake([name length], [self.highlightItem_.comment length] - [name length]),kCTFontAttributeName,commentFont);
// Vertically center the comment block within commentHeight_.
CGFloat commentYOffset = floorf((self.commentHeight_ - commentSize.height)/2);
// NOTE(review): `context` is used here but only declared several lines below —
// as quoted, this snippet would not compile; the declaration presumably
// belongs up here in the original method.
CGContextSaveGState(context);
CGRect captionFrame = CGRectMake(0, 0, rect.size.width - 80, commentSize.height);
CTFramesetterRef framesetter = CTFramesetterCreateWithAttributedString(comment);
CGMutablePathRef captionFramePath = CGPathCreateMutable();
CGPathAddRect(captionFramePath, NULL, captionFrame);
CTFrameRef mainCaptionFrame = CTFramesetterCreateFrame(framesetter, CFRangeMake(0, 0), captionFramePath, NULL);
CGContextRef context = UIGraphicsGetCurrentContext();
CGContextSetTextMatrix(context, CGAffineTransformIdentity);
// Flip into Core Text's y-up coordinate system before drawing.
CGContextTranslateCTM(context, self.buttonSize_ + 25, self.imageHeight_ + self.commentHeight_ + 6 - commentYOffset);
CGContextScaleCTM(context, 1.0, -1.0);
CTFrameDraw(mainCaptionFrame, context);
CGContextRestoreGState(context);
// NOTE(review): framesetter, captionFramePath, and mainCaptionFrame are
// created but never released — they leak on every draw.
EDIT
(My original answer here wasn't useful; it didn't handle multiple lines. If anyone wants to see it for historical interest, look in the edit history. I've deleted it since it causes more confusion than it solves. The current answer is correct code.)
What you need to do is let CTFramesetter work out all the lines except the last one. Then you can truncate the last one by hand if required.
- (void)drawRect:(CGRect)rect
{
    // Draws the attributed string inside a fixed path, letting the
    // framesetter lay out every line except the last, then hand-truncating
    // the last line with a proper ellipsis.
    CGContextRef context = UIGraphicsGetCurrentContext();
    CGContextSetTextMatrix(context, CGAffineTransformIdentity);
    CGRect pathRect = CGRectMake(50, 200, 200, 40);
    CGPathRef path = CGPathCreateWithRect(pathRect, NULL);
    CFAttributedStringRef attrString = (__bridge CFAttributedStringRef)[self attributedString];
    // Create the framesetter using the attributed string.
    CTFramesetterRef framesetter = CTFramesetterCreateWithAttributedString(attrString);
    // Create a single frame using the entire string (CFRange(0,0))
    // that fits inside of path.
    CTFrameRef frame = CTFramesetterCreateFrame(framesetter, CFRangeMake(0, 0), path, NULL);
    // Draw the lines except the last one.
    CFArrayRef lines = CTFrameGetLines(frame);
    CFIndex lineCount = CFArrayGetCount(lines);
    if (lineCount == 0) {
        // Nothing laid out (empty string or zero-size path): bail out before
        // indexing lines[lineCount - 1] below, which would crash.
        CFRelease(frame);
        CFRelease(framesetter);
        CGPathRelease(path);
        return;
    }
    CGPoint origins[lineCount]; // A stack array is safe here; it would be bad
                                // with thousands of lines, but that's unlikely.
    CTFrameGetLineOrigins(frame, CFRangeMake(0, 0), origins);
    for (CFIndex i = 0; i < lineCount - 1; ++i) {
        CGContextSetTextPosition(context, pathRect.origin.x + origins[i].x, pathRect.origin.y + origins[i].y);
        CTLineDraw(CFArrayGetValueAtIndex(lines, i), context);
    }
    // Make a new last line that includes the rest of the string.
    // The last line is already truncated (just with no truncation mark),
    // so we can't truncate it again.
    CTLineRef lastLine = CFArrayGetValueAtIndex(lines, lineCount - 1);
    CFIndex lastLocation = CTLineGetStringRange(lastLine).location;
    CFRange restRange = CFRangeMake(lastLocation, CFAttributedStringGetLength(attrString) - lastLocation);
    CFAttributedStringRef restOfString = CFAttributedStringCreateWithSubstring(NULL, attrString, restRange);
    CTLineRef restLine = CTLineCreateWithAttributedString(restOfString);
    // The truncation mark must be a real ellipsis character ("\u2026"),
    // not dot-dot-dot. (Dot-dot-dot works, it's just not correct.)
    // Obviously this could be cached…
    CTLineRef ellipsis = CTLineCreateWithAttributedString((__bridge CFAttributedStringRef)
                                                          [[NSAttributedString alloc] initWithString:@"…"]);
    // Truncate and draw. NOTE(review): if the ellipsis were wider than the
    // path width, CTLineCreateTruncatedLine would return NULL and the
    // CTLineDraw below would crash — don't do that.
    CTLineRef truncatedLine = CTLineCreateTruncatedLine(restLine,
                                                        CGRectGetWidth(pathRect),
                                                        kCTLineTruncationEnd,
                                                        ellipsis);
    CGContextSetTextPosition(context, pathRect.origin.x + origins[lineCount - 1].x, pathRect.origin.y + origins[lineCount - 1].y);
    CTLineDraw(truncatedLine, context);
    CFRelease(truncatedLine);
    CFRelease(ellipsis);
    CFRelease(restLine);
    CFRelease(restOfString);
    CFRelease(frame);
    CFRelease(framesetter);
    CGPathRelease(path);
}
How about something like this...
- (NSString *)truncate:(NSString *)string toWidth:(CGFloat)width withFont:(UIFont *)font {
    // Returns `string` unchanged when it fits within `width`; otherwise chops
    // characters off the end one at a time until "<prefix>..." fits (or the
    // prefix is empty).
    // NOTE(review): -sizeWithFont: is deprecated; the modern equivalent is
    // -sizeWithAttributes:@{NSFontAttributeName: font}. Kept for fidelity.
    CGSize size = [string sizeWithFont:font];
    if (size.width <= width) return string;
    NSString *truncatedString = [string copy];
    NSString *ellipticalString = [truncatedString stringByAppendingString:@"..."];
    size = [ellipticalString sizeWithFont:font];
    // Shrink by one character per iteration until the elliptical form fits.
    while (size.width > width && truncatedString.length) {
        truncatedString = [truncatedString substringToIndex:(truncatedString.length-1)];
        ellipticalString = [truncatedString stringByAppendingString:@"..."];
        size = [ellipticalString sizeWithFont:font];
    }
    return ellipticalString;
}
The easiest and simplest way,
// Simplest approach: let the string-drawing machinery truncate with a tail
// ellipsis via the paragraph style.
NSString *theText = @"bla blah bla bhla bla bla";
NSMutableParagraphStyle *style = [[NSParagraphStyle defaultParagraphStyle] mutableCopy];
[style setLineBreakMode:NSLineBreakByTruncatingTail];
[theText drawInRect:dirtyRect withAttributes:[NSDictionary dictionaryWithObjectsAndKeys:style, NSParagraphStyleAttributeName,nil]];
for more

iPhone paint bucket

I am working on implementing a flood-fill paint-bucket tool in an iPhone app and am having some trouble with it. The user is able to draw and I would like the paint bucket to allow them to tap a spot and fill everything of that color that is connected.
Here's my idea:
1) Start at the point the user selects
2) Save points checked to a NSMutableArray so they don't get re-checked
3) If the pixel color at the current point is the same as the original clicked point, save to an array to be changed later
4) If the pixel color at the current point is different than the original, return. (boundary)
5) Once finished scanning, go through the array of pixels to change and set them to the new color.
But this is not working out so far. Any help or knowledge of how to do this would be greatly appreciated! Here is my code.
-(void)flood:(int)x:(int)y
{
// Recursive 4-way flood fill starting at (x, y). Visited pixels are tracked
// in checkedFloodPixels; reachedStopColor:: both tests for a color boundary
// and (as a side effect) records same-colored pixels for the later fill pass.
// NOTE(review): plain recursion can overflow the stack on large regions —
// an explicit queue/stack (see the flood-fill literature) is the usual fix.
// Boxing every coordinate in an NSValue and using containsObject: (a linear
// scan on NSMutableArray) is also very slow; an NSMutableSet would help.
//NSLog(@"Flood %i %i", x, y);
CGPoint point = CGPointMake(x, y);
NSValue *value = [NSValue valueWithCGPoint:point];
//Don't repeat checked pixels
if([self.checkedFloodPixels containsObject:value])
{
return;
}
else
{
//If not checked, mark as checked
[self.checkedFloodPixels addObject:value];
//Make sure in bounds
if([self isOutOfBounds:x:y] || [self reachedStopColor:x:y])
{
return;
}
//Go to adjacent points
[self flood:x+1:y];
[self flood:x-1:y];
[self flood:x:y+1];
[self flood:x:y-1];
}
}
- (BOOL)isOutOfBounds:(int)x:(int)y
{
    // Returns YES when (x, y) lies outside (or exactly on the edge of) the
    // drawImage's frame.
    // NOTE(review): frame is expressed in the superview's coordinate space —
    // if x/y are image-local pixel coordinates this compares two different
    // spaces; confirm against the caller.
    CGRect f = self.drawImage.frame;
    BOOL insideY = (y > f.origin.y) && (y < (f.origin.y + f.size.height));
    BOOL insideX = (x > f.origin.x) && (x < (f.origin.x + f.size.width));
    BOOL outOfBounds = !(insideX && insideY);
    if(outOfBounds)
        NSLog(@"Out of bounds");
    return outOfBounds;
}
- (BOOL)reachedStopColor:(int)x:(int)y
{
    // Compares the pixel at (x, y) against the pixel at the originally tapped
    // point (lastPoint). Returns YES (boundary) only when ALL THREE channels
    // differ by more than a 0.02 tolerance; otherwise records the pixel's
    // byte index in self.pixelsToChange for later recoloring.
    // TODO(review): requiring all three channels to differ means a pixel that
    // differs on only one channel is treated as "same color" — confirm that
    // an OR of the channel comparisons isn't what was intended.
    CFDataRef theData = CGDataProviderCopyData(CGImageGetDataProvider(self.drawImage.image.CGImage));
    const UInt8 *pixelData = CFDataGetBytePtr(theData);
    int red = 0;    // channel byte offsets within one 4-byte pixel
    int green = 1;
    int blue = 2;
    //RGB for point being checked
    float newPointR;
    float newPointG;
    float newPointB;
    //RGB for point initially clicked
    float oldPointR;
    float oldPointG;
    float oldPointB;
    int index;
    BOOL reachedStopColor = NO;
    // TODO(review): this offset math looks wrong — a pixel's byte offset is
    // (bytesPerRow * y) + 4 * x, not x * y rounded down to a multiple of 4.
    //Format oldPoint RBG - pixels are every 4 bytes so round to 4
    index = lastPoint.x * lastPoint.y;
    if(index % 4 != 0)
    {
        index -= 2;
        index /= 4;
        index *= 4;
    }
    // Normalize to 0.0-1.0, then truncate to three decimal places so the
    // tolerance comparison below isn't thrown off by float noise.
    oldPointR = pixelData[index + red];
    oldPointG = pixelData[index + green];
    oldPointB = pixelData[index + blue];
    oldPointR /= 255.0;
    oldPointG /= 255.0;
    oldPointB /= 255.0;
    oldPointR *= 1000;
    oldPointG *= 1000;
    oldPointB *= 1000;
    int oldR = oldPointR;
    int oldG = oldPointG;
    int oldB = oldPointB;
    oldPointR = oldR / 1000.0;
    oldPointG = oldG / 1000.0;
    oldPointB = oldB / 1000.0;
    //Format newPoint RBG (same questionable offset math as above)
    index = x*y;
    if(index % 4 != 0)
    {
        index -= 2;
        index /= 4;
        index *= 4;
    }
    newPointR = pixelData[index + red];
    newPointG = pixelData[index + green];
    newPointB = pixelData[index + blue];
    newPointR /= 255.0;
    newPointG /= 255.0;
    newPointB /= 255.0;
    newPointR *= 1000;
    newPointG *= 1000;
    newPointB *= 1000;
    int newR = newPointR;
    int newG = newPointG;
    int newB = newPointB;
    newPointR = newR / 1000.0;
    newPointG = newG / 1000.0;
    newPointB = newB / 1000.0;
    //Check if different color: only a simultaneous mismatch on R, G, and B
    //counts as a boundary; any "same" channel records the pixel instead.
    if(newPointR < (oldPointR - 0.02f) || newPointR > (oldPointR + 0.02f))
    {
        if(newPointG < (oldPointG - 0.02f) || newPointG > (oldPointG + 0.02f))
        {
            if(newPointB < (oldPointB - 0.02f) || newPointB > (oldPointB + 0.02f))
            {
                reachedStopColor = YES;
                NSLog(@"Different Color");
            }
            else
            {
                NSLog(@"Same Color3");
                NSNumber *num = [NSNumber numberWithInt:index];
                [self.pixelsToChange addObject:num];
            }
        }
        else
        {
            NSLog(@"Same Color2");
            NSNumber *num = [NSNumber numberWithInt:index];
            [self.pixelsToChange addObject:num];
        }
    }
    else
    {
        NSLog(@"Same Color1");
        NSNumber *num = [NSNumber numberWithInt:index];
        [self.pixelsToChange addObject:num];
    }
    CFRelease(theData);
    if(reachedStopColor)
        NSLog(@"Reached stop color");
    return reachedStopColor;
}
-(void)fillAll
{
    // Recolors every pixel recorded in self.pixelsToChange with the current
    // GameManager RGB values, then swaps the rebuilt image into drawImage.
    CGContextRef ctx;
    CGImageRef imageRef = self.drawImage.image.CGImage;
    NSUInteger width = CGImageGetWidth(imageRef);
    NSUInteger height = CGImageGetHeight(imageRef);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    unsigned char *rawData = malloc(height * width * 4);
    NSUInteger bytesPerPixel = 4;
    NSUInteger bytesPerRow = bytesPerPixel * width;
    NSUInteger bitsPerComponent = 8;
    // Render the current image into a raw RGBA8888 buffer we can modify.
    CGContextRef context = CGBitmapContextCreate(rawData, width, height,
                                                 bitsPerComponent, bytesPerRow, colorSpace,
                                                 kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGColorSpaceRelease(colorSpace);
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
    CGContextRelease(context);
    int red = 0;
    int green = 1;
    int blue = 2;
    int index;
    NSNumber *num;
    // NOTE(review): the stored indices were computed in reachedStopColor::
    // with different (suspect) offset math — confirm they match this buffer's
    // bytesPerRow layout.
    for(int i = 0; i < [self.pixelsToChange count]; i++)
    {
        num = [self.pixelsToChange objectAtIndex:i];
        index = [num intValue];
        rawData[index + red] = (char)[[GameManager sharedManager] RValue];
        rawData[index + green] = (char)[[GameManager sharedManager] GValue];
        rawData[index + blue] = (char)[[GameManager sharedManager] BValue];
    }
    ctx = CGBitmapContextCreate(rawData,
                                CGImageGetWidth( imageRef ),
                                CGImageGetHeight( imageRef ),
                                8,
                                CGImageGetBytesPerRow( imageRef ),
                                CGImageGetColorSpace( imageRef ),
                                kCGImageAlphaPremultipliedLast );
    // BUG FIX: the original reassigned imageRef to the context image and
    // never released it, leaking a full-size CGImage on every call.
    CGImageRef newImageRef = CGBitmapContextCreateImage (ctx);
    UIImage* rawImage = [UIImage imageWithCGImage:newImageRef];
    CGImageRelease(newImageRef);
    CGContextRelease(ctx);
    self.drawImage.image = rawImage;
    free(rawData);
}
So I found this (I know the question might be irrelevant now, but it isn't for people who are still looking for something like this):
To get the color at a pixel from a context (modified code from here):
- (UIColor*) getPixelColorAtLocation:(CGPoint)point {
    // Reads the 4-byte pixel under `point` from the current graphics
    // context's backing store and returns it as a UIColor (nil if the
    // context has no accessible data).
    // BUG FIX: `color` was left uninitialized in the original, so a NULL
    // data pointer returned garbage.
    UIColor* color = nil;
    CGContextRef cgctx = UIGraphicsGetCurrentContext();
    unsigned char* data = CGBitmapContextGetData (cgctx);
    if (data != NULL) {
        // 4 bytes per pixel; ContextWidth is an instance variable because the
        // row width can't be read back here (CGBitmapContextGetBytesPerRow
        // would be the more robust choice).
        int offset = 4*((ContextWidth*round(point.y))+round(point.x));
        int alpha = data[offset];
        int red = data[offset+1];
        int green = data[offset+2];
        int blue = data[offset+3];
        color = [UIColor colorWithRed:(red/255.0f) green:(green/255.0f) blue:(blue/255.0f) alpha:(alpha/255.0f)];
    }
    // BUG FIX: do NOT free(data) — the buffer is owned by the context, not by
    // this method; freeing it here corrupts the heap.
    return color;
}
and the fill algorithm: is here
This is what I'm using, but the fill itself is quite slow compared to CGPath drawing styles. Though if you're rendering offscreen and/or filling dynamically like this, it looks kind of cool:

CoreText,Copy&Paste

I want to use UIMenuController with Core Text, but I can't select the words. I think I must compute the coordinates of the words, but the coordinates can't be matched up with an NSRange. Is there any function to solve this?
Sorry, my English is not good.
Here is my code
// Snippet: walk every line and glyph run in leftFrame, logging each run's
// image bounds — the starting point for mapping string ranges to on-screen
// coordinates.
CFArrayRef lines = CTFrameGetLines(leftFrame);
CFIndex i, total = CFArrayGetCount(lines);
CGFloat y;
for (i = 0; i < total; i++) {
    // Fetch this line's baseline origin (frame-relative, y-up coordinates).
    CGPoint origins;
    CTFrameGetLineOrigins( leftFrame, CFRangeMake(i, 1), &origins);
    CTLineRef line = (CTLineRef)CFArrayGetValueAtIndex(lines, i);
    // Convert the baseline to top-down (UIKit) coordinates.
    y = self.bounds.origin.y + self.bounds.size.height - origins.y;
    //CTLineDraw(line, UIGraphicsGetCurrentContext());
    CFArrayRef runs = CTLineGetGlyphRuns(line);
    CFIndex r, runsTotal = CFArrayGetCount(runs);
    //NSLog(@"runsTotal = %d",runsTotal);
    for (r = 0; r < runsTotal; r++) {
        CGRect runBounds = CTRunGetImageBounds(CFArrayGetValueAtIndex(runs, r), context, CFRangeMake(0, 0));
        NSLog(@"runBounds.x = %f,runBounds.y = %f",runBounds.origin.x,runBounds.origin.y);
        CFIndex index = CTRunGetStringRange(CFArrayGetValueAtIndex(runs, r)).location;
        //NSLog(@"%d",index);
    }
}
You can use a tap gesture to get the tap position,
and you have the CTFrame,
Then You can have the needed text frame calculated.
Here is an example of how to calculate the frame of the two characters at the tap position.
Pass in a CTFrame and the tap point; get back the two characters' frame and their range within the CTFrame.
+(CGRect)parserRectWithPoint:(CGPoint)point range:(NSRange *)selectRange frameRef:(CTFrameRef)frameRef
{
// Hit-tests `point` against the lines of `frameRef`; when a line is hit,
// returns the rect covering the two characters at the tap position and
// writes the corresponding string range into *selectRange.
// Returns CGRectZero when no line contains the point.
// NOTE(review): the returned rect's y (baselineOrigin.y - descent) is in the
// frame's y-up coordinate space while lineFrame above flips into y-down —
// confirm which space the caller expects.
CFIndex index = -1;
CGPathRef pathRef = CTFrameGetPath(frameRef);
CGRect bounds = CGPathGetBoundingBox(pathRef);
CGRect rect = CGRectZero;
// get lines
NSArray *lines = (__bridge NSArray *)CTFrameGetLines(frameRef);
if (!lines) {
return rect;
}
NSInteger lineCount = [lines count];
// prepare for origins of each line
CGPoint *origins = malloc(lineCount * sizeof(CGPoint));
if (lineCount) {
CTFrameGetLineOrigins(frameRef, CFRangeMake(0, 0), origins);
// check tap point in every line
// found, then break
for (int i = 0; i<lineCount; i++) {
CGPoint baselineOrigin = origins[i];
CTLineRef line = (__bridge CTLineRef)[lines objectAtIndex:i];
CGFloat ascent,descent,linegap; // the font's ascent, descent, and line gap
CGFloat lineWidth = CTLineGetTypographicBounds(line, &ascent, &descent, &linegap);
// Line rect in top-down coordinates, padded by the app's line spacing.
CGRect lineFrame = CGRectMake(baselineOrigin.x, CGRectGetHeight(bounds)-baselineOrigin.y-ascent, lineWidth, ascent+descent+linegap+[LSYReadConfig shareInstance].lineSpace);
if (CGRectContainsPoint(lineFrame,point)){
// line found
CFRange stringRange = CTLineGetStringRange(line);
// glyph found
index = CTLineGetStringIndexForPosition(line, point);
CGFloat xStart = CTLineGetOffsetForStringIndex(line, index, NULL);
CGFloat xEnd;
// choose two words by default
// (near the end of the line, extend backwards instead of forwards)
if (index > stringRange.location+stringRange.length-2) {
xEnd = xStart;
xStart = CTLineGetOffsetForStringIndex(line,index-2,NULL);
(*selectRange).location = index-2;
}
else{
xEnd = CTLineGetOffsetForStringIndex(line,index+2,NULL);
(*selectRange).location = index;
}
// choose two character
(*selectRange).length = 2;
rect = CGRectMake(origins[i].x+xStart,baselineOrigin.y-descent,fabs(xStart-xEnd), ascent+descent);
break;
}
}
}
free(origins);
return rect;
}
core-text does not support the text-selection, you need to do it by yourself, please follow the EGOTextView as example

Filling a portion of an image with color

I'm doing an iPhone painting app. I would like to paint a particular portion of an image using a touch event to find the pixel data, and then use that pixel data to paint the remaining part of the image. Using the touch event, I got the pixel value for the portion:
- (void)touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event {
    // Records the tapped location (shifted up 20pt), loads the pattern image
    // for the current index, and samples the pixel color at the tap.
    UITouch *touch = [touches anyObject];
    startPoint = [[touches anyObject] locationInView:imageView];
    NSLog(@"the value of the index is %i",index);
    NSString* imageName=[NSString stringWithFormat:@"roof%i", index];
    tempColor = [[UIColor alloc] initWithPatternImage:[UIImage imageNamed:imageName]];
    lastPoint = [touch locationInView:self.view];
    lastPoint.y -= 20;
    // NOTE(review): round-tripping the coordinates through "%.0f" strings
    // rounds to the nearest integer; a direct lround() would be clearer.
    NSString *tx = [[NSString alloc]initWithFormat:@"%.0f", lastPoint.x];
    NSString *ty = [[NSString alloc]initWithFormat:@"%.0f", lastPoint.y];
    NSLog(@"the vale of the string is %@ and %@",tx,ty);
    int ix=[tx intValue];
    int iy=[ty intValue];
    int z=1;
    NSLog(@"the vale of the string is %i and %i and z is %i",ix,iy,z);
    [self getRGBAsFromImage:newImage atX:ix andY:iy count1:1];
}
Here I'm getting the pixel data for the image:
// Reads `count` consecutive pixels starting at (xx, yy) from `image` and
// returns them as UIColor objects.
// BUG FIX: the original declared the return type as void yet ended with
// `return result;`, which does not compile. Callers that invoke this as a
// statement (ignoring the result) are unaffected by the corrected signature.
-(NSArray *)getRGBAsFromImage:(UIImage*)image atX:(int)xx andY:(int)yy count1:(int)count
{
    NSMutableArray *result = [NSMutableArray arrayWithCapacity:count];
    // First get the image into your data buffer
    CGImageRef imageRef = [image CGImage];
    NSUInteger width = CGImageGetWidth(imageRef);
    NSUInteger height = CGImageGetHeight(imageRef);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    unsigned char *rawData = malloc(height * width * 4);
    NSUInteger bytesPerPixel = 4;
    NSUInteger bytesPerRow = bytesPerPixel * width;
    NSUInteger bitsPerComponent = 8;
    CGContextRef context = CGBitmapContextCreate(rawData, width, height,
                                                 bitsPerComponent, bytesPerRow, colorSpace,
                                                 kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGColorSpaceRelease(colorSpace);
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
    CGContextRelease(context);
    // Now rawData contains the image data in the RGBA8888 pixel format.
    int byteIndex = (bytesPerRow * yy) + xx * bytesPerPixel;
    for (int ii = 0 ; ii < count ; ++ii)
    {
        CGFloat red = (rawData[byteIndex] * 1.0) / 255.0;
        CGFloat green = (rawData[byteIndex + 1] * 1.0) / 255.0;
        CGFloat blue = (rawData[byteIndex + 2] * 1.0) / 255.0;
        CGFloat alpha = (rawData[byteIndex + 3] * 1.0) / 255.0;
        byteIndex += 4;
        NSLog(@"the vale of the rbg of red is %f",red);
        UIColor *acolor = [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
        [result addObject:acolor];
    }
    free(rawData);
    return result;
}
Using a tolerance value, I'm getting the data. Here I'm struggling to paint the remaining section.
- (BOOL)cgHitTestForArea:(CGRect)area {
    // Renders the layer region `area` into an offscreen RGBA bitmap and
    // returns TRUE when the fraction of fully-transparent pixels is below
    // the layer's "hitTolerance" value.
    BOOL hit = FALSE;
    CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceRGB();
    // Total byte count of the region: width * 4 bytes per pixel * height.
    float areaFloat = ((area.size.width * 4) * area.size.height);
    unsigned char *bitmapData = malloc(areaFloat);
    CGContextRef context = CGBitmapContextCreate(bitmapData,
                                                 area.size.width,
                                                 area.size.height,
                                                 8,
                                                 4*area.size.width,
                                                 colorspace,
                                                 kCGImageAlphaPremultipliedLast);
    // Shift so that `area` lands at the context origin.
    CGContextTranslateCTM(context, -area.origin.x, -area.origin.y);
    [self.layer renderInContext:context];
    //Seek through all pixels.
    float transparentPixels = 0;
    for (int i = 0; i < (int)areaFloat ; i += 4) {
        //Count each transparent pixel (alpha byte == 0).
        if (((bitmapData[i + 3] * 1.0) / 255.0) == 0) {
            transparentPixels += 1;
        }
    }
    free(bitmapData);
    //Calculate the percentage of transparent pixels.
    float hitTolerance = [[self.layer valueForKey:@"hitTolerance"]floatValue];
    // NOTE(review): the log divides by total BYTES (areaFloat) while the
    // decision below divides by total PIXELS (areaFloat/4) — confirm which
    // scale hitTolerance is expressed in.
    NSLog(@"Apixels: %f hitPercent: %f",transparentPixels,(transparentPixels/areaFloat));
    if ((transparentPixels/(areaFloat/4)) < hitTolerance) {
        hit = TRUE;
    }
    CGColorSpaceRelease(colorspace);
    CGContextRelease(context);
    return hit;
}
Any suggestions to make this work please?
First, turning a bitmap image into an NSArray of UIColor objects is nuts. Way, way too much overhead. Work with a pixelbuffer instead. Learn how to use pointers.
http://en.wikipedia.org/wiki/Flood_fill#The_algorithm provides a good overview of a few simple techniques for performing a flood-fill — using either recursion or queues.

RGB in image processing in iphone app

I am doing image processing in my app. I get the pixel color from the image and apply it to the image by touching. My code gets the pixel color, but it changes the whole image to blue and applies that blue during processing. I am stuck and don't know what is going wrong in my code. Could you please help me?
My code is:
-(void) touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event
{
    // On first touch, samples the tapped pixel color; then copies bgImage's
    // raw bytes through Image_Correction (which stamps the picked color
    // around the tap) and rebuilds the UIImage from the modified buffer.
    UITouch *touch = [touches anyObject];
    CGPoint coordinateTouch = [touch locationInView:[self view]];//where image was tapped
    if (value) {
        self.lastColor = [self getPixelColorAtLocation:coordinateTouch];
        value =NO;
    }
    NSLog(@"color %@",lastColor);
    //[pickedColorDelegate pickedColor:(UIColor*)self.lastColor];
    ListPoint point;
    point.x = coordinateTouch.x;
    point.y = coordinateTouch.y;
    // Temporary marker button shown at the tap location while processing.
    button = [UIButton buttonWithType:UIButtonTypeCustom];
    button.backgroundColor = [UIColor whiteColor];
    button.frame = CGRectMake(coordinateTouch.x-5, coordinateTouch.y-5, 2, 2);
    //[descImageView addSubview:button];
    [bgImage addSubview:button];
    // Make image blurred on ImageView
    if(bgImage.image)
    {
        CGImageRef imgRef = [[bgImage image] CGImage];
        CFDataRef dataRef = CGDataProviderCopyData(CGImageGetDataProvider(imgRef));
        const unsigned char *sourceBytesPtr = CFDataGetBytePtr(dataRef);
        int len = CFDataGetLength(dataRef);
        // NOTE(review): the size_t results below are passed to %d — this
        // produces format-specifier warnings on 64-bit; cast or use %zu.
        NSLog(@"length = %d, width = %d, height = %d, bytes per row = %d, bit per pixels = %d",
              len, CGImageGetWidth(imgRef), CGImageGetHeight(imgRef), CGImageGetBytesPerRow(imgRef), CGImageGetBitsPerPixel(imgRef));
        int width = CGImageGetWidth(imgRef);
        int height = CGImageGetHeight(imgRef);
        int widthstep = CGImageGetBytesPerRow(imgRef);
        unsigned char *pixelData = (unsigned char *)malloc(len);
        double wFrame = bgImage.frame.size.width;
        double hFrame = bgImage.frame.size.height;
        // Copy + stamp: writes the corrected pixels into pixelData.
        Image_Correction(sourceBytesPtr, pixelData, widthstep, width, height, wFrame, hFrame, point);
        NSLog(@"finish");
        // dataWithBytes: copies, so pixelData can be freed below.
        NSData *data = [NSData dataWithBytes:pixelData length:len];
        NSLog(@"1");
        CGDataProviderRef provider = CGDataProviderCreateWithCFData((CFDataRef)data);
        NSLog(@"2");
        CGColorSpaceRef colorSpace2 = CGColorSpaceCreateDeviceRGB();
        NSLog(@"3");
        CGImageRef imageRef = CGImageCreate(width, height, 8, CGImageGetBitsPerPixel(imgRef), CGImageGetBytesPerRow(imgRef),
                                            colorSpace2,kCGImageAlphaNoneSkipFirst|kCGBitmapByteOrder32Host,
                                            provider, NULL, false, kCGRenderingIntentDefault);
        NSLog(@"Start processing image");
        UIImage *ret = [UIImage imageWithCGImage:imageRef scale:1.0 orientation:UIImageOrientationUp];
        CGImageRelease(imageRef);
        CGDataProviderRelease(provider);
        CGColorSpaceRelease(colorSpace2);
        CFRelease(dataRef);
        free(pixelData);
        NSLog(@"4");
        bgImage.image = ret;
        [button removeFromSuperview];
    }
}
- (UIColor*) getPixelColorAtLocation:(CGPoint)point {
    // Samples the ARGB pixel under `point` from self.image by rendering the
    // image into an offscreen bitmap context, and returns it as a UIColor.
    UIColor* color = nil;
    CGImageRef inImage = self.image.CGImage;
    // Create off screen bitmap context to draw the image into. Format ARGB is
    // 4 bytes for each pixel: Alpha, Red, Green, Blue.
    CGContextRef cgctx = [self createARGBBitmapContextFromImage:inImage];
    if (cgctx == NULL) { return nil; /* error */ }
    size_t w = CGImageGetWidth(inImage);
    size_t h = CGImageGetHeight(inImage);
    CGRect rect = {{0,0},{w,h}};
    // Draw the image to the bitmap context. Once we draw, the memory
    // allocated for the context for rendering will then contain the
    // raw image data in the specified color space.
    CGContextDrawImage(cgctx, rect, inImage);
    // Now we can get a pointer to the image data associated with the bitmap
    // context.
    unsigned char* data = CGBitmapContextGetData (cgctx);
    if (data != NULL) {
        //offset locates the pixel in the data from x,y.
        //4 for 4 bytes of data per pixel, w is width of one row of data.
        int offset = 4*((w*round(point.y))+round(point.x));
        // NOTE(review): alpha/red/green/blue are file-scope variables shared
        // with Image_Correction(), not locals.
        alpha = data[offset];
        red = data[offset+1];
        green = data[offset+2];
        blue = data[offset+3];
        NSLog(@"offset: %i colors: RGB A %i %i %i %i",offset,red,green,blue,alpha);
        color = [UIColor colorWithRed:(red/255.0f) green:(green/255.0f) blue:(blue/255.0f) alpha:(alpha/255.0f)];
    }
    // Release the context, then free the malloc'd buffer that
    // createARGBBitmapContextFromImage allocated for it (we own it here).
    CGContextRelease(cgctx);
    if (data) { free(data); }
    return color;
}
- (CGContextRef) createARGBBitmapContextFromImage:(CGImageRef) inImage {
    // Builds an offscreen premultiplied-ARGB bitmap context (8 bits per
    // component, 4 bytes per pixel) sized to hold inImage. The source image's
    // own format doesn't matter — drawing into this context converts it.
    // The caller owns both the returned context and its malloc'd backing
    // buffer (reachable via CGBitmapContextGetData) and must free both.
    // Returns NULL on any allocation failure.
    size_t pixelsWide = CGImageGetWidth(inImage);
    size_t pixelsHigh = CGImageGetHeight(inImage);
    // 4 bytes per pixel: alpha, red, green, blue.
    int bytesPerRow = (int)(pixelsWide * 4);
    int totalBytes = bytesPerRow * (int)pixelsHigh;

    // Use the generic RGB color space.
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    if (colorSpace == NULL)
    {
        fprintf(stderr, "Error allocating color space\n");
        return NULL;
    }

    // Destination buffer that all drawing into the context will render into.
    void *pixelBuffer = malloc(totalBytes);
    if (pixelBuffer == NULL)
    {
        fprintf (stderr, "Memory not allocated!");
        CGColorSpaceRelease( colorSpace );
        return NULL;
    }

    CGContextRef context = CGBitmapContextCreate (pixelBuffer,
                                                  pixelsWide,
                                                  pixelsHigh,
                                                  8, // bits per component
                                                  bytesPerRow,
                                                  colorSpace,
                                                  kCGImageAlphaPremultipliedFirst);
    if (context == NULL)
    {
        free (pixelBuffer);
        fprintf (stderr, "Context not created!");
    }
    // Make sure and release colorspace before returning.
    CGColorSpaceRelease( colorSpace );
    return context;
}
// Copies pImage (RGBA bytes, `widthstep` bytes per row, nW x nH pixels) into
// rImage, then stamps a square region around the touched point with the
// file-scope picked color (red/green/blue/alpha, set by
// getPixelColorAtLocation:). wFrame/hFrame are the on-screen view size used
// to map the view-space touch point into image pixel coordinates.
// Always returns 1.
int Image_Correction(const unsigned char *pImage, unsigned char *rImage, int widthstep, int nW, int nH, double wFrame, double hFrame, ListPoint point)
{
    // Aspect-fit ratio between the image and the view it is displayed in.
    double ratiox = nW/wFrame;
    double ratioy = nH/hFrame;
    double newW, newH, ratio;
    if(ratioy > ratiox)
    {
        newH = hFrame;
        newW = nW/ratioy;
        ratio = ratioy;
    }
    else
    {
        newH = nH/ratiox;
        newW = wFrame;
        ratio = ratiox;
    }
    NSLog(@"new H, W = %f, %f", newW, newH);
    NSLog(@"ratiox = %f; ratioy = %f", ratiox, ratioy);
    // Map the view-space touch point into image pixel coordinates.
    ListPoint real_point;
    real_point.x = (point.x - wFrame/2 + newW/2) *ratio;
    real_point.y = (point.y - hFrame/2 + newH/2)*ratio;
    // Straight copy of all four channels into the destination buffer.
    for(int h = 0; h < nH; h++)
    {
        for(int k = 0; k < nW; k++)
        {
            rImage[h*widthstep + k*4 + 0] = pImage[h*widthstep + k*4 + 0];
            rImage[h*widthstep + k*4 + 1] = pImage[h*widthstep + k*4 + 1];
            rImage[h*widthstep + k*4 + 2] = pImage[h*widthstep + k*4 + 2];
            rImage[h*widthstep + k*4 + 3] = pImage[h*widthstep + k*4 + 3];
        }
    }
    // Modify this parameter to change the stamped (blurred) area.
    int iBlurredArea = 6;
    for(int h = -ratio*iBlurredArea; h <= ratio*iBlurredArea; h++)
        for(int k = -ratio*iBlurredArea; k <= ratio*iBlurredArea; k++)
        {
            int tempx = real_point.x + k;
            int tempy = real_point.y + h;
            // BUG FIX: the original checked only that tempx/tempy were > 0 on
            // BOTH sides (e.g. `(tempy + 3) > 0`), so pixels near the right
            // and bottom edges indexed past the end of both buffers. Clamp
            // against the image dimensions as well.
            if (((tempy - 3) > 0) && ((tempy + 3) < nH) && ((tempx - 3) > 0) && ((tempx + 3) < nW))
            {
                double sumR = 0;
                double sumG = 0;
                double sumB = 0;
                double sumA = 0;
                double count = 0;
                // The averaging (box blur) is commented out: the picked color
                // is currently stamped directly instead.
                for(int m = -3; m < 4; m++)
                    for (int n = -3; n < 4; n++)
                    {
                        sumR = red;//sumR + pImage[(tempy + m)*widthstep + (tempx + n)*4 + 0];
                        sumG = green;//sumG + pImage[(tempy + m)*widthstep + (tempx + n)*4 + 1];
                        sumB = blue;//sumB + pImage[(tempy + m)*widthstep + (tempx + n)*4 + 2];
                        sumA = alpha;//sumA + pImage[(tempy + m)*widthstep + (tempx + n)*4 + 3];
                        count++;
                    }
                rImage[tempy*widthstep + tempx*4 + 0] = red;//sumR/count;
                rImage[tempy*widthstep + tempx*4 + 1] = green;//sumG/count;
                rImage[tempy*widthstep + tempx*4 + 2] = blue;//sumB/count;
                rImage[tempy*widthstep + tempx*4 + 3] = alpha;//sumA/count;
            }
        }
    return 1;
}
Thanks for looking at this code — I think I am doing something wrong.
Thanks in advance.
This seems to work for me.
// Per-pixel transform template: renders `image` into a bitmap context,
// rewrites each pixel in place, and returns the modified UIImage.
// The `...` placeholders below are intentionally left for the caller's own
// color-calculation code — this function does not compile as-is.
UIImage* modifyImage(UIImage* image)
{
size_t w = image.size.width;
size_t h = image.size.height;
CGFloat scale = image.scale;
// Create the bitmap context (sized in device pixels, hence * scale).
UIGraphicsBeginImageContext(CGSizeMake(w*scale, h*scale));
CGContextRef context = UIGraphicsGetCurrentContext();
// NOTE you may have to setup a rotation here based on image.imageOrientation
// but I didn't need to consider that for my images.
CGContextScaleCTM(context, scale, scale);
[image drawInRect:CGRectMake(0, 0, w, h)];
unsigned char* data = CGBitmapContextGetData (context);
if (data != NULL) {
// Query the context itself for the true buffer geometry rather than
// recomputing it from the image.
size_t height = CGBitmapContextGetHeight(context);
size_t width = CGBitmapContextGetWidth(context);
size_t bytesPerRow = CGBitmapContextGetBytesPerRow(context);
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
// The channel order observed here is BGRA; check
// CGBitmapContextGetBitmapInfo(context) if this layout isn't
// working for you.
int offset = y * bytesPerRow + x * 4;
unsigned char* blue = &data[offset];
unsigned char* green = &data[offset+1];
unsigned char* red = &data[offset+2];
unsigned char* alpha = &data[offset+3];
int newRed = ...; // color calculation code here
int newGreen = ...;
int newBlue = ...;
// Premultiply by alpha when writing back; the original alpha
// value is left unchanged.
*red = (newRed * *alpha)/255;
*green = (newGreen * *alpha)/255;
*blue = (newBlue * *alpha)/255;
}
}
}
CGImageRef newImage = CGBitmapContextCreateImage(context);
UIImage *done = [UIImage imageWithCGImage:newImage scale:image.scale orientation: image.imageOrientation];
CGImageRelease(newImage);
UIGraphicsEndImageContext();
return done;
}