I want to use UIMenuController with Core Text, but I can't select the words. I think I have to compute the on-screen coordinates of the words, but I can't map those coordinates back to an NSRange. Is there a function that solves this?
Sorry, my English is not good.
Here is my code:
CFArrayRef lines = CTFrameGetLines(leftFrame);
CFIndex i, total = CFArrayGetCount(lines);
CGFloat y;
for (i = 0; i < total; i++) {
    CGPoint origins;
    CTFrameGetLineOrigins(leftFrame, CFRangeMake(i, 1), &origins);
    CTLineRef line = (CTLineRef)CFArrayGetValueAtIndex(lines, i);
    y = self.bounds.origin.y + self.bounds.size.height - origins.y;
    //CTLineDraw(line, UIGraphicsGetCurrentContext());
    CFArrayRef runs = CTLineGetGlyphRuns(line);
    CFIndex r, runsTotal = CFArrayGetCount(runs);
    //NSLog(@"runsTotal = %d", runsTotal);
    for (r = 0; r < runsTotal; r++) {
        CTRunRef run = (CTRunRef)CFArrayGetValueAtIndex(runs, r);
        CGRect runBounds = CTRunGetImageBounds(run, context, CFRangeMake(0, 0));
        NSLog(@"runBounds.x = %f, runBounds.y = %f", runBounds.origin.x, runBounds.origin.y);
        CFIndex index = CTRunGetStringRange(run).location;
        //NSLog(@"%d", index);
    }
}
You can use a tap gesture recognizer to get the tap position. Since you already have the CTFrame, you can then calculate the frame of the tapped text.
Here is an example of how to calculate the frame of the two characters at the tap position: pass in the CTFrame and the tap point, and you get back the two characters' frame and their range within the CTFrame.
+ (CGRect)parserRectWithPoint:(CGPoint)point range:(NSRange *)selectRange frameRef:(CTFrameRef)frameRef
{
    CFIndex index = -1;
    CGPathRef pathRef = CTFrameGetPath(frameRef);
    CGRect bounds = CGPathGetBoundingBox(pathRef);
    CGRect rect = CGRectZero;
    // get lines
    NSArray *lines = (__bridge NSArray *)CTFrameGetLines(frameRef);
    if (!lines) {
        return rect;
    }
    NSInteger lineCount = [lines count];
    // prepare the origins of each line
    CGPoint *origins = malloc(lineCount * sizeof(CGPoint));
    if (lineCount) {
        CTFrameGetLineOrigins(frameRef, CFRangeMake(0, 0), origins);
        // check the tap point against every line; break once it is found
        for (int i = 0; i < lineCount; i++) {
            CGPoint baselineOrigin = origins[i];
            CTLineRef line = (__bridge CTLineRef)[lines objectAtIndex:i];
            CGFloat ascent, descent, linegap; // the font's ascent, descent and line gap
            CGFloat lineWidth = CTLineGetTypographicBounds(line, &ascent, &descent, &linegap);
            CGRect lineFrame = CGRectMake(baselineOrigin.x,
                                          CGRectGetHeight(bounds) - baselineOrigin.y - ascent,
                                          lineWidth,
                                          ascent + descent + linegap + [LSYReadConfig shareInstance].lineSpace);
            if (CGRectContainsPoint(lineFrame, point)) {
                // line found
                CFRange stringRange = CTLineGetStringRange(line);
                // glyph found
                index = CTLineGetStringIndexForPosition(line, point);
                CGFloat xStart = CTLineGetOffsetForStringIndex(line, index, NULL);
                CGFloat xEnd;
                // choose two characters by default
                if (index > stringRange.location + stringRange.length - 2) {
                    xEnd = xStart;
                    xStart = CTLineGetOffsetForStringIndex(line, index - 2, NULL);
                    (*selectRange).location = index - 2;
                }
                else {
                    xEnd = CTLineGetOffsetForStringIndex(line, index + 2, NULL);
                    (*selectRange).location = index;
                }
                // select two characters
                (*selectRange).length = 2;
                rect = CGRectMake(origins[i].x + xStart,
                                  baselineOrigin.y - descent,
                                  fabs(xStart - xEnd),
                                  ascent + descent);
                break;
            }
        }
    }
    free(origins);
    return rect;
}
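To actually show the copy menu, one possible approach (a sketch, not tested; the gesture handler and the frameRef, selectedRange and highlightRect properties are placeholders for whatever your view uses) is to call this method from a long-press gesture recognizer, keep the returned rect and range, and present UIMenuController from it:
- (void)handleLongPress:(UILongPressGestureRecognizer *)gesture
{
    if (gesture.state != UIGestureRecognizerStateBegan) {
        return;
    }
    CGPoint point = [gesture locationInView:self];
    NSRange range = NSMakeRange(NSNotFound, 0);
    CGRect rect = [[self class] parserRectWithPoint:point
                                              range:&range
                                           frameRef:self.frameRef];
    if (CGRectIsEmpty(rect) || range.location == NSNotFound) {
        return;
    }
    // NOTE: depending on how you draw, the returned rect may be in Core Text's
    // flipped coordinate system and may need converting to UIKit coordinates
    // before it is passed to -setTargetRect:inView:.
    self.selectedRange = range;   // remember what "Copy" should copy
    self.highlightRect = rect;    // draw your own selection highlight here
    [self setNeedsDisplay];

    [self becomeFirstResponder];  // required for UIMenuController
    UIMenuController *menu = [UIMenuController sharedMenuController];
    [menu setTargetRect:rect inView:self];
    [menu setMenuVisible:YES animated:YES];
}

// The view must be able to become first responder and must implement the
// menu actions it wants to offer, e.g. copy:.
- (BOOL)canBecomeFirstResponder
{
    return YES;
}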
Core Text does not support text selection; you have to implement it yourself. Have a look at EGOTextView as an example.
Please see the image. How can I get the intersection point of the two lines (the green rounded point)? I want to crop the inner part of the image. The path can close anywhere along the line.
context = UIGraphicsGetCurrentContext();
CGContextBeginPath(context);
CGContextSetLineWidth(context, 1.0 * self.scale);
CGContextSetLineCap(context, kCGLineCapRound);
[[UIColor redColor] setStroke];
CGPoint firstPoint = CGPointFromString([self.touchPoints objectAtIndex:0]);
CGContextMoveToPoint(context, firstPoint.x, firstPoint.y);
for (NSString *pointString in self.touchPoints) {
    CGPoint point = CGPointFromString(pointString);
    CGContextAddLineToPoint(context, point.x, point.y);
}
CGContextStrokePath(context);
This code is used to draw the lines. The line drawing works fine, and the cropping works fine too, but finding the intersection point is my main problem. Please help me.
Idea: check for intersections starting with firstline<>lastline, then firstline<>secondlastline, ..., firstline<>thirdline, then secondline<>lastline, and so on. This should give you the outermost intersection.
The following code is not tested, but it should help you with your problem.
typedef struct {
    CGPoint startPoint;
    CGPoint endPoint;
} Line;

#define CGPointNULL CGPointMake(NAN, NAN)
#define LineMake(_pts_, _i_) (Line){CGPointFromString([_pts_ objectAtIndex:(_i_) - 1]), CGPointFromString([_pts_ objectAtIndex:(_i_)])}

// Note: this returns the intersection of the two *infinite* lines through the
// segments; if you need the segments themselves to cross, add a bounds check.
CGPoint LineIntersects(Line *first, Line *second) {
    CGFloat x1 = first->startPoint.x;  CGFloat y1 = first->startPoint.y;
    CGFloat x2 = first->endPoint.x;    CGFloat y2 = first->endPoint.y;
    CGFloat x3 = second->startPoint.x; CGFloat y3 = second->startPoint.y;
    CGFloat x4 = second->endPoint.x;   CGFloat y4 = second->endPoint.y;
    CGFloat d = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4);
    if (d == 0) return CGPointNULL; // parallel lines
    CGFloat xi = ((x3 - x4) * (x1 * y2 - y1 * x2) - (x1 - x2) * (x3 * y4 - y3 * x4)) / d;
    CGFloat yi = ((y3 - y4) * (x1 * y2 - y1 * x2) - (y1 - y2) * (x3 * y4 - y3 * x4)) / d;
    return CGPointMake(xi, yi);
}

static inline BOOL CGPointIsValid(CGPoint p) {
    // NAN never compares equal to anything, so use isnan() instead of ==/!=
    return (!isnan(p.x) && !isnan(p.y));
}

- (CGPoint)mostOuterIntersection:(NSArray *)touchPoints {
    CGPoint intersection = CGPointNULL;
    int touchCount = (int)[touchPoints count];
    for (int i = 1; i < touchCount; i++) {
        Line first = LineMake(touchPoints, i);
        for (int j = touchCount - 1; j > i + 1; j--) {
            Line last = LineMake(touchPoints, j);
            intersection = LineIntersects(&first, &last);
            if (CGPointIsValid(intersection)) {
                return intersection; // stop at the outermost intersection
            }
        }
    }
    return intersection;
}
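Usage might look something like this (a sketch; self.touchPoints is the same array of NSString-encoded points used in the drawing code above):
// A sketch of calling the helper after the stroke is finished,
// e.g. in touchesEnded:withEvent:.
CGPoint intersection = [self mostOuterIntersection:self.touchPoints];
if (CGPointIsValid(intersection)) {
    NSLog(@"Outermost intersection at %@", NSStringFromCGPoint(intersection));
    // e.g. remember it and draw the green marker in drawRect:
} else {
    NSLog(@"No intersection found");
}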
How can I dynamically arrange icons (UIButtons with background images) on a circular image, so that they look like concentric circles?
I am using the following code:
UIImageView *container = [[UIImageView alloc] initWithFrame:CGRectMake(0.0f, 0.0f, 299.0f, 298.0f)];
container.image = [UIImage imageNamed:@"AR_RF004_circle.png"];
container.center = CGPointMake(self.view.bounds.size.width/2.0, self.view.bounds.size.height/2.0);
[self.view addSubview:container];
NSArray *imagesArray = [[NSArray alloc] initWithObjects:[UIImage imageNamed:@"AR1.png"], [UIImage imageNamed:@"AR2.png"], [UIImage imageNamed:@"AR3.png"], [UIImage imageNamed:@"AR4.png"], [UIImage imageNamed:@"AR5.png"], nil];
int numberOfSections = 5;
CGFloat angleSize = 2*M_PI/numberOfSections;
for (int j = 0; j < numberOfSections; ++j) {
    UIButton *sectionLabel = [[UIButton alloc] initWithFrame:CGRectMake(0.0f, 0.0f, 150.0f, 128.0f)];
    [sectionLabel setBackgroundImage:[imagesArray objectAtIndex:j] forState:UIControlStateNormal];
    sectionLabel.layer.anchorPoint = CGPointMake(0.9f, 0.1f);
    sectionLabel.layer.position = CGPointMake(container.bounds.size.width/2.0, container.bounds.size.height/2.0); // places the anchorPoint of each label directly in the center of the circle
    sectionLabel.transform = CGAffineTransformMakeRotation(angleSize*j);
    [container addSubview:sectionLabel];
    NSLog(@"section label x and y is %f, %f", sectionLabel.frame.origin.x, sectionLabel.frame.origin.y);
}
[container release];
Thanks in Advance
You can use this piece of code:
UIButton *currentButton;
int buttonCount = 20;
int radius = 200;
float angleBetweenButtons = (2 * M_PI) / buttonCount;
float x = 0;
float y = 0;
CGAffineTransform rotationTransform;
for (int i = 0; i < buttonCount; i++)
{
    // get your button from array or something
    currentButton = ...
    ...
    x = radius + cosf(i * angleBetweenButtons) * radius;
    y = radius + sinf(i * angleBetweenButtons) * radius;
    rotationTransform = CGAffineTransformIdentity;
    rotationTransform = CGAffineTransformRotate(rotationTransform, (i * angleBetweenButtons));
    currentButton.center = CGPointMake(x, y);
    currentButton.transform = rotationTransform;
}
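Applied to the setup from the question, it might look roughly like this (a sketch; the radius offset is arbitrary, and the container, image names, and button size come from the question's code):
// A sketch: place the five buttons from the question evenly around a circle
// inside `container`, instead of rotating them around an anchor point.
int numberOfSections = 5;
CGFloat radius = container.bounds.size.width / 2.0f - 64.0f; // rough inset to keep buttons inside the circle image
CGPoint center = CGPointMake(container.bounds.size.width / 2.0f,
                             container.bounds.size.height / 2.0f);
CGFloat angleSize = 2.0f * M_PI / numberOfSections;

for (int j = 0; j < numberOfSections; j++) {
    UIButton *button = [[UIButton alloc] initWithFrame:CGRectMake(0.0f, 0.0f, 150.0f, 128.0f)];
    [button setBackgroundImage:[imagesArray objectAtIndex:j] forState:UIControlStateNormal];
    button.center = CGPointMake(center.x + cosf(j * angleSize) * radius,
                                center.y + sinf(j * angleSize) * radius);
    // Rotate each button so it "faces" outwards; drop this line if the
    // icons should stay upright.
    button.transform = CGAffineTransformMakeRotation(j * angleSize);
    [container addSubview:button];
    [button release]; // the question's code uses manual reference counting
}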
I think you can do something like this:
sectionLabel.center = container.center;
I have not checked the rest of your code, but this is the simplest way to do it.
Below is the code I am using. It works fine in the iOS 5+ simulator, but it fails in the 4.3 simulator:
UITextPosition *begin = [self.tvQuestion positionFromPosition:self.tvQuestion.beginningOfDocument offset:nrange.location];
This is the error:
Terminating app due to uncaught exception 'NSInvalidArgumentException', reason: '-[UITextView beginningOfDocument]: unrecognized selector sent to instance 0x5586050'
Could somebody advise on this? Thanks in advance!
The UITextInput protocol has included the beginningOfDocument selector since iOS 3.2, but UITextView only adopted UITextInput in iOS 5.0; before that it only conformed to the UITextInputTraits protocol.
Source: search for UITextView here: http://developer.apple.com/library/ios/#releasenotes/General/iOS50APIDiff/index.html
If you want it to work on iOS 4.x and later, you have to perform a runtime check and do the computation yourself on iOS 4:
CGRect newRect = CGRectZero;
if ([UITextView conformsToProtocol:@protocol(UITextInput)]) {
    // iOS 5 and later
    UITextPosition *begin = [self.tvQuestion positionFromPosition:self.tvQuestion.beginningOfDocument offset:nrange.location];
    UITextPosition *end = [self.tvQuestion positionFromPosition:begin offset:nrange.length];
    UITextRange *textRange = [self.tvQuestion textRangeFromPosition:begin toPosition:end];
    newRect = [self.tvQuestion firstRectForRange:textRange];
} else {
    // iOS 3.2 to 4.3: compute the text position manually with Core Text
    #define TEXT_VIEW_PADDING 8.f
    NSString *textContent = [self.tvQuestion.text substringToIndex:NSMaxRange(nrange)];
    NSDictionary *ctFontDescriptorAttributes = [NSDictionary dictionaryWithObjectsAndKeys:
                                                self.tvQuestion.font.familyName, kCTFontFamilyNameAttribute,
                                                // Uncomment for bold fonts
                                                // [NSDictionary dictionaryWithObjectsAndKeys:
                                                //  [NSNumber numberWithInt:kCTFontBoldTrait], kCTFontSymbolicTrait,
                                                //  nil], kCTFontTraitsAttribute,
                                                nil];
    CTFontDescriptorRef ctFontDescriptor = CTFontDescriptorCreateWithAttributes((CFDictionaryRef)ctFontDescriptorAttributes);
    CTFontRef ctFont = CTFontCreateWithFontDescriptor(ctFontDescriptor, self.tvQuestion.font.pointSize, NULL);
    NSAttributedString *attributedText = [[NSAttributedString alloc] initWithString:textContent
                                                                          attributes:[NSDictionary dictionaryWithObjectsAndKeys:
                                                                                      (id)ctFont, kCTFontAttributeName,
                                                                                      nil]];
    CTFramesetterRef framesetter = CTFramesetterCreateWithAttributedString((CFAttributedStringRef)attributedText);
    [attributedText release]; // the framesetter retains the string
    CFRange globalRange = CFRangeMake(0, [textContent length]);
    CGRect textRect = CGRectInset((CGRect){CGPointZero, self.tvQuestion.contentSize}, TEXT_VIEW_PADDING, TEXT_VIEW_PADDING);
    CGPathRef framePath = CGPathCreateWithRect((CGRect){CGPointZero, textRect.size}, NULL);
    CTFrameRef frame = CTFramesetterCreateFrame(framesetter, globalRange, framePath, NULL);
    CGPathRelease(framePath); // the frame retains the path
    CFArrayRef lines = CTFrameGetLines(frame);
    NSInteger nbLines = CFArrayGetCount(lines);
    CGPoint *lineOrigins = calloc(nbLines, sizeof(CGPoint));
    CTFrameGetLineOrigins(frame, CFRangeMake(0, 0), lineOrigins);
    CGFloat ascent = CTFontGetAscent(ctFont);
    CGFloat descent = CTFontGetDescent(ctFont);
    CGFloat leading = CTFontGetLeading(ctFont);
    if (leading < 0) {
        leading = 0;
    }
    leading = floor(leading + 0.5);
    CGFloat lineHeight = floor(ascent + 0.5) + floor(descent + 0.5) + leading;
    CGFloat firstLineYOffset = 0.f;
    for (NSInteger i = 0; i < nbLines; i++) {
        CTLineRef line = CFArrayGetValueAtIndex(lines, i);
        CFRange lineRange = CTLineGetStringRange(line);
        CGPoint lineOrigin = lineOrigins[i];
        CGFloat ascent, descent, leading, width;
        width = CTLineGetTypographicBounds(line, &ascent, &descent, &leading);
        if (i == 0) {
            firstLineYOffset = lineOrigin.y;
        }
        lineOrigin.y = (lineOrigin.y - firstLineYOffset) * -1.f;
        if (nrange.location >= lineRange.location && nrange.location < lineRange.location + lineRange.length) {
            CGFloat secOffset;
            CGFloat startOffset = CTLineGetOffsetForStringIndex(line, nrange.location, &secOffset);
            CGFloat rectWidth;
            if (nrange.location + nrange.length <= lineRange.location + lineRange.length) {
                CGFloat endOffset = CTLineGetOffsetForStringIndex(line, nrange.location + nrange.length, &secOffset);
                rectWidth = endOffset - startOffset;
            } else {
                rectWidth = width - 5 - startOffset;
            }
            newRect = (CGRect){
                { lineOrigin.x + TEXT_VIEW_PADDING + startOffset, lineOrigin.y + TEXT_VIEW_PADDING + i },
                { rectWidth, lineHeight }
            };
            break;
        }
    }
    free(lineOrigins);
    CFRelease(frame);
    CFRelease(framesetter);
    CFRelease(ctFont);
    CFRelease(ctFontDescriptor);
}
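As a possible follow-up (a sketch, not part of the original answer; the highlight view is hypothetical), once newRect has been computed it could be used to mark the range in the text view:
// A sketch of using newRect, e.g. to highlight the matched range.
// Depending on the text view's contentOffset, additional coordinate
// adjustments may be needed.
if (!CGRectIsEmpty(newRect)) {
    UIView *highlight = [[UIView alloc] initWithFrame:newRect];
    highlight.backgroundColor = [[UIColor yellowColor] colorWithAlphaComponent:0.3f];
    highlight.userInteractionEnabled = NO;
    [self.tvQuestion addSubview:highlight];
    [highlight release]; // manual reference counting, as in the rest of the code
}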
Although UITextView does not conform to the UITextInput protocol on iOS 4.x, one of its subviews does. The following code works on iOS 4.x and later:
UIView<UITextInput> *t = [textView.subviews objectAtIndex:0];
assert([t conformsToProtocol:@protocol(UITextInput)]);
// use t as the view conforming to UITextInput
UITextPosition *beginning = t.beginningOfDocument; // OK
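Combining this with the code from the question, the same rect computation might then work on iOS 4.x as well (a sketch, not tested; it assumes the first subview really is the one conforming to UITextInput, which is an undocumented implementation detail that could change):
// A sketch: use the UITextInput-conforming subview on iOS 4.x,
// and the text view itself on iOS 5+.
UIView<UITextInput> *input;
if ([self.tvQuestion conformsToProtocol:@protocol(UITextInput)]) {
    input = (UIView<UITextInput> *)self.tvQuestion;                              // iOS 5+
} else {
    input = (UIView<UITextInput> *)[self.tvQuestion.subviews objectAtIndex:0];   // iOS 4.x, relies on the private view hierarchy
}

UITextPosition *begin = [input positionFromPosition:input.beginningOfDocument
                                             offset:nrange.location];
UITextPosition *end = [input positionFromPosition:begin offset:nrange.length];
UITextRange *textRange = [input textRangeFromPosition:begin toPosition:end];
CGRect newRect = [input firstRectForRange:textRange];
// Note: on iOS 4.x the rect is in the subview's coordinate system and may need
// converting with -convertRect:fromView: before it is used in the text view.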
I use CTFrameGetLineOrigins to get the origin of each line, but it always returns the wrong line origin.
The code:
CGContextRef context = UIGraphicsGetCurrentContext();
CGContextSetTextMatrix(context, CGAffineTransformIdentity);
CGContextTranslateCTM(context, 0, self.bounds.size.height);
CGContextScaleCTM(context, 1.0, -1.0);
CTFontRef font = CTFontCreateUIFontForLanguage(kCTFontUserFontType, self.font.pointSize, NULL);
CFAttributedStringSetAttribute(attrString, CFRangeMake(0, CFAttributedStringGetLength(attrString)), kCTForegroundColorAttributeName, self.textColor.CGColor);
CFAttributedStringSetAttribute(attrString, CFRangeMake(0, CFAttributedStringGetLength(attrString)), kCTFontAttributeName, font);
CTFramesetterRef framesetter = CTFramesetterCreateWithAttributedString(attrString);
CGPathRef path = CGPathCreateWithRect(CGRectInset(rect, TEXT_MARGIN, TEXT_MARGIN), NULL);
CTFrameRef frame = CTFramesetterCreateFrame(framesetter, CFRangeMake(0, 0), path, NULL);
CFArrayRef lines = CTFrameGetLines(frame);
size_t numOfLines = CFArrayGetCount(lines);
CGPoint lineOrigins[numOfLines];
CTFrameGetLineOrigins(frame, CFRangeMake(0, 0), lineOrigins);
for (CFIndex i = 0; i < numOfLines; i++) {
    CTLineRef line = CFArrayGetValueAtIndex(lines, i);
    CFArrayRef runs = CTLineGetGlyphRuns(line);
    size_t numOfRuns = CFArrayGetCount(runs);
    for (CFIndex j = 0; j < numOfRuns; j++) {
        CTRunRef run = CFArrayGetValueAtIndex(runs, j);
        CGRect strFrame = CGRectZero;
        CGFloat width = 0;
        CGFloat height = 0;
        CGFloat leading = 0;
        CGFloat ascent = 0;
        CGFloat descent = 0;
        CFRange strRange = CTRunGetStringRange(run);
        CGFloat offsetX = CTLineGetOffsetForStringIndex(line, strRange.location, NULL);
        width = (CGFloat)CTRunGetTypographicBounds(run, CFRangeMake(0, 0), &ascent, &descent, &leading);
        width += leading;
        height = ascent + descent;
        NSLog(@"Line %ld : Offset Y: %f", i+1, lineOrigins[i].y);
        strFrame = CGRectMake(lineOrigins[i].x + offsetX + TEXT_MARGIN, lineOrigins[i].y + TEXT_MARGIN - descent, width, height);
        strFrame = CGRectIntegral(strFrame);
        CFDictionaryRef attr = CTRunGetAttributes(run);
        if (attr != NULL) {
            CFStringRef url = CFDictionaryGetValue(attr, kCTFontURLAttribute);
            if (url != NULL) {
                [self.wordsFrame addObject:[NSDictionary dictionaryWithObjectsAndKeys:(__bridge NSString *)url, @"URL", NSStringFromCGRect(strFrame), @"frame", nil]];
            }
        }
    }
}
CTFrameDraw(frame, context);
CFRelease(font);
CGPathRelease(path);
CFRelease(frame);
CFRelease(framesetter);
Results:
2012-05-30 12:58:21.464 hjday[9362:707] Line 1 : Offset Y: 97.000000
2012-05-30 12:58:21.466 hjday[9362:707] Line 2 : Offset Y: 68.000000
2012-05-30 12:58:21.472 hjday[9362:707] Line 3 : Offset Y: 39.000000
Can anyone help me figure out what's wrong with my code? Or is there any other way to get the origin of each CTRun?
I got it:
strFrame = CGRectMake(lineOrigins[i].x + offsetX + TEXT_MARGIN, self.bounds.size.height - (lineOrigins[i].y + TEXT_MARGIN - descent) - height, width, height);
You need to flip the y coordinate: CTFrameGetLineOrigins returns origins in Core Text's coordinate system, whose origin is at the bottom-left of the frame, while UIKit's origin is at the top-left.
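In general, whenever you keep hit-testing rects in view coordinates, a conversion like the following is needed (a sketch, assuming the frame's path fills the view's bounds; the helper name is made up):
// A sketch: convert a rect computed from Core Text line origins
// (bottom-left origin) into UIKit view coordinates (top-left origin).
static CGRect FlipRectToViewCoordinates(CGRect rect, CGRect viewBounds)
{
    rect.origin.y = CGRectGetHeight(viewBounds) - CGRectGetMaxY(rect);
    return rect;
}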
I am working on implementing a flood-fill paint-bucket tool in an iPhone app and am having some trouble with it. The user is able to draw and I would like the paint bucket to allow them to tap a spot and fill everything of that color that is connected.
Here's my idea:
1) Start at the point the user selects
2) Save checked points to an NSMutableArray so they don't get re-checked
3) If the pixel color at the current point is the same as at the originally clicked point, save it to an array to be changed later
4) If the pixel color at the current point is different from the original, return. (boundary)
5) Once finished scanning, go through the array of pixels to change and set them to the new color.
But this is not working out so far. Any help or knowledge of how to do this would be greatly appreciated! Here is my code.
-(void)flood:(int)x:(int)y
{
//NSLog(#"Flood %i %i", x, y);
CGPoint point = CGPointMake(x, y);
NSValue *value = [NSValue valueWithCGPoint:point];
//Don't repeat checked pixels
if([self.checkedFloodPixels containsObject:value])
{
return;
}
else
{
//If not checked, mark as checked
[self.checkedFloodPixels addObject:value];
//Make sure in bounds
if([self isOutOfBounds:x:y] || [self reachedStopColor:x:y])
{
return;
}
//Go to adjacent points
[self flood:x+1:y];
[self flood:x-1:y];
[self flood:x:y+1];
[self flood:x:y-1];
}
}
- (BOOL)isOutOfBounds:(int)x:(int)y
{
BOOL outOfBounds;
if(y > self.drawImage.frame.origin.y && y < (self.drawImage.frame.origin.y + self.drawImage.frame.size.height))
{
if(x > self.drawImage.frame.origin.x && x < (self.drawImage.frame.origin.x + self.drawImage.frame.size.width))
{
outOfBounds = NO;
}
else
{
outOfBounds = YES;
}
}
else
{
outOfBounds = YES;
}
if(outOfBounds)
NSLog(#"Out of bounds");
return outOfBounds;
}
- (BOOL)reachedStopColor:(int)x:(int)y
{
CFDataRef theData = CGDataProviderCopyData(CGImageGetDataProvider(self.drawImage.image.CGImage));
const UInt8 *pixelData = CFDataGetBytePtr(theData);
int red = 0;
int green = 1;
int blue = 2;
//RGB for point being checked
float newPointR;
float newPointG;
float newPointB;
//RGB for point initially clicked
float oldPointR;
float oldPointG;
float oldPointB;
int index;
BOOL reachedStopColor = NO;
//Format oldPoint RBG - pixels are every 4 bytes so round to 4
index = lastPoint.x * lastPoint.y;
if(index % 4 != 0)
{
index -= 2;
index /= 4;
index *= 4;
}
//Get into 0.0 - 1.0 value
oldPointR = pixelData[index + red];
oldPointG = pixelData[index + green];
oldPointB = pixelData[index + blue];
oldPointR /= 255.0;
oldPointG /= 255.0;
oldPointB /= 255.0;
oldPointR *= 1000;
oldPointG *= 1000;
oldPointB *= 1000;
int oldR = oldPointR;
int oldG = oldPointG;
int oldB = oldPointB;
oldPointR = oldR / 1000.0;
oldPointG = oldG / 1000.0;
oldPointB = oldB / 1000.0;
//Format newPoint RBG
index = x*y;
if(index % 4 != 0)
{
index -= 2;
index /= 4;
index *= 4;
}
newPointR = pixelData[index + red];
newPointG = pixelData[index + green];
newPointB = pixelData[index + blue];
newPointR /= 255.0;
newPointG /= 255.0;
newPointB /= 255.0;
newPointR *= 1000;
newPointG *= 1000;
newPointB *= 1000;
int newR = newPointR;
int newG = newPointG;
int newB = newPointB;
newPointR = newR / 1000.0;
newPointG = newG / 1000.0;
newPointB = newB / 1000.0;
//Check if different color
if(newPointR < (oldPointR - 0.02f) || newPointR > (oldPointR + 0.02f))
{
if(newPointG < (oldPointG - 0.02f) || newPointG > (oldPointG + 0.02f))
{
if(newPointB < (oldPointB - 0.02f) || newPointB > (oldPointB + 0.02f))
{
reachedStopColor = YES;
NSLog(#"Different Color");
}
else
{
NSLog(#"Same Color3");
NSNumber *num = [NSNumber numberWithInt:index];
[self.pixelsToChange addObject:num];
}
}
else
{
NSLog(#"Same Color2");
NSNumber *num = [NSNumber numberWithInt:index];
[self.pixelsToChange addObject:num];
}
}
else
{
NSLog(#"Same Color1");
NSNumber *num = [NSNumber numberWithInt:index];
[self.pixelsToChange addObject:num];
}
CFRelease(theData);
if(reachedStopColor)
NSLog(#"Reached stop color");
return reachedStopColor;
}
-(void)fillAll
{
CGContextRef ctx;
CGImageRef imageRef = self.drawImage.image.CGImage;
NSUInteger width = CGImageGetWidth(imageRef);
NSUInteger height = CGImageGetHeight(imageRef);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
unsigned char *rawData = malloc(height * width * 4);
NSUInteger bytesPerPixel = 4;
NSUInteger bytesPerRow = bytesPerPixel * width;
NSUInteger bitsPerComponent = 8;
CGContextRef context = CGBitmapContextCreate(rawData, width, height,
bitsPerComponent, bytesPerRow, colorSpace,
kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGColorSpaceRelease(colorSpace);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
CGContextRelease(context);
int red = 0;
int green = 1;
int blue = 2;
int index;
NSNumber *num;
for(int i = 0; i < [self.pixelsToChange count]; i++)
{
num = [self.pixelsToChange objectAtIndex:i];
index = [num intValue];
rawData[index + red] = (char)[[GameManager sharedManager] RValue];
rawData[index + green] = (char)[[GameManager sharedManager] GValue];
rawData[index + blue] = (char)[[GameManager sharedManager] BValue];
}
ctx = CGBitmapContextCreate(rawData,
CGImageGetWidth( imageRef ),
CGImageGetHeight( imageRef ),
8,
CGImageGetBytesPerRow( imageRef ),
CGImageGetColorSpace( imageRef ),
kCGImageAlphaPremultipliedLast );
imageRef = CGBitmapContextCreateImage (ctx);
UIImage* rawImage = [UIImage imageWithCGImage:imageRef];
CGContextRelease(ctx);
self.drawImage.image = rawImage;
free(rawData);
}
So I found this (I know the question might be irrelevant by now, but it's not for people who are still looking for something like this).
To get the color at a pixel from the context (modified code from here):
- (UIColor *)getPixelColorAtLocation:(CGPoint)point {
    UIColor *color = nil;
    CGContextRef cgctx = UIGraphicsGetCurrentContext();
    unsigned char *data = CGBitmapContextGetData(cgctx);
    if (data != NULL) {
        // I don't know how to get ContextWidth from the current context,
        // so I keep it as an instance variable in my code
        int offset = 4 * ((ContextWidth * round(point.y)) + round(point.x));
        int alpha = data[offset];
        int red = data[offset+1];
        int green = data[offset+2];
        int blue = data[offset+3];
        color = [UIColor colorWithRed:(red/255.0f) green:(green/255.0f) blue:(blue/255.0f) alpha:(alpha/255.0f)];
    }
    // Note: do not free() the pointer returned by CGBitmapContextGetData --
    // the context owns that memory.
    return color;
}
And the fill algorithm is here.
This is what I'm using, but the fill itself is quite slow compared to CGPath-based drawing. Though if you're rendering offscreen and/or filling dynamically like this, it looks kind of cool.
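The linked fill algorithm itself isn't reproduced here, but the basic idea is a queue- or stack-based flood fill over the raw RGBA buffer rather than the recursive version from the question, which overflows the call stack on large areas. A minimal sketch (not the linked code; it assumes a mutable buffer with 4 bytes per pixel like the one from CGBitmapContextCreate above, and the function name and signature are made up):
#include <stdlib.h>
#include <string.h>

// A sketch of an iterative flood fill over an RGBA buffer.
// `pixels` is width*height*4 bytes, seedX/seedY is the tapped point,
// and newColor is 4 bytes (RGBA in the buffer's byte order).
static void FloodFillRGBA(unsigned char *pixels, size_t width, size_t height,
                          size_t seedX, size_t seedY,
                          const unsigned char newColor[4])
{
    if (seedX >= width || seedY >= height) return;

    size_t seedIndex = 4 * (seedY * width + seedX);
    unsigned char target[4];
    memcpy(target, &pixels[seedIndex], 4);
    if (memcmp(target, newColor, 4) == 0) return; // already the fill color

    // Explicit stack of pixel indices still to visit. Pixels are recolored
    // when pushed, which also marks them as visited, so each pixel is
    // pushed at most once and width*height entries are enough.
    size_t *stack = malloc(width * height * sizeof(size_t));
    size_t top = 0;
    memcpy(&pixels[seedIndex], newColor, 4);
    stack[top++] = seedY * width + seedX;

    while (top > 0) {
        size_t p = stack[--top];
        size_t x = p % width;
        size_t y = p / width;

        // Collect the 4-connected neighbours that are inside the image.
        size_t neighbours[4];
        size_t count = 0;
        if (x + 1 < width)  neighbours[count++] = p + 1;
        if (x > 0)          neighbours[count++] = p - 1;
        if (y + 1 < height) neighbours[count++] = p + width;
        if (y > 0)          neighbours[count++] = p - width;

        for (size_t n = 0; n < count; n++) {
            unsigned char *px = &pixels[4 * neighbours[n]];
            if (memcmp(px, target, 4) == 0) { // still the original color
                memcpy(px, newColor, 4);      // recolor and mark visited
                stack[top++] = neighbours[n];
            }
        }
    }
    free(stack);
}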