I have an image and a set of four points describing a quadrilateral Q. I want to transform the image so that it fits the quadrilateral Q. Photoshop calls this transformation "Distort." Given where this quadrilateral comes from (the perspective of the image moving in space), it is in fact the combination of a scale, a rotation and a perspective matrix.
I am wondering if this is possible using a CATransform3D 4x4 matrix. Do you have any hints on how to do that? I've tried to take the four points and build 16 equations (out of A' = A x u), but it did not work: I'm not sure what I should use as the z, z', w and w' coefficients.
The following picture shows what I want to do:
Here are some examples of points (one quadrilateral per line, as coordinate pairs):
(276.523, 236.438), (517.656, 208.945), (275.984, 331.285), (502.23, 292.344)
(261.441, 235.059), (515.09, 211.5), (263.555, 327.066), (500.734, 295)
(229.031, 161.277), (427.125, 192.562), (229.16, 226), (416.48, 256)
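For reference, the mapping involved here is (if I understand it right) a plane projective transform, i.e. a homography. In homogeneous coordinates, with the bottom-right coefficient normalized to 1:

$$
\begin{pmatrix} x'w \\ y'w \\ w \end{pmatrix}
= \begin{pmatrix} a & b & c \\ d & e & f \\ g & h & 1 \end{pmatrix}
\begin{pmatrix} x \\ y \\ 1 \end{pmatrix},
\qquad
x' = \frac{ax + by + c}{gx + hy + 1}, \quad
y' = \frac{dx + ey + f}{gx + hy + 1}
$$

Each of the four corner correspondences gives two such equations, so there are eight equations for the eight unknowns $a \dots h$, not sixteen.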
I've created a kit for doing this on iOS: https://github.com/hfossli/AGGeometryKit/
Make sure your anchor point is top left (CGPointZero).
+ (CATransform3D)rectToQuad:(CGRect)rect
quadTL:(CGPoint)topLeft
quadTR:(CGPoint)topRight
quadBL:(CGPoint)bottomLeft
quadBR:(CGPoint)bottomRight
{
return [self rectToQuad:rect quadTLX:topLeft.x quadTLY:topLeft.y quadTRX:topRight.x quadTRY:topRight.y quadBLX:bottomLeft.x quadBLY:bottomLeft.y quadBRX:bottomRight.x quadBRY:bottomRight.y];
}
+ (CATransform3D)rectToQuad:(CGRect)rect
quadTLX:(CGFloat)x1a
quadTLY:(CGFloat)y1a
quadTRX:(CGFloat)x2a
quadTRY:(CGFloat)y2a
quadBLX:(CGFloat)x3a
quadBLY:(CGFloat)y3a
quadBRX:(CGFloat)x4a
quadBRY:(CGFloat)y4a
{
CGFloat X = rect.origin.x;
CGFloat Y = rect.origin.y;
CGFloat W = rect.size.width;
CGFloat H = rect.size.height;
CGFloat y21 = y2a - y1a;
CGFloat y32 = y3a - y2a;
CGFloat y43 = y4a - y3a;
CGFloat y14 = y1a - y4a;
CGFloat y31 = y3a - y1a;
CGFloat y42 = y4a - y2a;
CGFloat a = -H*(x2a*x3a*y14 + x2a*x4a*y31 - x1a*x4a*y32 + x1a*x3a*y42);
CGFloat b = W*(x2a*x3a*y14 + x3a*x4a*y21 + x1a*x4a*y32 + x1a*x2a*y43);
CGFloat c = H*X*(x2a*x3a*y14 + x2a*x4a*y31 - x1a*x4a*y32 + x1a*x3a*y42) - H*W*x1a*(x4a*y32 - x3a*y42 + x2a*y43) - W*Y*(x2a*x3a*y14 + x3a*x4a*y21 + x1a*x4a*y32 + x1a*x2a*y43);
CGFloat d = H*(-x4a*y21*y3a + x2a*y1a*y43 - x1a*y2a*y43 - x3a*y1a*y4a + x3a*y2a*y4a);
CGFloat e = W*(x4a*y2a*y31 - x3a*y1a*y42 - x2a*y31*y4a + x1a*y3a*y42);
CGFloat f = -(W*(x4a*(Y*y2a*y31 + H*y1a*y32) - x3a*(H + Y)*y1a*y42 + H*x2a*y1a*y43 + x2a*Y*(y1a - y3a)*y4a + x1a*Y*y3a*(-y2a + y4a)) - H*X*(x4a*y21*y3a - x2a*y1a*y43 + x3a*(y1a - y2a)*y4a + x1a*y2a*(-y3a + y4a)));
CGFloat g = H*(x3a*y21 - x4a*y21 + (-x1a + x2a)*y43);
CGFloat h = W*(-x2a*y31 + x4a*y31 + (x1a - x3a)*y42);
CGFloat i = W*Y*(x2a*y31 - x4a*y31 - x1a*y42 + x3a*y42) + H*(X*(-(x3a*y21) + x4a*y21 + x1a*y43 - x2a*y43) + W*(-(x3a*y2a) + x4a*y2a + x2a*y3a - x4a*y3a - x2a*y4a + x3a*y4a));
const double kEpsilon = 0.0001;
if(fabs(i) < kEpsilon)
{
i = kEpsilon* (i > 0 ? 1.0 : -1.0);
}
CATransform3D transform = {a/i, d/i, 0, g/i, b/i, e/i, 0, h/i, 0, 0, 1, 0, c/i, f/i, 0, 1.0};
return transform;
}
I take no credit for this code. All I did was scour the internet and put together various incomplete answers.
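For anyone wondering where the struct layout on the last line comes from: Core Animation applies a CATransform3D to row vectors $[x\ y\ z\ 1]$, so (as I read the code) the 3x3 homography is transposed into the 4x4 matrix, with the $z$ row and column left as identity and everything normalized by the last coefficient $i$:

$$
H = \begin{pmatrix} a & b & c \\ d & e & f \\ g & h & i \end{pmatrix}
\;\longrightarrow\;
\frac{1}{i}\begin{pmatrix} a & d & 0 & g \\ b & e & 0 & h \\ 0 & 0 & i & 0 \\ c & f & 0 & i \end{pmatrix}
$$

so a point $[x\ y\ 0\ 1]$ ends up at $x' = (ax+by+c)/(gx+hy+i)$ and $y' = (dx+ey+f)/(gx+hy+i)$ after the perspective divide.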
Here is a sample project which applies code from hfossli's answer above and creates a category on UIView which sets the frame and applies the transform in one call:
https://github.com/joshrl/FreeTransform
UIView+Quadrilateral code:
#import <UIKit/UIKit.h>
#import <QuartzCore/QuartzCore.h>
@interface UIView (Quadrilateral)
//Sets frame to bounding box of quad and applies transform
- (void)transformToFitQuadTopLeft:(CGPoint)tl topRight:(CGPoint)tr bottomLeft:(CGPoint)bl bottomRight:(CGPoint)br;
@end
@implementation UIView (Quadrilateral)
- (void)transformToFitQuadTopLeft:(CGPoint)tl topRight:(CGPoint)tr bottomLeft:(CGPoint)bl bottomRight:(CGPoint)br
{
NSAssert(CGPointEqualToPoint(self.layer.anchorPoint, CGPointZero), @"Anchor point must be (0,0)!");
CGRect boundingBox = [[self class] boundingBoxForQuadTR:tr tl:tl bl:bl br:br];
self.frame = boundingBox;
CGPoint frameTopLeft = boundingBox.origin;
CATransform3D transform = [[self class] rectToQuad:self.bounds
quadTL:CGPointMake(tl.x-frameTopLeft.x, tl.y-frameTopLeft.y)
quadTR:CGPointMake(tr.x-frameTopLeft.x, tr.y-frameTopLeft.y)
quadBL:CGPointMake(bl.x-frameTopLeft.x, bl.y-frameTopLeft.y)
quadBR:CGPointMake(br.x-frameTopLeft.x, br.y-frameTopLeft.y)];
self.layer.transform = transform;
}
+ (CGRect)boundingBoxForQuadTR:(CGPoint)tr tl:(CGPoint)tl bl:(CGPoint)bl br:(CGPoint)br
{
CGRect boundingBox = CGRectZero;
CGFloat xmin = MIN(MIN(MIN(tr.x, tl.x), bl.x),br.x);
CGFloat ymin = MIN(MIN(MIN(tr.y, tl.y), bl.y),br.y);
CGFloat xmax = MAX(MAX(MAX(tr.x, tl.x), bl.x),br.x);
CGFloat ymax = MAX(MAX(MAX(tr.y, tl.y), bl.y),br.y);
boundingBox.origin.x = xmin;
boundingBox.origin.y = ymin;
boundingBox.size.width = xmax - xmin;
boundingBox.size.height = ymax - ymin;
return boundingBox;
}
+ (CATransform3D)rectToQuad:(CGRect)rect
quadTL:(CGPoint)topLeft
quadTR:(CGPoint)topRight
quadBL:(CGPoint)bottomLeft
quadBR:(CGPoint)bottomRight
{
return [self rectToQuad:rect quadTLX:topLeft.x quadTLY:topLeft.y quadTRX:topRight.x quadTRY:topRight.y quadBLX:bottomLeft.x quadBLY:bottomLeft.y quadBRX:bottomRight.x quadBRY:bottomRight.y];
}
+ (CATransform3D)rectToQuad:(CGRect)rect
quadTLX:(CGFloat)x1a
quadTLY:(CGFloat)y1a
quadTRX:(CGFloat)x2a
quadTRY:(CGFloat)y2a
quadBLX:(CGFloat)x3a
quadBLY:(CGFloat)y3a
quadBRX:(CGFloat)x4a
quadBRY:(CGFloat)y4a
{
CGFloat X = rect.origin.x;
CGFloat Y = rect.origin.y;
CGFloat W = rect.size.width;
CGFloat H = rect.size.height;
CGFloat y21 = y2a - y1a;
CGFloat y32 = y3a - y2a;
CGFloat y43 = y4a - y3a;
CGFloat y14 = y1a - y4a;
CGFloat y31 = y3a - y1a;
CGFloat y42 = y4a - y2a;
CGFloat a = -H*(x2a*x3a*y14 + x2a*x4a*y31 - x1a*x4a*y32 + x1a*x3a*y42);
CGFloat b = W*(x2a*x3a*y14 + x3a*x4a*y21 + x1a*x4a*y32 + x1a*x2a*y43);
CGFloat c = H*X*(x2a*x3a*y14 + x2a*x4a*y31 - x1a*x4a*y32 + x1a*x3a*y42) - H*W*x1a*(x4a*y32 - x3a*y42 + x2a*y43) - W*Y*(x2a*x3a*y14 + x3a*x4a*y21 + x1a*x4a*y32 + x1a*x2a*y43);
CGFloat d = H*(-x4a*y21*y3a + x2a*y1a*y43 - x1a*y2a*y43 - x3a*y1a*y4a + x3a*y2a*y4a);
CGFloat e = W*(x4a*y2a*y31 - x3a*y1a*y42 - x2a*y31*y4a + x1a*y3a*y42);
CGFloat f = -(W*(x4a*(Y*y2a*y31 + H*y1a*y32) - x3a*(H + Y)*y1a*y42 + H*x2a*y1a*y43 + x2a*Y*(y1a - y3a)*y4a + x1a*Y*y3a*(-y2a + y4a)) - H*X*(x4a*y21*y3a - x2a*y1a*y43 + x3a*(y1a - y2a)*y4a + x1a*y2a*(-y3a + y4a)));
CGFloat g = H*(x3a*y21 - x4a*y21 + (-x1a + x2a)*y43);
CGFloat h = W*(-x2a*y31 + x4a*y31 + (x1a - x3a)*y42);
CGFloat i = W*Y*(x2a*y31 - x4a*y31 - x1a*y42 + x3a*y42) + H*(X*(-(x3a*y21) + x4a*y21 + x1a*y43 - x2a*y43) + W*(-(x3a*y2a) + x4a*y2a + x2a*y3a - x4a*y3a - x2a*y4a + x3a*y4a));
const double kEpsilon = 0.0001;
if(fabs(i) < kEpsilon)
{
i = kEpsilon* (i > 0 ? 1.0 : -1.0);
}
CATransform3D transform = {a/i, d/i, 0, g/i, b/i, e/i, 0, h/i, 0, 0, 1, 0, c/i, f/i, 0, 1.0};
return transform;
}
@end
We finally got this to work. We tried several different methods, but most failed; some even returned a non-identity matrix when given the same points as input and output (for example, the one from KennyTM; we must have been missing something there).
Using OpenCV as follows, we get a CATransform3D ready to be applied to a CALayer:
+ (CATransform3D)transformQuadrilateral:(Quadrilateral)origin toQuadrilateral:(Quadrilateral)destination {
CvPoint2D32f *cvsrc = [self openCVMatrixWithQuadrilateral:origin];
CvMat *src_mat = cvCreateMat( 4, 2, CV_32FC1 );
cvSetData(src_mat, cvsrc, sizeof(CvPoint2D32f));
CvPoint2D32f *cvdst = [self openCVMatrixWithQuadrilateral:destination];
CvMat *dst_mat = cvCreateMat( 4, 2, CV_32FC1 );
cvSetData(dst_mat, cvdst, sizeof(CvPoint2D32f));
CvMat *H = cvCreateMat(3,3,CV_32FC1);
cvFindHomography(src_mat, dst_mat, H);
cvReleaseMat(&src_mat);
cvReleaseMat(&dst_mat);
// The CvMats do not own the point buffers (set via cvSetData), so free them here.
free(cvsrc);
free(cvdst);
CATransform3D transform = [self transform3DWithCMatrix:H->data.fl];
cvReleaseMat(&H);
return transform;
}
+ (CvPoint2D32f *)openCVMatrixWithQuadrilateral:(Quadrilateral)origin {
CvPoint2D32f *cvsrc = (CvPoint2D32f *)malloc(4*sizeof(CvPoint2D32f));
cvsrc[0].x = origin.upperLeft.x;
cvsrc[0].y = origin.upperLeft.y;
cvsrc[1].x = origin.upperRight.x;
cvsrc[1].y = origin.upperRight.y;
cvsrc[2].x = origin.lowerRight.x;
cvsrc[2].y = origin.lowerRight.y;
cvsrc[3].x = origin.lowerLeft.x;
cvsrc[3].y = origin.lowerLeft.y;
return cvsrc;
}
+ (CATransform3D)transform3DWithCMatrix:(float *)matrix {
CATransform3D transform = CATransform3DIdentity;
transform.m11 = matrix[0];
transform.m21 = matrix[1];
transform.m41 = matrix[2];
transform.m12 = matrix[3];
transform.m22 = matrix[4];
transform.m42 = matrix[5];
transform.m14 = matrix[6];
transform.m24 = matrix[7];
transform.m44 = matrix[8];
return transform;
}
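If you are on a newer OpenCV where the legacy C API (cvFindHomography and friends) is no longer available, the 3x3-to-CATransform3D embedding itself is easy to reproduce. A minimal Swift sketch, assuming the homography arrives as a row-major array of nine floats (the function name is made up):

import QuartzCore

// Hypothetical helper: embed a row-major 3x3 homography into a CATransform3D,
// mirroring transform3DWithCMatrix: above (z row and column left as identity).
func caTransform3D(fromHomography m: [Float]) -> CATransform3D {
    precondition(m.count == 9, "expected a row-major 3x3 matrix")
    var t = CATransform3DIdentity
    t.m11 = CGFloat(m[0]); t.m21 = CGFloat(m[1]); t.m41 = CGFloat(m[2])
    t.m12 = CGFloat(m[3]); t.m22 = CGFloat(m[4]); t.m42 = CGFloat(m[5])
    t.m14 = CGFloat(m[6]); t.m24 = CGFloat(m[7]); t.m44 = CGFloat(m[8])
    return t
}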
With 100% thanks to JoshRL, here's a Swift version of JoshRL's class.
It has been thoroughly debugged: lines that hit Swift's "expression was too complex" compiler error have been split up and stress tested. It is working reliably in high-volume production.
It couldn't be easier to use; a Swift usage example follows the class.
Originally written in 2016 as a full, working, copy-and-paste solution; refreshed for 2022 with current syntax.
// JoshQuadView in Swift
// from: https://stackoverflow.com/a/18606029/294884
// NB: JoshRL uses the ordering convention
// "topleft, topright, bottomleft, bottomright"
// which is different from "clockwise from topleft".
// Note: it is not meant to handle concave quads.
import UIKit
class JoshQuadView: UIImageView { // or UIView, as preferred
func transformToFitQuadTopLeft(tl: CGPoint, tr: CGPoint, bl: CGPoint, br: CGPoint) {
guard self.layer.anchorPoint == .zero else {
print("suck")
return
}
let b = boundingBoxForQuadTR(tl, tr, bl, br)
self.frame = b
self.layer.transform = rectToQuad(bounds,
.init(x: tl.x-b.origin.x, y: tl.y-b.origin.y),
.init(x: tr.x-b.origin.x, y: tr.y-b.origin.y),
.init(x: bl.x-b.origin.x, y: bl.y-b.origin.y),
.init(x: br.x-b.origin.x, y: br.y-b.origin.y))
}
func boundingBoxForQuadTR(_ tl: CGPoint, _ tr: CGPoint, _ bl: CGPoint, _ br: CGPoint) -> CGRect {
var b: CGRect = .zero
let xmin: CGFloat = min(min(min(tr.x,tl.x),bl.x),br.x)
let ymin: CGFloat = min(min(min(tr.y,tl.y),bl.y),br.y)
let xmax: CGFloat = max(max(max(tr.x,tl.x),bl.x),br.x)
let ymax: CGFloat = max(max(max(tr.y,tl.y),bl.y),br.y)
b.origin.x = xmin
b.origin.y = ymin
b.size.width = xmax - xmin
b.size.height = ymax - ymin
return b
}
func rectToQuad(_ rect: CGRect,
_ topLeft: CGPoint, _ topRight: CGPoint, _ bottomLeft: CGPoint, _ bottomRight: CGPoint) -> CATransform3D {
rectToQuadCalculation(rect,
topLeft.x, topLeft.y,
topRight.x, topRight.y,
bottomLeft.x, bottomLeft.y,
bottomRight.x, bottomRight.y)
}
func rectToQuadCalculation(_ rect: CGRect,
_ x1a: CGFloat, _ y1a: CGFloat,
_ x2a: CGFloat, _ y2a: CGFloat,
_ x3a: CGFloat, _ y3a: CGFloat,
_ x4a: CGFloat, _ y4a: CGFloat) -> CATransform3D {
let XX = rect.origin.x
let YY = rect.origin.y
let WW = rect.size.width
let HH = rect.size.height
let y21 = y2a - y1a
let y32 = y3a - y2a
let y43 = y4a - y3a
let y14 = y1a - y4a
let y31 = y3a - y1a
let y42 = y4a - y2a
let a = -HH * (x2a*x3a*y14 + x2a*x4a*y31 - x1a*x4a*y32 + x1a*x3a*y42)
let b = WW * (x2a*x3a*y14 + x3a*x4a*y21 + x1a*x4a*y32 + x1a*x2a*y43)
let c0 = -HH * WW * x1a * (x4a*y32 - x3a*y42 + x2a*y43)
let cx = HH * XX * (x2a*x3a*y14 + x2a*x4a*y31 - x1a*x4a*y32 + x1a*x3a*y42)
let cy = -WW * YY * (x2a*x3a*y14 + x3a*x4a*y21 + x1a*x4a*y32 + x1a*x2a*y43)
let c = c0 + cx + cy
let d = HH * (-x4a*y21*y3a + x2a*y1a*y43 - x1a*y2a*y43 - x3a*y1a*y4a + x3a*y2a*y4a)
let e = WW * (x4a*y2a*y31 - x3a*y1a*y42 - x2a*y31*y4a + x1a*y3a*y42)
let f0 = -WW * HH * (x4a * y1a * y32 - x3a * y1a * y42 + x2a * y1a * y43)
let fx = HH * XX * (x4a * y21 * y3a - x2a * y1a * y43 - x3a * y21 * y4a + x1a * y2a * y43)
let fy = -WW * YY * (x4a * y2a * y31 - x3a * y1a * y42 - x2a * y31 * y4a + x1a * y3a * y42)
let f = f0 + fx + fy
let g = HH * (x3a * y21 - x4a * y21 + (-x1a + x2a) * y43)
let h = WW * (-x2a * y31 + x4a * y31 + (x1a - x3a) * y42)
let iy = WW * YY * (x2a * y31 - x4a * y31 - x1a * y42 + x3a * y42)
let ix = HH * XX * (x4a * y21 - x3a * y21 + x1a * y43 - x2a * y43)
let i0 = HH * WW * (x3a * y42 - x4a * y32 - x2a * y43)
var i = i0 + ix + iy
let kEpsilon: CGFloat = 0.0001
if abs(i) < kEpsilon {
i = kEpsilon * (i > 0 ? 1 : -1)
}
return CATransform3D(
m11: a/i, m12: d/i, m13: 0, m14: g/i,
m21: b/i, m22: e/i, m23: 0, m24: h/i,
m31: 0, m32: 0, m33: 1, m34: 0,
m41: c/i, m42: f/i, m43: 0, m44: 1.0)
}
}
For a quick test:
@IBOutlet var someImage: JoshQuadView!
someImage.transformToFitQuadTopLeft(
tl: CGPoint(x: 0, y: 0),
tr: CGPoint(x: 400, y: 0),
bl: CGPoint(x: 0, y: 400),
br: CGPoint(x: 400, y: 400))
It will render the normal square image.
someImage.transformToFitQuadTopLeft(
tl: CGPoint(x: -50, y: -20),
tr: CGPoint(x: 400, y: 0),
bl: CGPoint(x: 0, y: 400),
br: CGPoint(x: 400, y: 400))
It will bend away the top left corner.
Important reminder. JoshRL originally used the ordering tl, tr, bl, br, so that is maintained here. It's more common to use tl and then go clockwise when dealing with verts (i.e. tl, tr, br, bl), so just bear it in mind! (The small wrapper sketched below shows one way to accept clockwise order.)
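If you prefer passing corners clockwise from the top left, a thin convenience wrapper keeps the call sites consistent (hypothetical, not part of JoshRL's original code):

extension JoshQuadView {
    // Accepts corners clockwise from the top left (tl, tr, br, bl)
    // and forwards them in the tl, tr, bl, br order used above.
    func transformToFitQuadClockwise(_ tl: CGPoint, _ tr: CGPoint, _ br: CGPoint, _ bl: CGPoint) {
        transformToFitQuadTopLeft(tl: tl, tr: tr, bl: bl, br: br)
    }
}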
To use in Swift with draggable corner handles:
Say you have a container view controller, "QuadScreen".
The view you want to stretch will be a JoshQuadView. Drop it in the scene and connect it to the IBOutlet ("jqv" in the example here).
Put four corner handles (i.e. small image views with your handle-icon PNGs) in the scene and link them to the four handle IBOutlets. The code below takes care of these handles completely. (Follow the comments in the code for how to set them up in the storyboard.)
Then, it's just one line of code to do the stretching:
class QuadScreen: UIViewController {
// sit your JoshQuadView in this view
@IBOutlet var jqv: JoshQuadView!
// simply have four small subview views, "handles"
// with an icon on them (perhaps a small circle)
// and put those over the four corners of the jqv
// NOTE numbered CLOCKWISE from top left here:
@IBOutlet var handle1: UIView!
@IBOutlet var handle2: UIView!
@IBOutlet var handle3: UIView!
@IBOutlet var handle4: UIView!
// put a pan recognizer on each handle, action goes to here
// (for the pan recognizers, set cancels-in-view as needed
// if you, for example, highlight them on touch in their class)
@IBAction func dragHandle(_ p: UIPanGestureRecognizer) {
    let tr = p.translation(in: p.view)
    p.view!.center.x += tr.x
    p.view!.center.y += tr.y
    p.setTranslation(.zero, in: p.view)
    // note the tl, tr, bl, br ordering: the handles are numbered clockwise
    jqv.transformToFitQuadTopLeft(tl: handle1.center, tr: handle2.center, bl: handle4.center, br: handle3.center)
    // it's that simple, there's nothing else to do
}
}
override func viewDidLayoutSubviews() {
    super.viewDidLayoutSubviews()
    // don't forget to do this... it is critical.
    jqv.layer.anchorPoint = .zero
}
}
As a curiosity, and for the sake of Google: it's ridiculously easy to do this on Android, which has a built-in call for reshaping polys (Matrix.setPolyToPoly). This excellent answer has copy-and-paste code: https://stackoverflow.com/a/34667015/294884
If your new quadrilateral is a parallelogram, then an affine transform (scale, rotation and shear) is enough, and this can be done most easily with CGAffineTransform. See Jeff LaMarche's excellent article, CGAffineTransform 1.1.
If your new quadrilateral is not a parallelogram, then see the following question for how to apply CATransform3D: iPhone image stretching (skew).
ANCHOR POINT INDEPENDENT Solution:
I really like @joshrl's answer, where he makes a category "UIView+Quadrilateral" that uses @hfossli's most excellent answer above. However, multiple calls to the category to change the quadrilateral fail, and the code requires the anchor point to be top left.
My solution (derived from theirs):
- accounts for any anchor point
- allows the quadrilateral to be changed repeatedly
UIView+Quadrilateral.h:
#import <UIKit/UIKit.h>
#import <QuartzCore/QuartzCore.h>
@interface UIView (Quadrilateral)
//Sets frame to bounding box of quad and applies transform
- (void)transformToFitQuadTopLeft:(CGPoint)tl topRight:(CGPoint)tr bottomLeft:(CGPoint)bl bottomRight:(CGPoint)br;
@end
UIView+Quadrilateral.m:
#import "UIView+Quadrilateral.h"
@implementation UIView (Quadrilateral)
- (void)transformToFitQuadTopLeft:(CGPoint)tl topRight:(CGPoint)tr bottomLeft:(CGPoint)bl bottomRight:(CGPoint)br
{
CGRect boundingBox = [[self class] boundingBoxForQuadTR:tr tl:tl bl:bl br:br];
self.layer.transform = CATransform3DIdentity; // keeps current transform from interfering
self.frame = boundingBox;
CGPoint frameTopLeft = boundingBox.origin;
CATransform3D transform = [[self class] rectToQuad:self.bounds
quadTL:CGPointMake(tl.x-frameTopLeft.x, tl.y-frameTopLeft.y)
quadTR:CGPointMake(tr.x-frameTopLeft.x, tr.y-frameTopLeft.y)
quadBL:CGPointMake(bl.x-frameTopLeft.x, bl.y-frameTopLeft.y)
quadBR:CGPointMake(br.x-frameTopLeft.x, br.y-frameTopLeft.y)];
// To account for anchor point, we must translate, transform, translate
CGPoint anchorPoint = self.layer.position;
CGPoint anchorOffset = CGPointMake(anchorPoint.x - boundingBox.origin.x, anchorPoint.y - boundingBox.origin.y);
CATransform3D transPos = CATransform3DMakeTranslation(anchorOffset.x, anchorOffset.y, 0.);
CATransform3D transNeg = CATransform3DMakeTranslation(-anchorOffset.x, -anchorOffset.y, 0.);
CATransform3D fullTransform = CATransform3DConcat(CATransform3DConcat(transPos, transform), transNeg);
// Now we set our transform
self.layer.transform = fullTransform;
}
+ (CGRect)boundingBoxForQuadTR:(CGPoint)tr tl:(CGPoint)tl bl:(CGPoint)bl br:(CGPoint)br
{
CGRect boundingBox = CGRectZero;
CGFloat xmin = MIN(MIN(MIN(tr.x, tl.x), bl.x),br.x);
CGFloat ymin = MIN(MIN(MIN(tr.y, tl.y), bl.y),br.y);
CGFloat xmax = MAX(MAX(MAX(tr.x, tl.x), bl.x),br.x);
CGFloat ymax = MAX(MAX(MAX(tr.y, tl.y), bl.y),br.y);
boundingBox.origin.x = xmin;
boundingBox.origin.y = ymin;
boundingBox.size.width = xmax - xmin;
boundingBox.size.height = ymax - ymin;
return boundingBox;
}
+ (CATransform3D)rectToQuad:(CGRect)rect
quadTL:(CGPoint)topLeft
quadTR:(CGPoint)topRight
quadBL:(CGPoint)bottomLeft
quadBR:(CGPoint)bottomRight
{
return [self rectToQuad:rect quadTLX:topLeft.x quadTLY:topLeft.y quadTRX:topRight.x quadTRY:topRight.y quadBLX:bottomLeft.x quadBLY:bottomLeft.y quadBRX:bottomRight.x quadBRY:bottomRight.y];
}
+ (CATransform3D)rectToQuad:(CGRect)rect
quadTLX:(CGFloat)x1a
quadTLY:(CGFloat)y1a
quadTRX:(CGFloat)x2a
quadTRY:(CGFloat)y2a
quadBLX:(CGFloat)x3a
quadBLY:(CGFloat)y3a
quadBRX:(CGFloat)x4a
quadBRY:(CGFloat)y4a
{
CGFloat X = rect.origin.x;
CGFloat Y = rect.origin.y;
CGFloat W = rect.size.width;
CGFloat H = rect.size.height;
CGFloat y21 = y2a - y1a;
CGFloat y32 = y3a - y2a;
CGFloat y43 = y4a - y3a;
CGFloat y14 = y1a - y4a;
CGFloat y31 = y3a - y1a;
CGFloat y42 = y4a - y2a;
CGFloat a = -H*(x2a*x3a*y14 + x2a*x4a*y31 - x1a*x4a*y32 + x1a*x3a*y42);
CGFloat b = W*(x2a*x3a*y14 + x3a*x4a*y21 + x1a*x4a*y32 + x1a*x2a*y43);
CGFloat c = H*X*(x2a*x3a*y14 + x2a*x4a*y31 - x1a*x4a*y32 + x1a*x3a*y42) - H*W*x1a*(x4a*y32 - x3a*y42 + x2a*y43) - W*Y*(x2a*x3a*y14 + x3a*x4a*y21 + x1a*x4a*y32 + x1a*x2a*y43);
CGFloat d = H*(-x4a*y21*y3a + x2a*y1a*y43 - x1a*y2a*y43 - x3a*y1a*y4a + x3a*y2a*y4a);
CGFloat e = W*(x4a*y2a*y31 - x3a*y1a*y42 - x2a*y31*y4a + x1a*y3a*y42);
CGFloat f = -(W*(x4a*(Y*y2a*y31 + H*y1a*y32) - x3a*(H + Y)*y1a*y42 + H*x2a*y1a*y43 + x2a*Y*(y1a - y3a)*y4a + x1a*Y*y3a*(-y2a + y4a)) - H*X*(x4a*y21*y3a - x2a*y1a*y43 + x3a*(y1a - y2a)*y4a + x1a*y2a*(-y3a + y4a)));
CGFloat g = H*(x3a*y21 - x4a*y21 + (-x1a + x2a)*y43);
CGFloat h = W*(-x2a*y31 + x4a*y31 + (x1a - x3a)*y42);
CGFloat i = W*Y*(x2a*y31 - x4a*y31 - x1a*y42 + x3a*y42) + H*(X*(-(x3a*y21) + x4a*y21 + x1a*y43 - x2a*y43) + W*(-(x3a*y2a) + x4a*y2a + x2a*y3a - x4a*y3a - x2a*y4a + x3a*y4a));
const double kEpsilon = 0.0001;
if(fabs(i) < kEpsilon)
{
i = kEpsilon* (i > 0 ? 1.0 : -1.0);
}
CATransform3D transform = {a/i, d/i, 0, g/i, b/i, e/i, 0, h/i, 0, 0, 1, 0, c/i, f/i, 0, 1.0};
return transform;
}
@end
The above category is so simple and elegant, it ought to be included in every toolbox. THANK YOUs to the ultimate sources of the above code. No credit should be given to me.
Using built-in Swift matrix math:
https://github.com/paulz/PerspectiveTransform#swift-code-example
import PerspectiveTransform
let destination = Perspective(
CGPoint(x: 108.315837, y: 80.1687782),
CGPoint(x: 377.282671, y: 41.4352201),
CGPoint(x: 193.321418, y: 330.023027),
CGPoint(x: 459.781253, y: 251.836131)
)
// Starting perspective is the current overlay frame or could be another 4 points.
let start = Perspective(overlayView.frame)
// Calculate the CATransform3D from start to destination
overlayView.layer.transform = start.projectiveTransform(destination: destination)
@hfossli's answer (the accepted and most-voted answer) calculates the final transform matrix directly, which makes it "magic code" that is complicated and not really readable, and I think without any real reason.
What you need to do is the following transformations:
translation x rotation x scaling
(Order is important: you must have the scaling as the rightmost factor and the translation as the leftmost.)
And then invert the matrix.
(Or you could compute the inverted matrix directly by reversing the order and applying the opposite transformations: translating in the opposite direction, rotating by the opposite angle, and scaling by the inverse factor.)
In iOS I guess it will be something along the lines of:
CATransform3D t = CATransform3DIdentity;
t = CATransform3DScale(t, .... )
t = CATransform3DRotate(t, ....)
t = CATransform3DTranslate(t, ....)
CATransform3D invertT = CATransform3DInvert(t);
where you fill .... with the actual scaling, rotation and translation needed.
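A minimal Swift sketch of the same idea; the scale, angle and offsets below are made-up placeholder values purely for illustration, not derived from any particular quadrilateral:

import QuartzCore

// Illustrative values only - in practice derive them from your quad.
let scale: CGFloat = 0.75
let angle: CGFloat = .pi / 12   // about 15 degrees
let dx: CGFloat = 40, dy: CGFloat = -25

var t = CATransform3DIdentity
t = CATransform3DScale(t, scale, scale, 1)
t = CATransform3DRotate(t, angle, 0, 0, 1)
t = CATransform3DTranslate(t, dx, dy, 0)
let inverted = CATransform3DInvert(t)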
I am transforming a view with CGAffineTransformMake. It rotates, scales and translates. This works just fine. But I cannot figure out a way to limit the scale to a maximum size.
If the scale limit is exceeded, I still need to apply the current rotation and translation.
Any suggestions are greatly appreciated!
Source:
UITouch *touch1 = [sortedTouches objectAtIndex:0];
UITouch *touch2 = [sortedTouches objectAtIndex:1];
CGPoint beginPoint1 = *(CGPoint *)CFDictionaryGetValue(touchBeginPoints, touch1);
CGPoint currentPoint1 = [touch1 locationInView:self.superview];
CGPoint beginPoint2 = *(CGPoint *)CFDictionaryGetValue(touchBeginPoints, touch2);
CGPoint currentPoint2 = [touch2 locationInView:self.superview];
double layerX = self.center.x;
double layerY = self.center.y;
double x1 = beginPoint1.x - layerX;
double y1 = beginPoint1.y - layerY;
double x2 = beginPoint2.x - layerX;
double y2 = beginPoint2.y - layerY;
double x3 = currentPoint1.x - layerX;
double y3 = currentPoint1.y - layerY;
double x4 = currentPoint2.x - layerX;
double y4 = currentPoint2.y - layerY;
// Solve the system:
// [a b t1, -b a t2, 0 0 1] * [x1, y1, 1] = [x3, y3, 1]
// [a b t1, -b a t2, 0 0 1] * [x2, y2, 1] = [x4, y4, 1]
double D = (y1-y2)*(y1-y2) + (x1-x2)*(x1-x2);
if (D < 0.1) {
return CGAffineTransformMakeTranslation(x3-x1, y3-y1);
}
double a = (y1-y2)*(y3-y4) + (x1-x2)*(x3-x4);
double b = (y1-y2)*(x3-x4) - (x1-x2)*(y3-y4);
double tx = (y1*x2 - x1*y2)*(y4-y3) - (x1*x2 + y1*y2)*(x3+x4) + x3*(y2*y2 + x2*x2) + x4*(y1*y1 + x1*x1);
double ty = (x1*x2 + y1*y2)*(-y4-y3) + (y1*x2 - x1*y2)*(x3-x4) + y3*(y2*y2 + x2*x2) + y4*(y1*y1 + x1*x1);
return CGAffineTransformMake(a/D, -b/D, b/D, a/D, tx/D, ty/D);
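For reference, the commented system treats the motion of the two touches as a similarity transform (rotation plus uniform scale plus translation), with $a = s\cos\theta$ and $b = s\sin\theta$:

$$
\begin{pmatrix} a & b & t_1 \\ -b & a & t_2 \\ 0 & 0 & 1 \end{pmatrix}
\begin{pmatrix} x_1 \\ y_1 \\ 1 \end{pmatrix}
= \begin{pmatrix} x_3 \\ y_3 \\ 1 \end{pmatrix},
\qquad
\begin{pmatrix} a & b & t_1 \\ -b & a & t_2 \\ 0 & 0 & 1 \end{pmatrix}
\begin{pmatrix} x_2 \\ y_2 \\ 1 \end{pmatrix}
= \begin{pmatrix} x_4 \\ y_4 \\ 1 \end{pmatrix}
$$

That is four scalar equations in the four unknowns $a, b, t_1, t_2$; $D$ is the common denominator of the solution (the squared distance between the two starting touches), which is why the code falls back to a pure translation when the touches nearly coincide.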
Do something like this:
CGAffineTransform transform = self.view.transform;
float scale = sqrt(transform.a*transform.a + transform.c*transform.c);
if (scale > SCALE_MAX)
self.view.transform = CGAffineTransformScale(transform, SCALE_MAX/scale, SCALE_MAX/scale);
else if (scale < SCALE_MIN)
self.view.transform = CGAffineTransformScale(transform, SCALE_MIN/scale, SCALE_MIN/scale);
Put this at the end of your touchesMoved:withEvent: and updateOriginalTransformForTouches methods. Basically, you check whether the current scale exceeds a certain SCALE_MAX value and, if it does, multiply the transformation matrix by the inverse of the excess scale.
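For reference, this works because a transform built from a rotation by $\theta$ and a uniform scale $s$ (plus any translation) has

$$ a = s\cos\theta, \quad b = s\sin\theta, \quad c = -s\sin\theta, \quad d = s\cos\theta, $$

so $\sqrt{a^2 + c^2} = \sqrt{s^2\cos^2\theta + s^2\sin^2\theta} = s$.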
In case anyone needs @Enzo Tran's answer in Swift, with a UIPinchGestureRecognizer:
@objc func handlePinch(_ recognizer: UIPinchGestureRecognizer) {
    if let view = recognizer.view {
        view.transform = view.transform.scaledBy(x: recognizer.scale, y: recognizer.scale)
        let transform = view.transform
        let maxScale: CGFloat = 1.7 // anything
        let minScale: CGFloat = 0.5 // anything
        let scale = sqrt(transform.a * transform.a + transform.c * transform.c)
        if scale > maxScale {
            view.transform = transform.scaledBy(x: maxScale / scale, y: maxScale / scale)
        } else if scale < minScale {
            view.transform = transform.scaledBy(x: minScale / scale, y: minScale / scale)
        }
        recognizer.scale = 1
    }
}
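To hook it up (assuming the handler above lives in your view controller and is exposed to Objective-C, as written), a one-time setup might be:

let pinch = UIPinchGestureRecognizer(target: self, action: #selector(handlePinch(_:)))
view.addGestureRecognizer(pinch)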
I have four UIViews on a UIScrollView (the screen is divided into quadrants).
Each quadrant has a few objects (UIImageViews) on it.
When the user taps the screen, I want to find the object closest to the tapped CGPoint.
Any ideas?
I have the CGPoint and frame (CGRect) of the objects within each quadrant.
UPDATE:
(Screenshot omitted; source: skitch.com.) The red pins are UIImageViews.
// UIScrollView
NSLog(#" UIScrollView: %#", self);
// Here's the tap on the Window in UIScrollView's coordinates
NSLog(#"TapPoint: %3.2f, %3.2f", tapLocation.x, tapLocation.y);
// Find Distance between tap and objects
NSArray *arrayOfCGRrectObjects = [self subviews];
NSEnumerator *enumerator = [arrayOfCGRrectObjects objectEnumerator];
for (UIView *tilesOnScrollView in enumerator) {
// each tile may have 0 or more images
for ( UIView *subview in tilesOnScrollView.subviews ) {
// Is this an UIImageView?
if ([NSStringFromClass([subview class]) isEqualToString:@"UIImageView"]) {
// Yes, here are the UIImageView details (subView)
NSLog(#"%#", subview);
// Convert CGPoint of UIImageView to CGPoint of UIScrollView for comparison...
// First, Convert CGPoint from UIScrollView to UIImageView's coordinate system for reference
CGPoint found = [subview convertPoint:tapLocation fromView:self];
NSLog(#"Converted Point from ScrollView: %3.2f, %3.2f", found.x, found.y);
// Second, Convert CGPoint from UIScrollView to Window's coordinate system for reference
found = [subview convertPoint:subview.frame.origin toView:nil];
NSLog(#"Converted Point in Window: %3.2f, %3.2f", found.x, found.y);
// Finally, use the object's CGPoint in UIScrollView's coordinates for comparison
found = [subview convertPoint:subview.frame.origin toView:self]; // self is UIScrollView (see above)
NSLog(#"Converted Point: %3.2f, %3.2f", found.x, found.y);
// Determine tap CGPoint in UIImageView's coordinate system
CGPoint localPoint = [touch locationInView:subview];
NSLog(#"LocateInView: %3.2f, %3.2f",localPoint.x, localPoint.y );
//Kalle's code
CGRect newRect = CGRectMake(found.x, found.y, 32, 39);
NSLog(#"Kalle's Distance: %3.2f",[self distanceBetweenRect:newRect andPoint:tapLocation]);
} // end if (UIImageView check)
} // end for (subviews of tile)
} // end for (tiles on the scroll view)
Debug Console
Here's the problem: each tile is 256x256. The first UIImageView's CGPoint converted to the UIScrollView's coordinate system (53.25, 399.36) should be dead on the tap point (30, 331). Why the difference? And the other point, to the right of the tapped point, is calculated as closer (distance-wise)?
<CALayer: 0x706a690>>
[207] TapPoint: 30.00, 331.00
[207] <UIImageView: 0x7073db0; frame = (26.624 71.68; 32 39); opaque = NO; userInteractionEnabled = NO; tag = 55; layer = <CALayer: 0x70747d0>>
[207] Converted Point from ScrollView: 3.38, 3.32
[207] Converted Point in Window: 53.25, 463.36
[207] Converted Point: 53.25, 399.36 *** Looks way off!
[207] LocateInView: 3.38, 3.32
[207] Kalle's Distance: 72.20 **** THIS IS THE TAPPED POINT
[207] <UIImageView: 0x7074fb0; frame = (41.984 43.008; 32 39); opaque = NO; userInteractionEnabled = NO; tag = 55; layer = <CALayer: 0x7074fe0>>
[207] Converted Point from ScrollView: -11.98, 31.99
[207] Converted Point in Window: 83.97, 406.02
[207] Converted Point: 83.97, 342.02
[207] LocateInView: -11.98, 31.99
[207] Kalle's Distance: 55.08 ***** BUT THIS ONE's CLOSER??????
The following method should do the trick. If you spot anything weird in it feel free to point it out.
- (CGFloat)distanceBetweenRect:(CGRect)rect andPoint:(CGPoint)point
{
// first of all, we check if point is inside rect. If it is, distance is zero
if (CGRectContainsPoint(rect, point)) return 0.f;
// next we see which point in rect is closest to point
CGPoint closest = rect.origin;
if (rect.origin.x + rect.size.width < point.x)
closest.x += rect.size.width; // point is far right of us
else if (point.x > rect.origin.x)
closest.x = point.x; // point above or below us
if (rect.origin.y + rect.size.height < point.y)
closest.y += rect.size.height; // point is far below us
else if (point.y > rect.origin.y)
closest.y = point.y; // point is straight left or right
// we've got a closest point; now pythagorean theorem
// distance^2 = (closest.x - point.x)^2 + (closest.y - point.y)^2
CGFloat a = powf(closest.y-point.y, 2.f);
CGFloat b = powf(closest.x-point.x, 2.f);
return sqrtf(a + b);
}
Example output:
CGPoint p = CGPointMake(12,12);
CGRect a = CGRectMake(5,5,10,10);
CGRect b = CGRectMake(13,11,10,10);
CGRect c = CGRectMake(50,1,10,10);
NSLog(#"distance p->a: %f", [self distanceBetweenRect:a andPoint:p]);
// 2010-08-24 13:36:39.506 app[4388:207] distance p->a: 0.000000
NSLog(#"distance p->b: %f", [self distanceBetweenRect:b andPoint:p]);
// 2010-08-24 13:38:03.149 app[4388:207] distance p->b: 1.000000
NSLog(#"distance p->c: %f", [self distanceBetweenRect:c andPoint:p]);
// 2010-08-24 13:39:52.148 app[4388:207] distance p->c: 38.013157
There might be more optimized versions out there, so it might be worth digging around more.
The following method determines the distance between two CGPoints.
- (CGFloat)distanceBetweenPoint:(CGPoint)a andPoint:(CGPoint)b
{
CGFloat a2 = powf(a.x-b.x, 2.f);
CGFloat b2 = powf(a.y-b.y, 2.f);
return sqrtf(a2 + b2);
}
Update: removed fabsf(); (-x)^2 is the same as x^2, so it's unnecessary.
Update 2: added distanceBetweenPoint:andPoint: method too, for completeness.
If you're using Swift, here's how you can calculate the distance between a CGPoint and a CGRect (e.g. a UIView's frame):
private func distanceToRect(rect: CGRect, fromPoint point: CGPoint) -> CGFloat {
// if it's on the left then (rect.minX - point.x) > 0 and (point.x - rect.maxX) < 0
// if it's on the right then (rect.minX - point.x) < 0 and (point.x - rect.maxX) > 0
// if it's inside the rect then both of them < 0.
let dx = max(rect.minX - point.x, point.x - rect.maxX, 0)
// same as dx
let dy = max(rect.minY - point.y, point.y - rect.maxY, 0)
// if one of them == 0 then the distance is the other one.
if dx * dy == 0 {
return max(dx, dy)
} else {
// both are > 0 then the distance is the hypotenuse
return hypot(dx, dy)
}
}
Thanks @cristian, here's an Objective-C version of your answer:
- (CGFloat)distanceToRect:(CGRect)rect fromPoint:(CGPoint)point
{
CGFloat dx = MAX(0, MAX(CGRectGetMinX(rect) - point.x, point.x - CGRectGetMaxX(rect)));
CGFloat dy = MAX(0, MAX(CGRectGetMinY(rect) - point.y, point.y - CGRectGetMaxY(rect)));
if (dx * dy == 0)
{
return MAX(dx, dy);
}
else
{
return hypot(dx, dy);
}
}
A shorter version of @cristian's answer:
func distance(from rect: CGRect, to point: CGPoint) -> CGFloat {
let dx = max(rect.minX - point.x, point.x - rect.maxX, 0)
let dy = max(rect.minY - point.y, point.y - rect.maxY, 0)
return dx * dy == 0 ? max(dx, dy) : hypot(dx, dy)
}
Personally, I would implement this as a CGPoint extension:
extension CGPoint {
func distance(from rect: CGRect) -> CGFloat {
let dx = max(rect.minX - x, x - rect.maxX, 0)
let dy = max(rect.minY - y, y - rect.maxY, 0)
return dx * dy == 0 ? max(dx, dy) : hypot(dx, dy)
}
}
Alternatively, you can also implement it as a CGRect extension:
extension CGRect {
func distance(from point: CGPoint) -> CGFloat {
let dx = max(minX - point.x, point.x - maxX, 0)
let dy = max(minY - point.y, point.y - maxY, 0)
return dx * dy == 0 ? max(dx, dy) : hypot(dx, dy)
}
}
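To tie it back to the original question, here is a minimal sketch of picking the UIImageView closest to a tap using the CGRect extension above (the scroll view / tile hierarchy and the names are assumptions):

import UIKit

// Hypothetical usage: find the pin (UIImageView) closest to a tap,
// comparing frames in the scroll view's coordinate space.
func closestPin(to tapPoint: CGPoint, in scrollView: UIScrollView) -> UIImageView? {
    var best: (pin: UIImageView, distance: CGFloat)?
    for tile in scrollView.subviews {
        for case let pin as UIImageView in tile.subviews {
            let frameInScrollView = tile.convert(pin.frame, to: scrollView)
            let d = frameInScrollView.distance(from: tapPoint)
            if best == nil || d < best!.distance {
                best = (pin, d)
            }
        }
    }
    return best?.pin
}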