I'm using Apple's approach to handling AR in SceneKit, and I use the method below to scale the object. It works fine when all of the object's nodes are merged into one node, but because of the object's animations I can't merge them into one. So I need to change this method so that it scales all the nodes together.
Can anyone help me with that? Thanks.
var trackedObject: VirtualObject? {
didSet {
guard trackedObject != nil else { return }
selectedObject = trackedObject
}
}
@objc func pinched(_ gesture: UIPinchGestureRecognizer) {
switch gesture.state {
case .began:
gesture.scale = isScaledChanged ? CGFloat(trackedObject?.objectScale.x ?? 1) : CGFloat(1)
case .changed:
var newScale: SCNVector3
if gesture.scale < 0.5 {
newScale = SCNVector3(x: 0.5, y: 0.5, z: 0.5)
} else if gesture.scale > 3 {
newScale = SCNVector3(3, 3, 3)
} else {
newScale = SCNVector3(gesture.scale, gesture.scale, gesture.scale)
}
isScaledChanged = true
trackedObject?.objectScale = newScale
default:
break
}
}
And in the VirtualObject class:
var objectScale: SCNVector3 {
get {
return childNodes.first!.scale
}
set (newValue) {
childNodes.first!.scale = newValue
}
}
I tried something like this, but it didn't work well:
var objectScale: SCNVector3 {
get {
for node in childNodes {
return node.scale
}
return childNodes.first!.scale
}
set (newValue) {
for node in childNodes {
node.scale = newValue
}
}
}
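Not an answer from the thread, but one way to sidestep the per-child loop entirely is to scale the VirtualObject node itself, so every animated child node inherits the scale from its parent. A minimal sketch, assuming VirtualObject is an SCNNode subclass:
// Sketch (assumption): expose the container node's own scale instead of a child's.
// Child nodes, and any animations running on them, inherit the parent's scale.
var objectScale: SCNVector3 {
    get { return scale }
    set { scale = newValue }
}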
I would like to create a gesture that requires a LongPressGesture before either of two other gestures (DragGesture and MagnificationGesture) becomes possible. It works fine for just dragging, but if you try to magnify it soon ends up in a hung state; it looks like the magnification never ends.
What would be the right way to do this?
Here's the code:
struct DraggableCircle: View {
enum DragState {
case inactive
case pressing
case draggingOrMagnifying(translation: CGSize, magnification: CGFloat)
var translation: CGSize {
switch self {
case .inactive, .pressing:
return .zero
case .draggingOrMagnifying(let translation, _):
return translation
}
}
var magnification: CGFloat {
switch self {
case .inactive, .pressing:
return 1
case .draggingOrMagnifying(_, let magnification):
return magnification
}
}
var isActive: Bool {
switch self {
case .inactive:
return false
case .pressing, .draggingOrMagnifying:
return true
}
}
var isDragging: Bool {
switch self {
case .inactive, .pressing:
return false
case .draggingOrMagnifying:
return true
}
}
}
@GestureState var dragState = DragState.inactive
@State var viewState = CGSize.zero
@State var magState = CGFloat(1)
var body: some View {
let minimumLongPressDuration = 0.5
let longPressDrag = LongPressGesture(minimumDuration: minimumLongPressDuration)
.sequenced(before: DragGesture().exclusively(before: MagnificationGesture()))
.updating($dragState) { value, state, transaction in
switch value {
// Long press begins.
case .first(true):
state = .pressing
// Long press confirmed, dragging may begin.
case .second(true, let exclusiveGesture):
switch exclusiveGesture {
case .first(let drag):
state = .draggingOrMagnifying(translation: drag.translation, magnification: 1)
case .second(let mag):
state = .draggingOrMagnifying(translation: .zero, magnification: mag)
case .none:
state = .draggingOrMagnifying(translation: .zero, magnification: 1)
}
print(state)
// Dragging ended or the long press cancelled.
default:
state = .inactive
}
}
.onEnded { value in
guard case .second(true, let gest?) = value else { return }
switch gest {
case .first(let drag):
self.viewState.width += drag.translation.width
self.viewState.height += drag.translation.height
case .second(let mag):
self.magState = magState * mag
}
}
return Circle()
.fill(Color.blue)
.overlay(dragState.isDragging ? Circle().stroke(Color.white, lineWidth: 2) : nil)
.frame(width: clipToBounds(100 * magState * dragState.magnification), height: clipToBounds(100 * magState * dragState.magnification), alignment: .center)
.offset(
x: viewState.width + dragState.translation.width,
y: viewState.height + dragState.translation.height
)
.animation(nil)
.shadow(radius: dragState.isActive ? 8 : 0)
.animation(.linear(duration: minimumLongPressDuration))
.gesture(longPressDrag)
}
func clipToBounds(_ m: CGFloat) -> CGFloat {
if abs(m) > UIScreen.main.bounds.height {
return UIScreen.main.bounds.height
} else {
return abs(m)
}
}
}
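One commonly suggested workaround (not from the original post) is to compose the drag and magnification with .simultaneously(with:) instead of .exclusively(before:), so neither gesture is left waiting for the other to fail. A minimal sketch of just the changed gesture, reusing the DragState type and $dragState from above:
// Sketch (assumption): after the long press, let drag and magnification run together.
let longPressDragMagnify = LongPressGesture(minimumDuration: minimumLongPressDuration)
    .sequenced(before: DragGesture().simultaneously(with: MagnificationGesture()))
    .updating($dragState) { value, state, _ in
        switch value {
        case .first(true):
            state = .pressing
        case .second(true, let combined):
            state = .draggingOrMagnifying(
                translation: combined?.first?.translation ?? .zero,
                magnification: combined?.second ?? 1)
        default:
            state = .inactive
        }
    }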
I have a UIView that slides in from the bottom after you 1) tap it or 2) slide it to the top. Right now, you can close it by 1) tapping it again or 2) sliding it to the bottom.
I want to prevent closing the view with a single tap and only allow sliding it down to close it.
This is my current code: (from: https://www.swiftkickmobile.com/building-better-app-animations-swift-uiviewpropertyanimator)
/// A pan gesture that enters into the `began` state on touch down instead of waiting for a touches moved event.
class InstantPanGestureRecognizer: UIPanGestureRecognizer {
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent) {
if (self.state == UIGestureRecognizer.State.began) { return }
super.touchesBegan(touches, with: event)
self.state = UIGestureRecognizer.State.began
}
}
@objc private func popupViewPanned(recognizer: UIPanGestureRecognizer) {
print(recognizer.state.rawValue)
switch recognizer.state {
case .began:
// Start the animations
animateTransitionIfNeeded(to: currentState.opposite, duration: 0.5)
// Pause all animations, since the next event may be a pan changed
runningAnimators.forEach { $0.pauseAnimation() }
// Keep track of each animator's progress
animationProgress = runningAnimators.map { $0.fractionComplete }
case .changed:
// Variable setup
let translation = recognizer.translation(in: popupView)
if UIDevice().userInterfaceIdiom == .phone {
switch UIScreen.main.nativeBounds.height {
case 1334, 1920, 2208:
var fraction = -translation.y / popupOffset
// adjust the fraction for the current state and reversed state
if currentState == .open { fraction *= -1 }
if runningAnimators[0].isReversed { fraction *= -1 }
// apply the new fraction
for (index, animator) in runningAnimators.enumerated() {
animator.fractionComplete = fraction + animationProgress[index]
}
case 2436, 2688, 1792:
var fraction = -translation.y / popupOffsetNotch
// adjust the fraction for the current state and reversed state
if currentState == .open { fraction *= -1 }
if runningAnimators[0].isReversed { fraction *= -1 }
// apply the new fraction
for (index, animator) in runningAnimators.enumerated() {
animator.fractionComplete = fraction + animationProgress[index]
}
default:
var fraction = -translation.y / popupOffsetNotch
// adjust the fraction for the current state and reversed state
if currentState == .open { fraction *= -1 }
if runningAnimators[0].isReversed { fraction *= -1 }
// apply the new fraction
for (index, animator) in runningAnimators.enumerated() {
animator.fractionComplete = fraction + animationProgress[index]
}
}
}
case .ended:
// variable setup
let yVelocity = recognizer.velocity(in: popupView).y
let shouldClose = yVelocity > 0
// if there is no motion, continue all animations and exit early
if yVelocity == 0 {
runningAnimators.forEach { $0.continueAnimation(withTimingParameters: nil, durationFactor: 0) }
break
}
// reverse the animations based on their current state and pan motion
switch currentState {
case .open:
if !shouldClose && !runningAnimators[0].isReversed { runningAnimators.forEach { $0.isReversed = !$0.isReversed } }
if shouldClose && runningAnimators[0].isReversed { runningAnimators.forEach { $0.isReversed = !$0.isReversed } }
case .closed:
if shouldClose && !runningAnimators[0].isReversed { runningAnimators.forEach { $0.isReversed = !$0.isReversed } }
if !shouldClose && runningAnimators[0].isReversed { runningAnimators.forEach { $0.isReversed = !$0.isReversed } }
}
// continue all animations
runningAnimators.forEach { $0.continueAnimation(withTimingParameters: nil, durationFactor: 0) }
default:
()
}
}
/// Animates the transition, if the animation is not already running.
private func animateTransitionIfNeeded(to state: State, duration: TimeInterval) {
// ensure that the animators array is empty (which implies new animations need to be created)
guard runningAnimators.isEmpty else { return }
// an animator for the transition
let transitionAnimator = UIViewPropertyAnimator(duration: duration, dampingRatio: 1, animations: {
switch state {
case .open:
self.bottomConstraint.constant = 0
self.popupView.layer.cornerRadius = 10
case .closed:
if UIDevice().userInterfaceIdiom == .phone {
switch UIScreen.main.nativeBounds.height {
case 1334, 1920, 2208:
self.bottomConstraint.constant = self.popupOffset
case 2436, 2688, 1792:
self.bottomConstraint.constant = self.popupOffsetNotch
default:
self.bottomConstraint.constant = self.popupOffsetNotch
}
}
self.popupView.layer.cornerRadius = 5
}
self.view.layoutIfNeeded()
})
// the transition completion block
transitionAnimator.addCompletion { position in
// update the state
switch position {
case .start:
self.currentState = state.opposite
case .end:
self.currentState = state
case .current:
()
@unknown default:
return
}
// manually reset the constraint positions
switch self.currentState {
case .open:
self.bottomConstraint.constant = 0
case .closed:
if UIDevice().userInterfaceIdiom == .phone {
switch UIScreen.main.nativeBounds.height {
case 1334, 1920, 2208:
self.bottomConstraint.constant = self.popupOffset
case 2436, 2688, 1792:
self.bottomConstraint.constant = self.popupOffsetNotch
default:
self.bottomConstraint.constant = self.popupOffsetNotch
}
}
}
// remove all running animators
self.runningAnimators.removeAll()
}
// start all animators
transitionAnimator.startAnimation()
// keep track of all running animators
runningAnimators.append(transitionAnimator)
}
Solution: set panRecognizer.delegate = self, conform to UIGestureRecognizerDelegate, and implement:
// This function is needed to add the recognizer only to its parent view.
func gestureRecognizer(_ gestureRecognizer: UIGestureRecognizer, shouldReceive touch: UITouch) -> Bool {
return touch.view == gestureRecognizer.view
}
and use child views.
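For context, the wiring behind that solution might look roughly like this (popupView, InstantPanGestureRecognizer and popupViewPanned come from the code above; the exact setup is an assumption):
// Sketch: the pan recognizer is attached to popupView and given the delegate above,
// so touches that start on child views are filtered out and taps no longer toggle the panel.
let panRecognizer = InstantPanGestureRecognizer(target: self,
                                                action: #selector(popupViewPanned(recognizer:)))
panRecognizer.delegate = self
popupView.addGestureRecognizer(panRecognizer)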
I'm implementing a custom transition using both CABasicAnimation and UIView.animate. I also need to implement a custom interactive transition using UIPercentDrivenInteractiveTransition that exactly copies the behavior of the native iOS swipe back. The animation without the back-swipe gesture (pushing and popping via the back arrow) works fine and smoothly. The swipe back also works smoothly, except when the gesture velocity is more than 900.
Gesture Recognition function:
@objc func handleBackGesture(_ gesture: UIScreenEdgePanGestureRecognizer) {
guard animationTransition != nil else { return }
switch gesture.state {
case .began:
interactionController = TransparentNavigationControllerTransitionInteractor(duration: anumationDuration)
popViewController(animated: true)
case .changed:
guard let view = gesture.view?.superview else { return }
let translation = gesture.translation(in: view)
var percentage = translation.x / view.bounds.size.width
percentage = min(1.0, max(0.0, percentage))
shouldCompleteTransition = percentage > 0.5
interactionController?.update(percentage)
case .cancelled, .failed, .possible:
if let interactionController = self.interactionController {
isInteractiveStarted = false
interactionController.cancel()
}
case .ended:
interactionController?.completionSpeed = 0.999
let greaterThanMaxVelocity = gesture.velocity(in: view).x > 800
let canFinish = shouldCompleteTransition || greaterThanMaxVelocity
canFinish ? interactionController?.finish() : interactionController?.cancel()
interactionController = nil
@unknown default: assertionFailure()
}
}
The UIPercentDrivenInteractiveTransition subclass. Here I'm synchronizing the layer animation:
final class TransparentNavigationControllerTransitionInteractor: UIPercentDrivenInteractiveTransition {
// MARK: - Private Properties
private var context: UIViewControllerContextTransitioning?
private var pausedTime: CFTimeInterval = 0
private let animationDuration: TimeInterval
// MARK: - Initialization
init(duration: TimeInterval) {
self.animationDuration = duration * 0.4 // Not sure why, but the layer duration should be shorter
super.init()
}
// MARK: - Public Methods
override func startInteractiveTransition(_ transitionContext: UIViewControllerContextTransitioning) {
super.startInteractiveTransition(transitionContext)
context = transitionContext
pausedTime = transitionContext.containerView.layer.convertTime(CACurrentMediaTime(), from: nil)
transitionContext.containerView.layer.speed = 0
transitionContext.containerView.layer.timeOffset = pausedTime
}
override func finish() {
restart(isFinishing: true)
super.finish()
}
override func cancel() {
restart(isFinishing: false)
super.cancel()
}
override func update(_ percentComplete: CGFloat) {
super.update(percentComplete)
guard let transitionContext = context else { return }
let progress = CGFloat(animationDuration) * percentComplete
transitionContext.containerView.layer.timeOffset = pausedTime + Double(progress)
}
// MARK: - Private Methods
private func restart(isFinishing: Bool) {
guard let transitionLayer = context?.containerView.layer else { return }
transitionLayer.beginTime = transitionLayer.convertTime(CACurrentMediaTime(), from: nil)
transitionLayer.speed = isFinishing ? 1 : -1
}
}
And here is my dismissal animation function in the UIViewControllerAnimatedTransitioning class:
private func runDismissAnimationFrom(
_ fromView: UIView,
to toView: UIView,
in transitionContext: UIViewControllerContextTransitioning) {
guard let toViewController = transitionContext.viewController(forKey: .to) else { return }
toView.frame = toView.frame.offsetBy(dx: -fromView.frame.width / 3, dy: 0)
let toViewFinalFrame = transitionContext.finalFrame(for: toViewController)
let fromViewFinalFrame = fromView.frame.offsetBy(dx: fromView.frame.width, dy: 0)
// Create mask to hide bottom view with sliding
let slidingMask = CAShapeLayer()
let initialMaskPath = UIBezierPath(rect: CGRect(
x: fromView.frame.width / 3,
y: 0,
width: 0,
height: toView.frame.height)
)
let finalMaskPath = UIBezierPath(rect: toViewFinalFrame)
slidingMask.path = initialMaskPath.cgPath
toView.layer.mask = slidingMask
toView.alpha = 0
let slidingAnimation = CABasicAnimation(keyPath: "path")
slidingAnimation.fromValue = initialMaskPath.cgPath
slidingAnimation.toValue = finalMaskPath.cgPath
slidingAnimation.timingFunction = .init(name: .linear)
slidingMask.path = finalMaskPath.cgPath
slidingMask.add(slidingAnimation, forKey: slidingAnimation.keyPath)
UIView.animate(
withDuration: duration,
delay: 0,
options: animationOptions,
animations: {
fromView.frame = fromViewFinalFrame
toView.frame = toViewFinalFrame
toView.alpha = 1
},
completion: { _ in
toView.layer.mask = nil
transitionContext.completeTransition(!transitionContext.transitionWasCancelled)
})
}
Note that the glitch occurs only when the swipe has a very high velocity.
Here is a video showing the result: smooth animation at normal speed, not smooth at high speed - https://youtu.be/1d-kTPlhNvE
UPD:
I've already tried to use UIViewPropertyAnimator combined with
interruptibleAnimator(using transitionContext: UIViewControllerContextTransitioning) -> UIViewImplicitlyAnimating
but the result is another kind of glitching.
I've solved the issue by changing part of the restart function:
transitionLayer.beginTime =
transitionLayer.convertTime(CACurrentMediaTime(), from: nil) - transitionLayer.timeOffset
transitionLayer.speed = 1
I don't really understand why, but it looks like the timeOffset subtraction is what makes it work!
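For clarity, here is roughly what the whole restart function looks like with that change applied (same property names as in the interactor above):
// Sketch combining the original restart(isFinishing:) with the posted fix.
private func restart(isFinishing: Bool) {
    guard let transitionLayer = context?.containerView.layer else { return }
    // Subtracting timeOffset resumes the paused layer animation from where the
    // gesture left off instead of replaying it from its beginning.
    transitionLayer.beginTime =
        transitionLayer.convertTime(CACurrentMediaTime(), from: nil) - transitionLayer.timeOffset
    transitionLayer.speed = 1 // the fix uses 1 here; the original used isFinishing ? 1 : -1
}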
Here is my code:
viewDidLoad:
UIPinchGestureRecognizer *pinch = [[UIPinchGestureRecognizer alloc] initWithTarget:self action:@selector(pinch:)];
[self.canvas addGestureRecognizer:pinch];
pinch.delegate = self;
UIRotationGestureRecognizer *twoFingersRotate = [[UIRotationGestureRecognizer alloc] initWithTarget:self action:@selector(pinchRotate:)];
[[self canvas] addGestureRecognizer:twoFingersRotate];
twoFingersRotate.delegate = self;
Code For Pinches and Rotates:
-(BOOL)gestureRecognizer:(UIGestureRecognizer *)gestureRecognizer shouldRecognizeSimultaneouslyWithGestureRecognizer:(UIGestureRecognizer *)otherGestureRecognizer
{
return YES;
}
-(void)pinchRotate:(UIRotationGestureRecognizer*)rotate
{
SMImage *selectedImage = [DataCenter sharedDataCenter].selectedImage;
switch (rotate.state)
{
case UIGestureRecognizerStateBegan:
{
selectedImage.referenceTransform = selectedImage.transform;
break;
}
case UIGestureRecognizerStateChanged:
{
selectedImage.transform = CGAffineTransformRotate(selectedImage.referenceTransform, ([rotate rotation] * 55) * M_PI/180);
break;
}
default:
break;
}
}
-(void)pinch:(UIPinchGestureRecognizer*)pinch
{
SMImage *selectedImage = [DataCenter sharedDataCenter].selectedImage;
[self itemSelected];
switch (pinch.state)
{
case UIGestureRecognizerStateBegan:
{
selectedImage.referenceTransform = selectedImage.transform;
break;
}
case UIGestureRecognizerStateChanged:
{
CGAffineTransform transform = CGAffineTransformScale(selectedImage.referenceTransform, pinch.scale, pinch.scale);
selectedImage.transform = transform;
break;
}
default:
break;
}
}
My rotation works great on its own and my scale works great on its own, but they won't work together: one or the other works, but not both. When I implement shouldRecognizeSimultaneouslyWithGestureRecognizer, the two gestures seem to fight each other and produce poor results. What am I missing? (Yes, I have implemented <UIGestureRecognizerDelegate>.)
Every time pinch: is called, you just compute the transform based on the pinch recognizer's scale. Every time pinchRotate: is called, you just compute the transform based on the rotation recognizer's rotation. You never combine the scale and the rotation into one transform.
Here's an approach. Give yourself one new instance variable, _activeRecognizers:
NSMutableSet *_activeRecognizers;
Initialize it in viewDidLoad:
_activeRecognizers = [NSMutableSet set];
Use one method as the action for both recognizers:
- (IBAction)handleGesture:(UIGestureRecognizer *)recognizer
{
SMImage *selectedImage = [DataCenter sharedDataCenter].selectedImage;
switch (recognizer.state) {
case UIGestureRecognizerStateBegan:
if (_activeRecognizers.count == 0)
selectedImage.referenceTransform = selectedImage.transform;
[_activeRecognizers addObject:recognizer];
break;
case UIGestureRecognizerStateEnded:
selectedImage.referenceTransform = [self applyRecognizer:recognizer toTransform:selectedImage.referenceTransform];
[_activeRecognizers removeObject:recognizer];
break;
case UIGestureRecognizerStateChanged: {
CGAffineTransform transform = selectedImage.referenceTransform;
for (UIGestureRecognizer *recognizer in _activeRecognizers)
transform = [self applyRecognizer:recognizer toTransform:transform];
selectedImage.transform = transform;
break;
}
default:
break;
}
}
You'll need this helper method:
- (CGAffineTransform)applyRecognizer:(UIGestureRecognizer *)recognizer toTransform:(CGAffineTransform)transform
{
if ([recognizer respondsToSelector:@selector(rotation)])
return CGAffineTransformRotate(transform, [(UIRotationGestureRecognizer *)recognizer rotation]);
else if ([recognizer respondsToSelector:@selector(scale)]) {
CGFloat scale = [(UIPinchGestureRecognizer *)recognizer scale];
return CGAffineTransformScale(transform, scale, scale);
}
else
return transform;
}
This works if you're just allowing rotating and scaling. (I even tested it!)
If you want to add panning, use a separate action method and just adjust selectedImage.center. Trying to do panning with rotation and scaling using selectedImage.transform is much more complicated.
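If you do take the separate pan handler route, a minimal Swift sketch might look like this (selectedImage and referenceCenter are assumed properties, not from the original answer):
// Sketch (assumption): pan by moving the view's center; the transform stays
// reserved for pinch and rotate.
private var referenceCenter: CGPoint = .zero

@objc private func handlePan(_ pan: UIPanGestureRecognizer) {
    switch pan.state {
    case .began:
        referenceCenter = selectedImage.center
    case .changed:
        let translation = pan.translation(in: selectedImage.superview)
        selectedImage.center = CGPoint(x: referenceCenter.x + translation.x,
                                       y: referenceCenter.y + translation.y)
    default:
        break
    }
}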
Swift 3 with Pan, Rotate and Pinch
// MARK: - Gestures
func transformUsingRecognizer(_ recognizer: UIGestureRecognizer, transform: CGAffineTransform) -> CGAffineTransform {
if let rotateRecognizer = recognizer as? UIRotationGestureRecognizer {
return transform.rotated(by: rotateRecognizer.rotation)
}
if let pinchRecognizer = recognizer as? UIPinchGestureRecognizer {
let scale = pinchRecognizer.scale
return transform.scaledBy(x: scale, y: scale)
}
if let panRecognizer = recognizer as? UIPanGestureRecognizer {
let deltaX = panRecognizer.translation(in: imageView).x
let deltaY = panRecognizer.translation(in: imageView).y
return transform.translatedBy(x: deltaX, y: deltaY)
}
return transform
}
var initialTransform: CGAffineTransform?
var gestures = Set<UIGestureRecognizer>(minimumCapacity: 3)
@IBAction func processTransform(_ sender: Any) {
let gesture = sender as! UIGestureRecognizer
switch gesture.state {
case .began:
if gestures.count == 0 {
initialTransform = imageView.transform
}
gestures.insert(gesture)
case .changed:
if var initial = initialTransform {
gestures.forEach({ (gesture) in
initial = transformUsingRecognizer(gesture, transform: initial)
})
imageView.transform = initial
}
case .ended:
gestures.remove(gesture)
default:
break
}
}
func gestureRecognizer(_ gestureRecognizer: UIGestureRecognizer, shouldRecognizeSimultaneouslyWith otherGestureRecognizer: UIGestureRecognizer) -> Bool {
return true
}
For this to work you need to implement the gesture delegate method shouldRecognizeSimultaneouslyWithGestureRecognizer and decide which gestures you want to be recognized simultaneously.
// ensure that the pinch and rotate gesture recognizers on a particular view can all recognize simultaneously
// prevent other gesture recognizers from recognizing simultaneously
- (BOOL)gestureRecognizer:(UIGestureRecognizer *)gestureRecognizer shouldRecognizeSimultaneouslyWithGestureRecognizer:(UIGestureRecognizer *)otherGestureRecognizer
{
// if the gesture recognizers's view isn't one of our views, don't allow simultaneous recognition
if (gestureRecognizer.view != firstView && gestureRecognizer.view != secondView)
return NO;
// if the gesture recognizers are on different views, don't allow simultaneous recognition
if (gestureRecognizer.view != otherGestureRecognizer.view)
return NO;
// if either of the gesture recognizers is the long press, don't allow simultaneous recognition
if ([gestureRecognizer isKindOfClass:[UILongPressGestureRecognizer class]] || [otherGestureRecognizer isKindOfClass:[UILongPressGestureRecognizer class]])
return NO;
return YES;
}
This code needs to be adapted to the view on which you want simultaneous gesture recognizers; the code above is the key part.
This example does not use gesture recognizers; it computes the transformation matrix directly. It also properly handles one-to-two finger transitions. (It relies on a few helper types and operators, such as HXMatrix, the ⋅ and ┼ operators, and CGRect.scaleIn/scaleAndCenterIn, that are not shown.)
class PincherView: UIView {
override var bounds :CGRect {
willSet(newBounds) {
oldBounds = self.bounds
} didSet {
self.imageLayer.position = ┼self.bounds
self._adjustScaleForBoundsChange()
}
}
var oldBounds :CGRect
var touch₁ :UITouch?
var touch₂ :UITouch?
var p₁ :CGPoint? // point 1 in image coordinate system
var p₂ :CGPoint? // point 2 in image coordinate system
var p₁ʹ :CGPoint? // point 1 in view coordinate system
var p₂ʹ :CGPoint? // point 2 in view coordinate system
var image :UIImage? {
didSet {self._reset()}
}
var imageLayer :CALayer
var imageTransform :CGAffineTransform {
didSet {
self.backTransform = self.imageTransform.inverted()
self.imageLayer.transform = CATransform3DMakeAffineTransform(self.imageTransform)
}
}
var backTransform :CGAffineTransform
var solutionMatrix :HXMatrix?
required init?(coder aDecoder: NSCoder) {
self.oldBounds = CGRect.zero
let layer = CALayer();
self.imageLayer = layer
self.imageTransform = CGAffineTransform.identity
self.backTransform = CGAffineTransform.identity
super.init(coder: aDecoder)
self.oldBounds = self.bounds
self.isMultipleTouchEnabled = true
self.layer.addSublayer(layer)
}
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
for touch in touches {
let pʹ = touch.location(in: self).applying(self._backNormalizeTransform())
let p = pʹ.applying(self.backTransform)
if self.touch₁ == nil {
self.touch₁ = touch
self.p₁ʹ = pʹ
self.p₁ = p
} else if self.touch₂ == nil {
self.touch₂ = touch
self.p₂ʹ = pʹ
self.p₂ = p
}
}
self.solutionMatrix = self._computeSolutionMatrix()
}
override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
for touch in touches {
let pʹ = touch.location(in: self).applying(self._backNormalizeTransform())
if self.touch₁ == touch {
self.p₁ʹ = pʹ
} else if self.touch₂ == touch {
self.p₂ʹ = pʹ
}
}
CATransaction.begin()
CATransaction.setValue(true, forKey:kCATransactionDisableActions)
// Whether you're using 1 finger or 2 fingers
if let q₁ʹ = self.p₁ʹ, let q₂ʹ = self.p₂ʹ {
self.imageTransform = self._computeTransform(q₁ʹ, q₂ʹ)
} else if let q₁ʹ = (self.p₁ʹ != nil ? self.p₁ʹ : self.p₂ʹ) {
self.imageTransform = self._computeTransform(q₁ʹ, CGPoint(x:q₁ʹ.x + 10, y:q₁ʹ.y + 10))
}
CATransaction.commit()
}
override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
for touch in touches {
if self.touch₁ == touch {
self.touch₁ = nil
self.p₁ = nil
self.p₁ʹ = nil
} else if self.touch₂ == touch {
self.touch₂ = nil
self.p₂ = nil
self.p₂ʹ = nil
}
}
self.solutionMatrix = self._computeSolutionMatrix()
}
//MARK: Private Methods
private func _reset() {
guard
let image = self.image,
let cgimage = image.cgImage else {
return
}
let r = CGRect(x:0, y:0, width:cgimage.width, height:cgimage.height)
imageLayer.contents = cgimage;
imageLayer.bounds = r
imageLayer.position = ┼self.bounds
self.imageTransform = self._initialTransform()
}
private func _normalizeTransform() -> CGAffineTransform {
let center = ┼self.bounds
return CGAffineTransform(translationX: center.x, y: center.y)
}
private func _backNormalizeTransform() -> CGAffineTransform {
return self._normalizeTransform().inverted();
}
private func _initialTransform() -> CGAffineTransform {
guard let image = self.image, let cgimage = image.cgImage else {
return CGAffineTransform.identity;
}
let r = CGRect(x:0, y:0, width:cgimage.width, height:cgimage.height)
let s = r.scaleIn(rect: self.bounds)
return CGAffineTransform(scaleX: s, y: s)
}
private func _adjustScaleForBoundsChange() {
guard let image = self.image, let cgimage = image.cgImage else {
return
}
let r = CGRect(x:0, y:0, width:cgimage.width, height:cgimage.height)
let oldIdeal = r.scaleAndCenterIn(rect: self.oldBounds)
let newIdeal = r.scaleAndCenterIn(rect: self.bounds)
let s = newIdeal.height / oldIdeal.height
self.imageTransform = self.imageTransform.scaledBy(x: s, y: s)
}
private func _computeSolutionMatrix() -> HXMatrix? {
if let q₁ = self.p₁, let q₂ = self.p₂ {
return _computeSolutionMatrix(q₁, q₂)
} else if let q₁ = self.p₁, let q₁ʹ = self.p₁ʹ {
let q₂ = CGPoint(x: q₁ʹ.x + 10, y: q₁ʹ.y + 10).applying(self.backTransform)
return _computeSolutionMatrix(q₁, q₂)
} else if let q₂ = self.p₂, let q₂ʹ = self.p₂ʹ {
let q₁ = CGPoint(x: q₂ʹ.x + 10, y: q₂ʹ.y + 10).applying(self.backTransform)
return _computeSolutionMatrix(q₂, q₁)
}
return nil
}
private func _computeSolutionMatrix(_ q₁:CGPoint, _ q₂:CGPoint) -> HXMatrix {
let x₁ = Double(q₁.x)
let y₁ = Double(q₁.y)
let x₂ = Double(q₂.x)
let y₂ = Double(q₂.y)
let A = HXMatrix(rows: 4, columns: 4, values:[
x₁, -y₁, 1, 0,
y₁, x₁, 0, 1,
x₂, -y₂, 1, 0,
y₂, x₂, 0, 1
])
return A.inverse()
}
private func _computeTransform(_ q₁ʹ:CGPoint, _ q₂ʹ:CGPoint) -> CGAffineTransform {
guard let solutionMatrix = self.solutionMatrix else {
return CGAffineTransform.identity
}
let B = HXMatrix(rows: 4, columns: 1, values: [
Double(q₁ʹ.x),
Double(q₁ʹ.y),
Double(q₂ʹ.x),
Double(q₂ʹ.y)
])
let C = solutionMatrix ⋅ B
let U = CGFloat(C[0,0])
let V = CGFloat(C[1,0])
let tx = CGFloat(C[2,0])
let ty = CGFloat(C[3,0])
var t :CGAffineTransform = CGAffineTransform.identity
t.a = U; t.b = V
t.c = -V; t.d = U
t.tx = tx; t.ty = ty
return t
}
}
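For reference (not part of the original answer), the math behind _computeSolutionMatrix and _computeTransform: the image transform is restricted to a similarity transform (rotation, uniform scale and translation), so x′ = U·x - V·y + tx and y′ = V·x + U·y + ty. Each tracked touch contributes two such equations, so two touches give the 4x4 linear system A·[U, V, tx, ty]ᵀ = [x₁′, y₁′, x₂′, y₂′]ᵀ, where A is the matrix built in _computeSolutionMatrix. The solution matrix is simply A⁻¹, precomputed when the touches begin, and _computeTransform multiplies it by the current finger positions to recover U, V, tx and ty for the new CGAffineTransform (a = U, b = V, c = -V, d = U).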