I don't want my background image to be too blurry. Isn't there a property to adjust the blur intensity?
let blurEffect = UIBlurEffect(style: UIBlurEffectStyle.Light)
blurEffect.???
let effectView = UIVisualEffectView(effect: blurEffect)
effectView.frame = backgroundAlbumCover.bounds
backgroundAlbumCover.addSubview(effectView)
You can do that in a super elegant way with a UIViewPropertyAnimator.
(Reducing the UIVisualEffectView's alpha will not affect the blur intensity, so we must use an animator.)
Usage is as simple as:
let blurEffectView = BlurEffectView()
view.addSubview(blurEffectView)
BlurEffectView implementation:
class BlurEffectView: UIVisualEffectView {

    var animator = UIViewPropertyAnimator(duration: 1, curve: .linear)

    override func didMoveToSuperview() {
        guard let superview = superview else { return }
        backgroundColor = .clear
        frame = superview.bounds // Or set up constraints instead
        setupBlur()
    }

    private func setupBlur() {
        animator.stopAnimation(true)
        effect = nil

        animator.addAnimations { [weak self] in
            self?.effect = UIBlurEffect(style: .dark)
        }

        animator.fractionComplete = 0.1 // This is your blur intensity in range 0 - 1
    }

    deinit {
        animator.stopAnimation(true)
    }
}
Adjusting the blur itself is not possible... but you can adjust how visible the blur view is. This can be done in a number of ways; here are the three I can think of at the moment:
1st Option: Adjust the alpha of your UIVisualEffectView instance, e.g.:
effectView.alpha = 0.4f;
2nd Option: Add a UIView instance to effectView at index 0 and adjust the alpha of this UIView instance, e.g.:
UIView *blurDilutionView = [[UIView alloc] initWithFrame: effectView.frame];
blurDilutionView.backgroundColor = [[UIColor whiteColor] colorWithAlphaComponent: 0.5];
blurDilutionView.autoresizingMask = UIViewAutoresizingFlexibleTopMargin|UIViewAutoresizingFlexibleBottomMargin|UIViewAutoresizingFlexibleWidth|UIViewAutoresizingFlexibleHeight;//if not using AutoLayout
[effectView insertSubview:blurDilutionView atIndex:0];
3rd Option: use multiple UIVisualEffectView instances (I have not tried this yet; it's more of an idea). Apply an alpha of 0.1f to each. The more UIVisualEffectView views you have, the blurrier the overall look. Once again, I have not tried this option yet! (A rough sketch appears after the update below.)
Update:
As Axeva mentioned in the comments, Apple advises against adjusting the alpha to change the blur. So use these suggestions at your own potential peril.
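For the curious, here is a rough Swift sketch of that untried 3rd option, stacking a few low-alpha effect views over the question's backgroundAlbumCover (the count and alpha values are arbitrary, and the alpha caveat above applies here too):

// Untested idea: several stacked low-alpha blur views read as one softer blur.
for _ in 0..<3 {
    let effectView = UIVisualEffectView(effect: UIBlurEffect(style: .light))
    effectView.frame = backgroundAlbumCover.bounds
    effectView.autoresizingMask = [.flexibleWidth, .flexibleHeight]
    effectView.alpha = 0.1
    backgroundAlbumCover.addSubview(effectView)
}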
I once ran into the problem of creating a blur effect that is darker than the .light UIBlurEffect style but lighter than .dark.
To achieve that, put a view behind the blur with the color and alpha you need:
let pictureImageView = // Image that you need to blur
let backView = UIView(frame: pictureImageView.bounds)
backView.backgroundColor = UIColor(red: 100/255, green: 100/255, blue: 100/255, alpha: 0.3)
pictureImageView.addSubview(backView)
let blurEffect = UIBlurEffect(style: .light)
let blurEffectView = UIVisualEffectView(effect: blurEffect)
blurEffectView.frame = pictureImageView.bounds
pictureImageView.addSubview(blurEffectView)
Here is how the result looks:
For more details, check out this article.
UPDATE: apparently there is another nice (maybe even nicer) way to implement the blur, using CIFilter(name: "CIGaussianBlur").
It allows making the “opacity” and the blur strength much lower than UIBlurEffect does.
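A minimal sketch of that CIGaussianBlur route (the radius value is arbitrary; the crop removes the blurred edge halo the filter adds):

import CoreImage
import UIKit

func blurred(_ image: UIImage, radius: Double) -> UIImage? {
    guard let ciImage = CIImage(image: image),
          let filter = CIFilter(name: "CIGaussianBlur") else { return nil }

    filter.setValue(ciImage, forKey: kCIInputImageKey)
    filter.setValue(radius, forKey: kCIInputRadiusKey)

    // CIGaussianBlur grows the image's extent; crop back to the original.
    guard let output = filter.outputImage?.cropped(to: ciImage.extent) else { return nil }

    let context = CIContext()
    guard let cgImage = context.createCGImage(output, from: output.extent) else { return nil }
    return UIImage(cgImage: cgImage)
}

// e.g. pictureImageView.image = blurred(someAlbumImage, radius: 4)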
Use a private API if you want. Tested on iOS 13.7, 14.8, 15.5, 16.0. Does not work with Mac Catalyst.
Sample
UIVisualEffectView+Intensity.h
#import <UIKit/UIKit.h>
NS_ASSUME_NONNULL_BEGIN
@interface UIVisualEffectView (Intensity)
@property (nonatomic) CGFloat intensity;
@end
NS_ASSUME_NONNULL_END
UIVisualEffectView+Intensity.m
#import "UIVisualEffectView+Intensity.h"
#import <objc/message.h>
@interface UIVisualEffectView (Intensity)
@property (readonly) id backgroundHost; // _UIVisualEffectHost
@property (readonly) __kindof UIView *backdropView; // _UIVisualEffectBackdropView
@end

@implementation UIVisualEffectView (Intensity)

- (id)backgroundHost {
    id backgroundHost = ((id (*)(id, SEL))objc_msgSend)(self, NSSelectorFromString(@"_backgroundHost")); // _UIVisualEffectHost
    return backgroundHost;
}

- (__kindof UIView * _Nullable)backdropView {
    __kindof UIView *backdropView = ((__kindof UIView * (*)(id, SEL))objc_msgSend)(self.backgroundHost, NSSelectorFromString(@"contentView")); // _UIVisualEffectBackdropView
    return backdropView;
}

- (CGFloat)intensity {
    __kindof UIView *backdropView = self.backdropView; // _UIVisualEffectBackdropView
    __kindof CALayer *backdropLayer = ((__kindof CALayer * (*)(id, SEL))objc_msgSend)(backdropView, NSSelectorFromString(@"backdropLayer")); // UICABackdropLayer
    NSArray *filters = backdropLayer.filters;

    id _Nullable __block gaussianBlur = nil; // CAFilter
    [filters enumerateObjectsUsingBlock:^(id _Nonnull obj, NSUInteger idx, BOOL * _Nonnull stop) {
        if (![obj respondsToSelector:NSSelectorFromString(@"type")]) return;
        NSString *type = ((NSString * (*)(id, SEL))objc_msgSend)(obj, NSSelectorFromString(@"type"));
        if (![type isKindOfClass:[NSString class]]) return;
        if ([type isEqualToString:@"gaussianBlur"]) {
            gaussianBlur = obj;
            *stop = YES;
        }
    }];

    if (gaussianBlur == nil) return 0.0f;

    NSNumber * _Nullable inputRadius = [gaussianBlur valueForKeyPath:@"inputRadius"];
    if ((inputRadius == nil) || (![inputRadius isKindOfClass:[NSNumber class]])) return 0.0f;

    return [inputRadius floatValue];
}

- (void)setIntensity:(CGFloat)intensity {
    id descriptor = ((id (*)(id, SEL, id, BOOL))objc_msgSend)(self, NSSelectorFromString(@"_effectDescriptorForEffects:usage:"), @[self.effect], YES); // _UIVisualEffectDescriptor
    NSArray *filterEntries = ((NSArray * (*)(id, SEL))objc_msgSend)(descriptor, NSSelectorFromString(@"filterEntries")); // NSArray<_UIVisualEffectFilterEntry *>

    id _Nullable __block gaussianBlur = nil; // _UIVisualEffectFilterEntry
    [filterEntries enumerateObjectsUsingBlock:^(id _Nonnull obj, NSUInteger idx, BOOL * _Nonnull stop) {
        NSString *filterType = ((NSString * (*)(id, SEL))objc_msgSend)(obj, NSSelectorFromString(@"filterType"));
        if ([filterType isEqualToString:@"gaussianBlur"]) {
            gaussianBlur = obj;
            *stop = YES;
        }
    }];

    if (gaussianBlur == nil) return;

    NSMutableDictionary *requestedValues = [((NSDictionary * (*)(id, SEL))objc_msgSend)(gaussianBlur, NSSelectorFromString(@"requestedValues")) mutableCopy];
    if (![requestedValues.allKeys containsObject:@"inputRadius"]) {
        NSLog(@"Not supported effect.");
        return;
    }

    requestedValues[@"inputRadius"] = [NSNumber numberWithFloat:intensity];

    ((void (*)(id, SEL, NSDictionary *))objc_msgSend)(gaussianBlur, NSSelectorFromString(@"setRequestedValues:"), requestedValues);
    ((void (*)(id, SEL, id))objc_msgSend)(self.backgroundHost, NSSelectorFromString(@"setCurrentEffectDescriptor:"), descriptor);
    ((void (*)(id, SEL))objc_msgSend)(self.backdropView, NSSelectorFromString(@"applyRequestedFilterEffects"));
}

@end
Usage
let firstBlurView: UIVisualEffectView = .init(effect: UIBlurEffect(style: .dark))
// setter (the value is the gaussian blur's inputRadius, in points)
firstBlurView.intensity = 7

// getter
print(firstBlurView.intensity) // 7.0
UIBlurEffect doesn't provide such a property. If you want another intensity, you will have to build the blur effect yourself.
Here is a BlurEffectView class with a public intensity setter, which also works with Apple's UIView.animate functions (you can animate intensity with UIKit animations):
BlurEffectView.swift
import UIKit

public class BlurEffectView: UIView {

    public override class var layerClass: AnyClass {
        return BlurIntensityLayer.self
    }

    @objc
    @IBInspectable
    public dynamic var intensity: CGFloat {
        set { self.blurIntensityLayer.intensity = newValue }
        get { return self.blurIntensityLayer.intensity }
    }

    @IBInspectable
    public var effect = UIBlurEffect(style: .dark) {
        didSet {
            self.setupPropertyAnimator()
        }
    }

    private let visualEffectView = UIVisualEffectView(effect: nil)
    private var propertyAnimator: UIViewPropertyAnimator!

    private var blurIntensityLayer: BlurIntensityLayer {
        return self.layer as! BlurIntensityLayer
    }

    public override init(frame: CGRect) {
        super.init(frame: frame)
        self.setupView()
    }

    public required init?(coder: NSCoder) {
        super.init(coder: coder)
        self.setupView()
    }

    deinit {
        self.propertyAnimator.stopAnimation(true)
    }

    private func setupPropertyAnimator() {
        self.propertyAnimator?.stopAnimation(true)
        self.propertyAnimator = UIViewPropertyAnimator(duration: 1, curve: .linear)
        self.propertyAnimator.addAnimations { [weak self] in
            self?.visualEffectView.effect = self?.effect
        }
        self.propertyAnimator.pausesOnCompletion = true
    }

    private func setupView() {
        self.backgroundColor = .clear
        self.isUserInteractionEnabled = false
        self.addSubview(self.visualEffectView)
        self.visualEffectView.fill(view: self)
        self.setupPropertyAnimator()
    }

    public override func display(_ layer: CALayer) {
        guard let presentationLayer = layer.presentation() as? BlurIntensityLayer else {
            return
        }
        // Drive the property animator from the presentation layer, clamped to 0...1
        let clampedIntensity = max(0.0, min(1.0, presentationLayer.intensity))
        self.propertyAnimator.fractionComplete = clampedIntensity
    }
}
BlurIntensityLayer.swift
import QuartzCore
class BlurIntensityLayer: CALayer {

    @NSManaged var intensity: CGFloat

    override init(layer: Any) {
        super.init(layer: layer)
        if let layer = layer as? BlurIntensityLayer {
            self.intensity = layer.intensity
        }
    }

    override init() {
        super.init()
    }

    required init?(coder aDecoder: NSCoder) {
        super.init(coder: aDecoder)
    }

    override class func needsDisplay(forKey key: String) -> Bool {
        key == #keyPath(intensity) ? true : super.needsDisplay(forKey: key)
    }

    override func action(forKey event: String) -> CAAction? {
        guard event == #keyPath(intensity) else {
            return super.action(forKey: event)
        }
        let animation = CABasicAnimation(keyPath: event)
        animation.toValue = nil
        animation.fromValue = (self.presentation() ?? self).intensity
        return animation
    }
}
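Note that setupView() calls a fill(view:) helper that isn't included above; a minimal sketch of that extension, plus a usage example, might look like this (the helper's name and constraint choices are my assumptions):

extension UIView {
    // Assumed helper: pin this view to all four edges of `view`.
    func fill(view: UIView) {
        translatesAutoresizingMaskIntoConstraints = false
        NSLayoutConstraint.activate([
            topAnchor.constraint(equalTo: view.topAnchor),
            bottomAnchor.constraint(equalTo: view.bottomAnchor),
            leadingAnchor.constraint(equalTo: view.leadingAnchor),
            trailingAnchor.constraint(equalTo: view.trailingAnchor)
        ])
    }
}

// Usage: because intensity is an @NSManaged layer property backed by a CAAction,
// it animates with the regular UIKit API.
// (imageView here is a stand-in for whatever view you're blurring.)
let blurView = BlurEffectView(frame: imageView.bounds)
blurView.intensity = 0.0
imageView.addSubview(blurView)
UIView.animate(withDuration: 0.5) {
    blurView.intensity = 0.3
}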
Related
I would like to implement a resizable and draggable UIView like the following picture shows:
This is the class I have implemented up to now using simple gestures recognizers:
class PincherView: UIView {
var pinchRec:UIPinchGestureRecognizer!
var rotateRec:UIRotationGestureRecognizer!
var panRec:UIPanGestureRecognizer!
var containerView:UIView!
init(size: CGRect, container:UIView) {
super.init(frame: size)
self.containerView = container
self.pinchRec = UIPinchGestureRecognizer(target: self, action: "handlePinch:")
self.addGestureRecognizer(self.pinchRec)
self.rotateRec = UIRotationGestureRecognizer(target: self, action: "handleRotate:")
self.addGestureRecognizer(self.rotateRec)
self.panRec = UIPanGestureRecognizer(target: self, action: "handlePan:")
self.addGestureRecognizer(self.panRec)
self.backgroundColor = UIColor(red: 0.0, green: 0.6, blue: 1.0, alpha: 0.4)
self.userInteractionEnabled = true
self.multipleTouchEnabled = true
self.hidden = false
}
required init?(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
func handlePinch(recognizer : UIPinchGestureRecognizer) {
if let view = recognizer.view {
view.transform = CGAffineTransformScale(view.transform, 1.0, recognizer.scale)
//view.transform = CGAffineTransformScale(view.transform, recognizer.scale, recognizer.scale)
recognizer.scale = 1
}
}
func handleRotate(recognizer : UIRotationGestureRecognizer) {
if let view = recognizer.view {
view.transform = CGAffineTransformRotate(view.transform, recognizer.rotation)
recognizer.rotation = 0
}
}
func handlePan(recognizer:UIPanGestureRecognizer) {
let translation = recognizer.translationInView(containerView)
if let view = recognizer.view {
view.center = CGPoint(x:view.center.x + translation.x,
y:view.center.y + translation.y)
}
recognizer.setTranslation(CGPointZero, inView: containerView)
}
}
As you can imagine, with a simple UIPinchGestureRecognizer the UIView is scaled equally in the x and y directions, but I want users to be as precise as possible, so that they can scale along a single direction or alter the shape of the UIView, which doesn't necessarily have to stay a rectangle. Moreover, it's especially important that the user can set the height of the UIView precisely. That's why I need those 4 buttons at the corners.
I would just like some hints on where to start and how.
Thank you
UPDATE
This is what I have done up to now.
In this way I have the following hierarchy:
- UIImageView, embedded in
- UIView, embedded in
- UIScrollView, embedded in
- UIViewController
I had to add a further UIView to use as a container, so that all the subviews it contains are zoomed in/out together with the picture; this gives more precision when overlapping the azure UIView onto photographed objects.
Concerning the pin (the little white circle), this is the class I have implemented so far:
class PinImageView: UIImageView {
var lastLocation:CGPoint!
var container:UIView!
var viewToScale:UIView!
var id:String!
init(imageIcon: UIImage?, location:CGPoint, container:UIView, viewToScale:UIView, id:String) {
super.init(image: imageIcon)
self.lastLocation = location
self.frame = CGRect(x: location.x, y: location.y, width: 10.0, height: 10.0)
self.center = location
self.userInteractionEnabled = true
self.container = container
self.viewToScale = viewToScale
self.id = id
self.hidden = false
}
required init?(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
func getScaleParameters(arrivalPoint:CGPoint) -> (sx:CGFloat, sy:CGFloat) {
//var result:(sx:CGFloat, sy:CGFloat) = (1.0, 1.0)
var scaleX:CGFloat = 1.0
var scaleY:CGFloat = 1.0
switch (self.id) {
case "up_left":
/* up_left is moving towards right*/
if (self.lastLocation.x < arrivalPoint.x) {
scaleX = -0.99
}
/* up_left is moving towards left*/
else {
scaleX = 0.9
}
/* up_left is moving towards up*/
if (self.lastLocation.y < arrivalPoint.y) {
scaleY = -0.9
}
/* up_left is moving towards down*/
else {
scaleY = 0.9
}
break
default:
scaleX = 1.0
scaleY = 1.0
}
return (scaleX, scaleY)
}
override func touchesMoved(touches: Set<UITouch>, withEvent event: UIEvent?) {
if let touch: UITouch = touches.first {
let scaleParameters:(sx:CGFloat, sy:CGFloat) = self.getScaleParameters(touch.locationInView(self.container))
self.viewToScale.transform = CGAffineTransformMakeScale(scaleParameters.sx, scaleParameters.sy)
//self.center = touch.locationInView(self.container)
self.center = touch.locationInView(self.container)
self.lastLocation = touch.locationInView(self.container)
}
}
}
where viewToScale is the azure floating UIView called pincherViewRec. The center of the pin is initially at pincherViewRec.frame.origin. Needless to say, it does not work as expected.
Do you have any ideas? Any hint to point me in the right direction would be appreciated.
Here is my code:
viewDidLoad:
UIPinchGestureRecognizer *pinch = [[UIPinchGestureRecognizer alloc] initWithTarget:self action:@selector(pinch:)];
[self.canvas addGestureRecognizer:pinch];
pinch.delegate = self;

UIRotationGestureRecognizer *twoFingersRotate = [[UIRotationGestureRecognizer alloc] initWithTarget:self action:@selector(pinchRotate:)];
[[self canvas] addGestureRecognizer:twoFingersRotate];
twoFingersRotate.delegate = self;
Code For Pinches and Rotates:
-(BOOL)gestureRecognizer:(UIGestureRecognizer *)gestureRecognizer shouldRecognizeSimultaneouslyWithGestureRecognizer:(UIGestureRecognizer *)otherGestureRecognizer
{
return YES;
}
-(void)pinchRotate:(UIRotationGestureRecognizer*)rotate
{
SMImage *selectedImage = [DataCenter sharedDataCenter].selectedImage;
switch (rotate.state)
{
case UIGestureRecognizerStateBegan:
{
selectedImage.referenceTransform = selectedImage.transform;
break;
}
case UIGestureRecognizerStateChanged:
{
selectedImage.transform = CGAffineTransformRotate(selectedImage.referenceTransform, ([rotate rotation] * 55) * M_PI/180);
break;
}
default:
break;
}
}
-(void)pinch:(UIPinchGestureRecognizer*)pinch
{
SMImage *selectedImage = [DataCenter sharedDataCenter].selectedImage;
[self itemSelected];
switch (pinch.state)
{
case UIGestureRecognizerStateBegan:
{
selectedImage.referenceTransform = selectedImage.transform;
break;
}
case UIGestureRecognizerStateChanged:
{
CGAffineTransform transform = CGAffineTransformScale(selectedImage.referenceTransform, pinch.scale, pinch.scale);
selectedImage.transform = transform;
break;
}
default:
break;
}
}
My rotation works great on its own and my scale works great on its own, but they won't work together; one always works while the other doesn't. When I implement shouldRecognizeSimultaneouslyWithGestureRecognizer, the two gestures seem to fight each other and produce poor results. What am I missing? (Yes, I have implemented <UIGestureRecognizerDelegate>.)
Every time pinch: is called, you just compute the transform based on the pinch recognizer's scale. Every time pinchRotate: is called, you just compute the transform based on the rotation recognizer's rotation. You never combine the scale and the rotation into one transform.
Here's an approach. Give yourself one new instance variable, _activeRecognizers:
NSMutableSet *_activeRecognizers;
Initialize it in viewDidLoad:
_activeRecognizers = [NSMutableSet set];
Use one method as the action for both recognizers:
- (IBAction)handleGesture:(UIGestureRecognizer *)recognizer
{
SMImage *selectedImage = [DataCenter sharedDataCenter].selectedImage;
switch (recognizer.state) {
case UIGestureRecognizerStateBegan:
if (_activeRecognizers.count == 0)
selectedImage.referenceTransform = selectedImage.transform;
[_activeRecognizers addObject:recognizer];
break;
case UIGestureRecognizerStateEnded:
selectedImage.referenceTransform = [self applyRecognizer:recognizer toTransform:selectedImage.referenceTransform];
[_activeRecognizers removeObject:recognizer];
break;
case UIGestureRecognizerStateChanged: {
CGAffineTransform transform = selectedImage.referenceTransform;
for (UIGestureRecognizer *recognizer in _activeRecognizers)
transform = [self applyRecognizer:recognizer toTransform:transform];
selectedImage.transform = transform;
break;
}
default:
break;
}
}
You'll need this helper method:
- (CGAffineTransform)applyRecognizer:(UIGestureRecognizer *)recognizer toTransform:(CGAffineTransform)transform
{
    if ([recognizer respondsToSelector:@selector(rotation)])
        return CGAffineTransformRotate(transform, [(UIRotationGestureRecognizer *)recognizer rotation]);
    else if ([recognizer respondsToSelector:@selector(scale)]) {
        CGFloat scale = [(UIPinchGestureRecognizer *)recognizer scale];
        return CGAffineTransformScale(transform, scale, scale);
    }
    else
        return transform;
}
This works if you're just allowing rotating and scaling. (I even tested it!)
If you want to add panning, use a separate action method and just adjust selectedImage.center. Trying to do panning with rotation and scaling using selectedImage.transform is much more complicated.
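For reference, such a pan action is only a few lines; a minimal Swift sketch (the method and property names are illustrative, not from the answer):

// A minimal sketch: panning adjusts `center` directly, so it composes
// cleanly with a rotation/scale transform applied by other recognizers.
@objc func handlePan(_ recognizer: UIPanGestureRecognizer) {
    guard let view = recognizer.view else { return }
    let translation = recognizer.translation(in: view.superview)
    view.center = CGPoint(x: view.center.x + translation.x,
                          y: view.center.y + translation.y)
    recognizer.setTranslation(.zero, in: view.superview)
}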
Swift 3 with Pan, Rotate and Pinch
// MARK: - Gestures
func transformUsingRecognizer(_ recognizer: UIGestureRecognizer, transform: CGAffineTransform) -> CGAffineTransform {
if let rotateRecognizer = recognizer as? UIRotationGestureRecognizer {
return transform.rotated(by: rotateRecognizer.rotation)
}
if let pinchRecognizer = recognizer as? UIPinchGestureRecognizer {
let scale = pinchRecognizer.scale
return transform.scaledBy(x: scale, y: scale)
}
if let panRecognizer = recognizer as? UIPanGestureRecognizer {
let deltaX = panRecognizer.translation(in: imageView).x
let deltaY = panRecognizer.translation(in: imageView).y
return transform.translatedBy(x: deltaX, y: deltaY)
}
return transform
}
var initialTransform: CGAffineTransform?
var gestures = Set<UIGestureRecognizer>(minimumCapacity: 3)
@IBAction func processTransform(_ sender: Any) {
let gesture = sender as! UIGestureRecognizer
switch gesture.state {
case .began:
if gestures.count == 0 {
initialTransform = imageView.transform
}
gestures.insert(gesture)
case .changed:
if var initial = initialTransform {
gestures.forEach({ (gesture) in
initial = transformUsingRecognizer(gesture, transform: initial)
})
imageView.transform = initial
}
case .ended:
gestures.remove(gesture)
default:
break
}
}
func gestureRecognizer(_ gestureRecognizer: UIGestureRecognizer, shouldRecognizeSimultaneouslyWith otherGestureRecognizer: UIGestureRecognizer) -> Bool {
return true
}
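One detail the snippet leaves implicit: each recognizer must target processTransform(_:) and have its delegate set so the method above is consulted. A minimal wiring sketch, assuming this lives in a view controller that conforms to UIGestureRecognizerDelegate and has an imageView property:

override func viewDidLoad() {
    super.viewDidLoad()

    // Wire all three recognizers to the same action and delegate.
    let recognizers: [UIGestureRecognizer] = [
        UIPinchGestureRecognizer(target: self, action: #selector(processTransform(_:))),
        UIRotationGestureRecognizer(target: self, action: #selector(processTransform(_:))),
        UIPanGestureRecognizer(target: self, action: #selector(processTransform(_:)))
    ]
    for recognizer in recognizers {
        recognizer.delegate = self // so shouldRecognizeSimultaneouslyWith fires
        imageView.addGestureRecognizer(recognizer)
    }
    imageView.isUserInteractionEnabled = true
}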
For this to happen, you need to implement the gesture delegate method shouldRecognizeSimultaneouslyWithGestureRecognizer and return YES for the gestures you would like to recognize simultaneously.
// ensure that the pinch and rotate gesture recognizers on a particular view can all recognize simultaneously
// prevent other gesture recognizers from recognizing simultaneously
- (BOOL)gestureRecognizer:(UIGestureRecognizer *)gestureRecognizer shouldRecognizeSimultaneouslyWithGestureRecognizer:(UIGestureRecognizer *)otherGestureRecognizer
{
// if the gesture recognizers's view isn't one of our views, don't allow simultaneous recognition
if (gestureRecognizer.view != firstView && gestureRecognizer.view != secondView)
return NO;
// if the gesture recognizers are on different views, don't allow simultaneous recognition
if (gestureRecognizer.view != otherGestureRecognizer.view)
return NO;
// if either of the gesture recognizers is the long press, don't allow simultaneous recognition
if ([gestureRecognizer isKindOfClass:[UILongPressGestureRecognizer class]] || [otherGestureRecognizer isKindOfClass:[UILongPressGestureRecognizer class]])
return NO;
return YES;
}
This code needs to be adapted to the view(s) for which you want simultaneous gesture recognizers. The above code is what you need.
This example does not use gesture recognizers, and directly computes the transformation matrix. It also properly handles one-to-two finger transitions.
class PincherView: UIView {
override var bounds :CGRect {
willSet(newBounds) {
oldBounds = self.bounds
} didSet {
self.imageLayer.position = ┼self.bounds
self._adjustScaleForBoundsChange()
}
}
var oldBounds :CGRect
var touch₁ :UITouch?
var touch₂ :UITouch?
var p₁ :CGPoint? // point 1 in image coordinate system
var p₂ :CGPoint? // point 2 in image coordinate system
var p₁ʹ :CGPoint? // point 1 in view coordinate system
var p₂ʹ :CGPoint? // point 2 in view coordinate system
var image :UIImage? {
didSet {self._reset()}
}
var imageLayer :CALayer
var imageTransform :CGAffineTransform {
didSet {
self.backTransform = self.imageTransform.inverted()
self.imageLayer.transform = CATransform3DMakeAffineTransform(self.imageTransform)
}
}
var backTransform :CGAffineTransform
var solutionMatrix :HXMatrix?
required init?(coder aDecoder: NSCoder) {
self.oldBounds = CGRect.zero
let layer = CALayer();
self.imageLayer = layer
self.imageTransform = CGAffineTransform.identity
self.backTransform = CGAffineTransform.identity
super.init(coder: aDecoder)
self.oldBounds = self.bounds
self.isMultipleTouchEnabled = true
self.layer.addSublayer(layer)
}
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
for touch in touches {
let pʹ = touch.location(in: self).applying(self._backNormalizeTransform())
let p = pʹ.applying(self.backTransform)
if self.touch₁ == nil {
self.touch₁ = touch
self.p₁ʹ = pʹ
self.p₁ = p
} else if self.touch₂ == nil {
self.touch₂ = touch
self.p₂ʹ = pʹ
self.p₂ = p
}
}
self.solutionMatrix = self._computeSolutionMatrix()
}
override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
for touch in touches {
let pʹ = touch.location(in: self).applying(self._backNormalizeTransform())
if self.touch₁ == touch {
self.p₁ʹ = pʹ
} else if self.touch₂ == touch {
self.p₂ʹ = pʹ
}
}
CATransaction.begin()
CATransaction.setValue(true, forKey:kCATransactionDisableActions)
// Whether you're using 1 finger or 2 fingers
if let q₁ʹ = self.p₁ʹ, let q₂ʹ = self.p₂ʹ {
self.imageTransform = self._computeTransform(q₁ʹ, q₂ʹ)
} else if let q₁ʹ = (self.p₁ʹ != nil ? self.p₁ʹ : self.p₂ʹ) {
self.imageTransform = self._computeTransform(q₁ʹ, CGPoint(x:q₁ʹ.x + 10, y:q₁ʹ.y + 10))
}
CATransaction.commit()
}
override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
for touch in touches {
if self.touch₁ == touch {
self.touch₁ = nil
self.p₁ = nil
self.p₁ʹ = nil
} else if self.touch₂ == touch {
self.touch₂ = nil
self.p₂ = nil
self.p₂ʹ = nil
}
}
self.solutionMatrix = self._computeSolutionMatrix()
}
//MARK: Private Methods
private func _reset() {
guard
let image = self.image,
let cgimage = image.cgImage else {
return
}
let r = CGRect(x:0, y:0, width:cgimage.width, height:cgimage.height)
imageLayer.contents = cgimage;
imageLayer.bounds = r
imageLayer.position = ┼self.bounds
self.imageTransform = self._initialTransform()
}
private func _normalizeTransform() -> CGAffineTransform {
let center = ┼self.bounds
return CGAffineTransform(translationX: center.x, y: center.y)
}
private func _backNormalizeTransform() -> CGAffineTransform {
return self._normalizeTransform().inverted();
}
private func _initialTransform() -> CGAffineTransform {
guard let image = self.image, let cgimage = image.cgImage else {
return CGAffineTransform.identity;
}
let r = CGRect(x:0, y:0, width:cgimage.width, height:cgimage.height)
let s = r.scaleIn(rect: self.bounds)
return CGAffineTransform(scaleX: s, y: s)
}
private func _adjustScaleForBoundsChange() {
guard let image = self.image, let cgimage = image.cgImage else {
return
}
let r = CGRect(x:0, y:0, width:cgimage.width, height:cgimage.height)
let oldIdeal = r.scaleAndCenterIn(rect: self.oldBounds)
let newIdeal = r.scaleAndCenterIn(rect: self.bounds)
let s = newIdeal.height / oldIdeal.height
self.imageTransform = self.imageTransform.scaledBy(x: s, y: s)
}
private func _computeSolutionMatrix() -> HXMatrix? {
if let q₁ = self.p₁, let q₂ = self.p₂ {
return _computeSolutionMatrix(q₁, q₂)
} else if let q₁ = self.p₁, let q₁ʹ = self.p₁ʹ {
let q₂ = CGPoint(x: q₁ʹ.x + 10, y: q₁ʹ.y + 10).applying(self.backTransform)
return _computeSolutionMatrix(q₁, q₂)
} else if let q₂ = self.p₂, let q₂ʹ = self.p₂ʹ {
let q₁ = CGPoint(x: q₂ʹ.x + 10, y: q₂ʹ.y + 10).applying(self.backTransform)
return _computeSolutionMatrix(q₂, q₁)
}
return nil
}
private func _computeSolutionMatrix(_ q₁:CGPoint, _ q₂:CGPoint) -> HXMatrix {
let x₁ = Double(q₁.x)
let y₁ = Double(q₁.y)
let x₂ = Double(q₂.x)
let y₂ = Double(q₂.y)
let A = HXMatrix(rows: 4, columns: 4, values:[
x₁, -y₁, 1, 0,
y₁, x₁, 0, 1,
x₂, -y₂, 1, 0,
y₂, x₂, 0, 1
])
return A.inverse()
}
private func _computeTransform(_ q₁ʹ:CGPoint, _ q₂ʹ:CGPoint) -> CGAffineTransform {
guard let solutionMatrix = self.solutionMatrix else {
return CGAffineTransform.identity
}
let B = HXMatrix(rows: 4, columns: 1, values: [
Double(q₁ʹ.x),
Double(q₁ʹ.y),
Double(q₂ʹ.x),
Double(q₂ʹ.y)
])
let C = solutionMatrix ⋅ B
let U = CGFloat(C[0,0])
let V = CGFloat(C[1,0])
let tx = CGFloat(C[2,0])
let ty = CGFloat(C[3,0])
var t :CGAffineTransform = CGAffineTransform.identity
t.a = U; t.b = V
t.c = -V; t.d = U
t.tx = tx; t.ty = ty
return t
}
}
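The class above leans on helpers the answer doesn't include: a prefix ┼ operator returning a rect's center, CGRect fitting helpers, and an HXMatrix type with inverse() and a ⋅ product (the 4×4 system solves for the similarity transform through the two touch points). Here is a sketch of the CGRect helpers as I read their usage; HXMatrix itself is omitted, and any small linear-algebra type would do:

import CoreGraphics

// Assumed helper: prefix ┼ returns the center of a CGRect.
prefix operator ┼
prefix func ┼ (rect: CGRect) -> CGPoint {
    return CGPoint(x: rect.midX, y: rect.midY)
}

extension CGRect {
    // Assumed: aspect-fit scale factor of `self` into `rect`.
    func scaleIn(rect: CGRect) -> CGFloat {
        return min(rect.width / width, rect.height / height)
    }

    // Assumed: the aspect-fit rect of `self`, centered inside `rect`.
    func scaleAndCenterIn(rect: CGRect) -> CGRect {
        let s = scaleIn(rect: rect)
        let size = CGSize(width: width * s, height: height * s)
        return CGRect(x: rect.midX - size.width / 2,
                      y: rect.midY - size.height / 2,
                      width: size.width, height: size.height)
    }
}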
I have a UIImagePicker that works perfectly for a source type of UIImagePickerControllerSourceTypePhotoLibrary, but when I use UIImagePickerControllerSourceTypeCamera, the editing box cannot be moved from the center of the image. So if the image is, say, taller than it is wide, the user cannot move the editing box to the top square of the image.
Anyone know why this would be the case? It only happens when the source is from the camera, not the library.
Edit: Some CODE!!!
if (actionSheet.tag == 2) {
if (buttonIndex == 0) { // Camera
// Check for camera
if ([UIImagePickerController isSourceTypeAvailable:UIImagePickerControllerSourceTypeCamera] == YES) {
// Create image picker controller
UIImagePickerController *imagePicker = [[UIImagePickerController alloc] init];
// Set source to the camera
imagePicker.sourceType = UIImagePickerControllerSourceTypeCamera;
imagePicker.allowsEditing = YES;
// Delegate is self
imagePicker.delegate = self;
// Show image picker
[self presentViewController:imagePicker
animated:YES
completion:^(void) {
}];
}
}
else if (buttonIndex == 1) { // Photo Library
if ([UIImagePickerController isSourceTypeAvailable:UIImagePickerControllerSourceTypePhotoLibrary] == YES) {
// Create image picker controller
UIImagePickerController *imagePicker = [[UIImagePickerController alloc] init];
// Set source to the photo library
imagePicker.sourceType = UIImagePickerControllerSourceTypePhotoLibrary;
imagePicker.allowsEditing = YES;
// Delegate is self
imagePicker.delegate = self;
// Show image picker
[self presentViewController:imagePicker
animated:YES
completion:^(void) {
}];
}
}
}
So as you can see, I present them exactly the same way, but the camera edit behaves differently from the photo library edit.
Looks like this behavior is just a bug in iOS 6... Basically you cannot move the editing box, it always bounces back to the middle unless you zoom in a bit. Hopefully they fix that soon.
Thanks yycking. This extension works, except I added the method call inside viewDidLayoutSubviews so that I don't have to call it every time I want to open the image picker.
Here's the full extension:
extension UIImagePickerController {
open override var childForStatusBarHidden: UIViewController? {
return nil
}
open override var prefersStatusBarHidden: Bool {
return true
}
open override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
fixCannotMoveEditingBox()
}
func fixCannotMoveEditingBox() {
if let cropView = cropView,
let scrollView = scrollView,
scrollView.contentOffset.y == 0 {
var top: CGFloat = 0.0
if #available(iOS 11.0, *) {
top = cropView.frame.minY + self.view.safeAreaInsets.top
} else {
// Fallback on earlier versions
top = cropView.frame.minY
}
let bottom = scrollView.frame.height - cropView.frame.height - top
scrollView.contentInset = UIEdgeInsets(top: top, left: 0, bottom: bottom, right: 0)
var offset: CGFloat = 0
if scrollView.contentSize.height > scrollView.contentSize.width {
offset = 0.5 * (scrollView.contentSize.height - scrollView.contentSize.width)
}
scrollView.contentOffset = CGPoint(x: 0, y: -top + offset)
}
DispatchQueue.main.asyncAfter(deadline: .now() + 0.1) { [weak self] in
self?.fixCannotMoveEditingBox()
}
}
var cropView: UIView? {
return findCropView(from: self.view)
}
var scrollView: UIScrollView? {
return findScrollView(from: self.view)
}
func findCropView(from view: UIView) -> UIView? {
let width = UIScreen.main.bounds.width
let size = view.bounds.size
if width == size.width, width == size.height { // the crop box is a screen-width square
return view
}
for view in view.subviews {
if let cropView = findCropView(from: view) {
return cropView
}
}
return nil
}
func findScrollView(from view: UIView) -> UIScrollView? {
if let scrollView = view as? UIScrollView {
return scrollView
}
for view in view.subviews {
if let scrollView = findScrollView(from: view) {
return scrollView
}
}
return nil
}
}
Reset the contentInset of the scroll view:
extension UIImagePickerController {
func fixCannotMoveEditingBox() {
if let cropView = cropView,
let scrollView = scrollView,
scrollView.contentOffset.y == 0 {
let top = cropView.frame.minY + self.view.safeAreaInsets.top
let bottom = scrollView.frame.height - cropView.frame.height - top
scrollView.contentInset = UIEdgeInsets(top: top, left: 0, bottom: bottom, right: 0)
var offset: CGFloat = 0
if scrollView.contentSize.height > scrollView.contentSize.width {
offset = 0.5 * (scrollView.contentSize.height - scrollView.contentSize.width)
}
scrollView.contentOffset = CGPoint(x: 0, y: -top + offset)
}
DispatchQueue.main.asyncAfter(deadline: .now() + 0.1) { [weak self] in
self?.fixCannotMoveEditingBox()
}
}
var cropView: UIView? {
return findCropView(from: self.view)
}
var scrollView: UIScrollView? {
return findScrollView(from: self.view)
}
func findCropView(from view: UIView) -> UIView? {
let width = UIScreen.main.bounds.width
let size = view.bounds.size
if width == size.width, width == size.height { // the crop box is a screen-width square
return view
}
for view in view.subviews {
if let cropView = findCropView(from: view) {
return cropView
}
}
return nil
}
func findScrollView(from view: UIView) -> UIScrollView? {
if let scrollView = view as? UIScrollView {
return scrollView
}
for view in view.subviews {
if let scrollView = findScrollView(from: view) {
return scrollView
}
}
return nil
}
}
then call it:
imagePickerController.fixCannotMoveEditingBox()
Here is an extension I ended up using that works fine on both notch and non-notch devices, and works perfectly on iOS 15!
extension UIImagePickerController {
open override var childForStatusBarHidden: UIViewController? {
return nil
}
open override var prefersStatusBarHidden: Bool {
return true
}
open override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
fixCannotMoveEditingBox()
}
private func fixCannotMoveEditingBox() {
if let cropView = cropView, let scrollView = scrollView, scrollView.contentOffset.y == 0 {
let top: CGFloat = cropView.frame.minY + self.view.frame.minY
let bottom = scrollView.frame.height - cropView.frame.height - top
scrollView.contentInset = UIEdgeInsets(top: top, left: 0, bottom: bottom, right: 0)
var offset: CGFloat = 0
if scrollView.contentSize.height > scrollView.contentSize.width {
offset = 0.5 * (scrollView.contentSize.height - scrollView.contentSize.width)
}
scrollView.contentOffset = CGPoint(x: 0, y: -top + offset)
}
DispatchQueue.main.asyncAfter(deadline: .now() + 0.1) { [weak self] in
self?.fixCannotMoveEditingBox()
}
}
private var cropView: UIView? {
return findCropView(from: self.view)
}
private var scrollView: UIScrollView? {
return findScrollView(from: self.view)
}
private func findCropView(from view: UIView) -> UIView? {
let width = UIScreen.main.bounds.width
let size = view.bounds.size
if width == size.width, width == size.height { // the crop box is a screen-width square
return view
}
for view in view.subviews {
if let cropView = findCropView(from: view) {
return cropView
}
}
return nil
}
private func findScrollView(from view: UIView) -> UIScrollView? {
if let scrollView = view as? UIScrollView {
return scrollView
}
for view in view.subviews {
if let scrollView = findScrollView(from: view) {
return scrollView
}
}
return nil
}
}
p.s.: the only changes from other similar answers are:
dropping support for an old iOS version (like iOS 11);
a slightly different way of calculating the contentInset top property.
This is the default behavior of the Image Picker Controller; you cannot change it. The only other option is to create your own cropping utility. Check out the link below for an example:
https://github.com/ardalahmet/SSPhotoCropperViewController
I know this is not a good solution, but it works.
I tested on iOS8+iPhone5, iOS9+iPhone6sPlus, iOS10+iPhone6, iOS10+iPhone6sPlus.
CAUTION: PLImageScrollView and PLCropOverlayCropView are UNDOCUMENTED classes.
- (void)showImagePickerControllerWithSourceType:(UIImagePickerControllerSourceType)sourceType {
    UIImagePickerController *imagePickerController = [UIImagePickerController new];
    imagePickerController.sourceType = sourceType;
    imagePickerController.mediaTypes = @[(NSString *)kUTTypeImage];
    imagePickerController.allowsEditing = YES;
    imagePickerController.delegate = self;
    [self presentViewController:imagePickerController animated:YES completion:^{
        [self fxxxImagePickerController:imagePickerController];
    }];
}

- (void)fxxxImagePickerController:(UIImagePickerController *)imagePickerController {
    if (!imagePickerController
        || !imagePickerController.allowsEditing
        || imagePickerController.sourceType != UIImagePickerControllerSourceTypeCamera) {
        return;
    }

    // !!!: UNDOCUMENTED CLASSES
    Class ScrollViewClass = NSClassFromString(@"PLImageScrollView");
    Class CropViewClass = NSClassFromString(@"PLCropOverlayCropView");

    [imagePickerController.view eachSubview:^BOOL(UIView *subview, NSInteger depth) {
        if ([subview isKindOfClass:CropViewClass]) {
            // 0. crop rect position
            subview.frame = subview.superview.bounds;
        }
        else if ([subview isKindOfClass:[UIScrollView class]]
                 && [subview isKindOfClass:ScrollViewClass]) {
            BOOL isNewImageScrollView = !self->_imageScrollView;
            self->_imageScrollView = (UIScrollView *)subview;

            // 1. enable scrolling
            CGSize size = self->_imageScrollView.frame.size;
            CGFloat inset = ABS(size.width - size.height) / 2;
            self->_imageScrollView.contentInset = UIEdgeInsetsMake(inset, 0, inset, 0);

            // 2. centering image by default
            if (isNewImageScrollView) {
                CGSize contentSize = self->_imageScrollView.contentSize;
                if (contentSize.height > contentSize.width) {
                    CGFloat offset = round((contentSize.height - contentSize.width) / 2 - inset);
                    self->_imageScrollView.contentOffset = CGPointMake(self->_imageScrollView.contentOffset.x, offset);
                }
            }
        }
        return YES;
    }];

    // prevent re-layout, maybe not necessary
    // (@weakify/@strongify are the libextobjc macros)
    @weakify(self, imagePickerController);
    dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)(0.5 * NSEC_PER_SEC)), dispatch_get_main_queue(), ^{
        @strongify(self, imagePickerController);
        [self fxxxImagePickerController:imagePickerController];
    });
}
EDIT: The eachSubview: method traverses the whole subview tree.
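eachSubview: is not a UIKit method, and the answer doesn't include the category. As a rough conceptual equivalent, a recursive walk like this (sketched in Swift) is all it does:

extension UIView {
    // Visit every descendant view with its depth; the closure's return
    // value controls whether we keep descending into that view's subviews.
    func eachSubview(depth: Int = 0, _ visit: (UIView, Int) -> Bool) {
        for subview in subviews {
            if visit(subview, depth) {
                subview.eachSubview(depth: depth + 1, visit)
            }
        }
    }
}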
If you have set "View controller-based status bar appearance" to NO in Info.plist and set the status bar appearance to light using
UIApplication.shared.statusBarStyle = .lightContent
or any other method, then simply set the style back to .default before presenting the image picker, e.g.:
imagePicker.allowsEditing = true
imagePicker.sourceType = .photoLibrary
UIApplication.shared.statusBarStyle = .default
present(imagePicker, animated: true, completion: nil)
Change the source type according to your need, either photoLibrary or camera, and in your didFinishPickingMediaWithInfo delegate method restore the style in the dismiss completion block:
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
    //let pickedImage = info[UIImagePickerControllerOriginalImage] as? UIImage
    var pickedImage: UIImage?
    if let img = info[UIImagePickerControllerEditedImage] as? UIImage {
        pickedImage = img
    } else if let img = info[UIImagePickerControllerOriginalImage] as? UIImage {
        pickedImage = img
    }
    dismiss(animated: true, completion: {
        UIApplication.shared.statusBarStyle = .lightContent
    })
}
Apparently this is a workaround for the issue. Hope it helps.
A workaround that solved it is to add an entry in Info.plist with "View controller-based status bar appearance" set to NO.
I'm new to iOS programming. I have searched through other examples and questions on Stack Overflow, but none match my goal: I want to set an image for the dot at index 0 of a UIPageControl, similar to the iPhone home screen's search page indicator. Is there a way to do this? Please explain with some code or a useful link.
Thanks in advance
I have found a solution for this problem. I know it is not the ideal way, but it works until iOS 8 is released.
Reason for the crash:
In iOS 7, [self.subviews objectAtIndex:i] returns a UIView instead of a UIImageView, and setImage is not a property of UIView, so the app crashes. I solved the problem with the following code.
Check whether the subview is a UIView (iOS 7) or a UIImageView (iOS 6 or earlier). If it is a UIView, add a UIImageView as a subview of that view, and voilà: it works and doesn't crash.
-(void) updateDots
{
for (int i = 0; i < [self.subviews count]; i++)
{
UIImageView * dot = [self imageViewForSubview: [self.subviews objectAtIndex: i]];
if (i == self.currentPage) dot.image = activeImage;
else dot.image = inactiveImage;
}
}
- (UIImageView *) imageViewForSubview: (UIView *) view
{
UIImageView * dot = nil;
if (![view isKindOfClass: [UIImageView class]]) // a plain UIView (iOS 7); on iOS 6 the subview is already a UIImageView
{
for (UIView* subview in view.subviews)
{
if ([subview isKindOfClass:[UIImageView class]])
{
dot = (UIImageView *)subview;
break;
}
}
if (dot == nil)
{
dot = [[UIImageView alloc] initWithFrame:CGRectMake(0.0f, 0.0f, view.frame.size.width, view.frame.size.height)];
[view addSubview:dot];
}
}
else
{
dot = (UIImageView *) view;
}
return dot;
}
Also, to clear the images that are already there, set the tint colors for the existing indicators to transparent:
- (void)awakeFromNib
{
self.pageIndicatorTintColor = [UIColor clearColor];
self.currentPageIndicatorTintColor = [UIColor clearColor];
}
Hope this solves your issue for iOS 7.
Happy coding!
Try this link:
Answer with GrayPageControl: Is there a way to change page indicator dots color
It is really good and reliable; I have used this code as well.
You might have to do some more customization, as:
-(void) updateDots
{
    for (int i = 0; i < [self.subviews count]; i++)
    {
        UIImageView *dot = [self.subviews objectAtIndex:i];
        if (i == self.currentPage) {
            if (i == 0) {
                dot.image = [UIImage imageNamed:@"activesearch.png"];
            } else {
                dot.image = activeImage;
            }
        } else {
            if (i == 0) {
                dot.image = [UIImage imageNamed:@"inactivesearch.png"];
            } else {
                dot.image = inactiveImage;
            }
        }
    }
}
Simply change the UIPageControl page indicator color with a pattern image: self.pageControl.currentPageIndicatorTintColor = [UIColor colorWithPatternImage:[UIImage imageNamed:@"circle"]];
The best compilation of the code for Swift 3, replacing the first icon of a UIPageControl with a location marker:
import UIKit
class LocationPageControl: UIPageControl {
let locationArrow: UIImage = UIImage(named: "locationArrow")!
let pageCircle: UIImage = UIImage(named: "pageCircle")!
override var numberOfPages: Int {
didSet {
updateDots()
}
}
override var currentPage: Int {
didSet {
updateDots()
}
}
override func awakeFromNib() {
super.awakeFromNib()
self.pageIndicatorTintColor = UIColor.clear
self.currentPageIndicatorTintColor = UIColor.clear
self.clipsToBounds = false
}
func updateDots() {
var i = 0
for view in self.subviews {
var imageView = self.imageView(forSubview: view)
if imageView == nil {
if i == 0 {
imageView = UIImageView(image: locationArrow)
} else {
imageView = UIImageView(image: pageCircle)
}
imageView!.center = view.center
view.addSubview(imageView!)
view.clipsToBounds = false
}
if i == self.currentPage {
imageView!.alpha = 1.0
} else {
imageView!.alpha = 0.5
}
i += 1
}
}
fileprivate func imageView(forSubview view: UIView) -> UIImageView? {
var dot: UIImageView?
if let dotImageView = view as? UIImageView {
dot = dotImageView
} else {
for foundView in view.subviews {
if let imageView = foundView as? UIImageView {
dot = imageView
break
}
}
}
return dot
}
}
Attached images:
I've created a custom page controller that should function in mostly the same way, without hacking into the internals of UIPageControl or pulling in a whole library for one small widget.
Just place an empty UIStackView in your storyboard, set its custom class to the class below, and use numberOfPages and currentPage just like a normal UIPageControl. Set the spacing on the UIStackView to change how much space there is between the dots.
Swift 4.2
/**
 If adding via storyboard, you should not need to set width and height constraints for this view;
 just set a placeholder for each so Auto Layout doesn't complain, and this view will size itself once it's populated with pages at runtime.
 */
class PageControl: UIStackView {
@IBInspectable var currentPageImage: UIImage = UIImage(named: "whiteCircleFilled")!
@IBInspectable var pageImage: UIImage = UIImage(named: "whiteCircleOutlined")!
/**
Sets how many page indicators will show
*/
var numberOfPages = 3 {
didSet {
layoutIndicators()
}
}
/**
Sets which page indicator will be highlighted with the **currentPageImage**
*/
var currentPage = 0 {
didSet {
setCurrentPageIndicator()
}
}
override func awakeFromNib() {
super.awakeFromNib()
axis = .horizontal
distribution = .equalSpacing
alignment = .center
layoutIndicators()
}
private func layoutIndicators() {
for i in 0..<numberOfPages {
let imageView: UIImageView
if i < arrangedSubviews.count {
imageView = arrangedSubviews[i] as! UIImageView // reuse subview if possible
} else {
imageView = UIImageView()
addArrangedSubview(imageView)
}
if i == currentPage {
imageView.image = currentPageImage
} else {
imageView.image = pageImage
}
}
// remove excess subviews if any
let subviewCount = arrangedSubviews.count
if numberOfPages < subviewCount {
for _ in numberOfPages..<subviewCount {
arrangedSubviews.last?.removeFromSuperview()
}
}
}
private func setCurrentPageIndicator() {
for i in 0..<arrangedSubviews.count {
let imageView = arrangedSubviews[i] as! UIImageView
if i == currentPage {
imageView.image = currentPageImage
} else {
imageView.image = pageImage
}
}
}
}
Works for my purposes but I make no guarantees
Update for Swift 3.0... you know, if you are OK with accepting the stated risk: "Modifying the subviews of an existing control is fragile".
import UIKit
class CustomImagePageControl: UIPageControl {
let activeImage:UIImage = UIImage(named: "SelectedPage")!
let inactiveImage:UIImage = UIImage(named: "UnselectedPage")!
override func awakeFromNib() {
super.awakeFromNib()
self.pageIndicatorTintColor = UIColor.clear
self.currentPageIndicatorTintColor = UIColor.clear
self.clipsToBounds = false
}
func updateDots() {
var i = 0
for view in self.subviews {
if let imageView = self.imageForSubview(view) {
if i == self.currentPage {
imageView.image = self.activeImage
} else {
imageView.image = self.inactiveImage
}
i = i + 1
} else {
var dotImage = self.inactiveImage
if i == self.currentPage {
dotImage = self.activeImage
}
view.clipsToBounds = false
view.addSubview(UIImageView(image:dotImage))
i = i + 1
}
}
}
fileprivate func imageForSubview(_ view:UIView) -> UIImageView? {
var dot:UIImageView?
if let dotImageView = view as? UIImageView {
dot = dotImageView
} else {
for foundView in view.subviews {
if let imageView = foundView as? UIImageView {
dot = imageView
break
}
}
}
return dot
}
}
You just need to do it like this:
((UIImageView *)[[yourPageControl subviews] objectAtIndex:0]).image = [UIImage imageNamed:@"search.png"];
I think for this you need to customize the whole UIPageControl. You can find out more at the links below:
How can i change the color of pagination dots of UIPageControl?
http://iphoneappcode.blogspot.in/2012/03/custom-uipagecontrol.html
From iOS 14 you can get and set the indicator image with these methods:
#available(iOS 14.0, *)
open func indicatorImage(forPage page: Int) -> UIImage?
#available(iOS 14.0, *)
open func setIndicatorImage(_ image: UIImage?, forPage page: Int)
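For example, to mimic the home screen's search dot on page 0 (the SF Symbol name here is just an illustration):

let pageControl = UIPageControl()
pageControl.numberOfPages = 5

if #available(iOS 14.0, *) {
    // Page 0 gets a magnifying glass; the other pages keep the default dot.
    pageControl.setIndicatorImage(UIImage(systemName: "magnifyingglass"), forPage: 0)
}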
From iProgrammer's answer
In case you want to hide the original dot
- (UIImageView *)imageViewForSubview:(UIView *)view {
    UIImageView *dot = nil;
    [view setBackgroundColor:[UIColor clearColor]]; // << add this line
Following the previous answers, I came up with the solution below.
Keep in mind that I had to add a valueChanged listener that calls updateDots(), in the controller as well, to handle taps made on the UIPageControl.
import UIKit
class PageControl: UIPageControl {
private struct Constants {
static let activeColor: UIColor = .white
static let inactiveColor: UIColor = .black
static let locationImage: UIImage = UIImage(named: "Location")!
}
// Update dots when currentPage changes
override var currentPage: Int {
didSet {
updateDots()
}
}
override func awakeFromNib() {
super.awakeFromNib()
self.pageIndicatorTintColor = .clear
self.currentPageIndicatorTintColor = .clear
}
func updateDots() {
for (index, view) in self.subviews.enumerated() {
// Layers will be redrawn, remove old.
view.layer.sublayers?.removeAll()
if index == 0 {
drawImage(view: view)
} else {
drawDot(index: index, view: view)
}
}
}
private func drawDot(index: Int, view: UIView) {
let dotLayer = CAShapeLayer()
dotLayer.path = UIBezierPath(ovalIn: view.bounds).cgPath
dotLayer.fillColor = getColor(index: index)
view.layer.addSublayer(dotLayer)
}
private func drawImage(view: UIView) {
let height = view.bounds.height * 2
let width = view.bounds.width * 2
let topMargin: CGFloat = -3.5
let maskLayer = CALayer()
maskLayer.frame = CGRect(x: 0, y: 0, width: width, height: height)
maskLayer.contents = Constants.locationImage.cgImage
maskLayer.contentsGravity = .resizeAspect
let imageLayer = CALayer()
imageLayer.frame = CGRect(x:0, y: topMargin, width: width, height: height)
imageLayer.mask = maskLayer
imageLayer.backgroundColor = getColor()
view.backgroundColor = .clear // Otherwise black background
view.layer.addSublayer(imageLayer)
}
private func getColor(index: Int? = 0) -> CGColor {
return currentPage == index ? Constants.activeColor.cgColor : Constants.inactiveColor.cgColor
}
}
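A minimal sketch of the controller-side wiring mentioned above (the pageControl outlet name is an assumption):

// In the owning view controller:
override func viewDidLoad() {
    super.viewDidLoad()
    pageControl.addTarget(self, action: #selector(pageControlChanged(_:)), for: .valueChanged)
}

@objc private func pageControlChanged(_ sender: PageControl) {
    // currentPage has already been updated by the tap; redraw the layers.
    sender.updateDots()
}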