I am developing an iPhone application in which I use AVFoundation directly to capture video via the camera.
I've implemented a tap-to-focus feature for the user.
- (void)focus:(CGPoint)aPoint
{
#if HAS_AVFF
    Class captureDeviceClass = NSClassFromString(@"AVCaptureDevice");
    if (captureDeviceClass != nil) {
        AVCaptureDevice *device = [captureDeviceClass defaultDeviceWithMediaType:AVMediaTypeVideo];
        if ([device isFocusPointOfInterestSupported] &&
            [device isFocusModeSupported:AVCaptureFocusModeAutoFocus]) {
            CGRect screenRect = [[UIScreen mainScreen] bounds];
            double screenWidth = screenRect.size.width;
            double screenHeight = screenRect.size.height;
            double focus_x = aPoint.x / screenWidth;
            double focus_y = aPoint.y / screenHeight;
            if ([device lockForConfiguration:nil]) {
                [device setFocusPointOfInterest:CGPointMake(focus_x, focus_y)];
                [device setFocusMode:AVCaptureFocusModeAutoFocus];
                if ([device isExposureModeSupported:AVCaptureExposureModeAutoExpose]) {
                    [device setExposureMode:AVCaptureExposureModeAutoExpose];
                }
                [device unlockForConfiguration];
            }
        }
    }
#endif
}
So far, so good, but I am missing the feedback rectangle that the Photos app shows. Is there any way to tell AVFoundation to show this feedback rectangle, or do I have to implement this feature myself?
Here's what I did:
This is the class that creates the square shown when the user taps on the camera overlay.
CameraFocusSquare.h
#import <UIKit/UIKit.h>

@interface CameraFocusSquare : UIView
@end
CameraFocusSquare.m
#import "CameraFocusSquare.h"
#import <QuartzCore/QuartzCore.h>
const float squareLength = 80.0f;
#implementation FBKCameraFocusSquare
- (id)initWithFrame:(CGRect)frame
{
self = [super initWithFrame:frame];
if (self) {
// Initialization code
[self setBackgroundColor:[UIColor clearColor]];
[self.layer setBorderWidth:2.0];
[self.layer setCornerRadius:4.0];
[self.layer setBorderColor:[UIColor whiteColor].CGColor];
CABasicAnimation* selectionAnimation = [CABasicAnimation
animationWithKeyPath:#"borderColor"];
selectionAnimation.toValue = (id)[UIColor blueColor].CGColor;
selectionAnimation.repeatCount = 8;
[self.layer addAnimation:selectionAnimation
forKey:#"selectionAnimation"];
}
return self;
}
#end
And in the view where you receive your taps, do the following:
- (void)touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event
{
    UITouch *touch = [[event allTouches] anyObject];
    CGPoint touchPoint = [touch locationInView:touch.view];
    [self focus:touchPoint];

    // camFocus is a CameraFocusSquare ivar, so any previous square can be removed
    if (camFocus)
    {
        [camFocus removeFromSuperview];
    }
    if ([[touch view] isKindOfClass:[FBKVideoRecorderView class]])
    {
        camFocus = [[CameraFocusSquare alloc] initWithFrame:CGRectMake(touchPoint.x - 40, touchPoint.y - 40, 80, 80)];
        [camFocus setBackgroundColor:[UIColor clearColor]];
        [self addSubview:camFocus];
        [camFocus setNeedsDisplay];

        [UIView beginAnimations:nil context:NULL];
        [UIView setAnimationDuration:1.5];
        [camFocus setAlpha:0.0];
        [UIView commitAnimations];
    }
}
- (void)focus:(CGPoint)aPoint
{
    Class captureDeviceClass = NSClassFromString(@"AVCaptureDevice");
    if (captureDeviceClass != nil) {
        AVCaptureDevice *device = [captureDeviceClass defaultDeviceWithMediaType:AVMediaTypeVideo];
        if ([device isFocusPointOfInterestSupported] &&
            [device isFocusModeSupported:AVCaptureFocusModeAutoFocus]) {
            CGRect screenRect = [[UIScreen mainScreen] bounds];
            double screenWidth = screenRect.size.width;
            double screenHeight = screenRect.size.height;
            double focus_x = aPoint.x / screenWidth;
            double focus_y = aPoint.y / screenHeight;
            if ([device lockForConfiguration:nil]) {
                [device setFocusPointOfInterest:CGPointMake(focus_x, focus_y)];
                [device setFocusMode:AVCaptureFocusModeAutoFocus];
                if ([device isExposureModeSupported:AVCaptureExposureModeAutoExpose]) {
                    [device setExposureMode:AVCaptureExposureModeAutoExpose];
                }
                [device unlockForConfiguration];
            }
        }
    }
}
Adding to Anil's brilliant answer: instead of doing the calculations yourself, have a look at AVCaptureVideoPreviewLayer's captureDevicePointOfInterestForPoint:. It gives you a much more consistent focus point (available in iOS 6 and later).
- (void)focus:(CGPoint)aPoint
{
    Class captureDeviceClass = NSClassFromString(@"AVCaptureDevice");
    if (captureDeviceClass != nil) {
        AVCaptureDevice *device = [captureDeviceClass defaultDeviceWithMediaType:AVMediaTypeVideo];
        if ([device isFocusPointOfInterestSupported] &&
            [device isFocusModeSupported:AVCaptureFocusModeAutoFocus]) {
            CGPoint focusPoint = [self.captureVideoPreviewLayer captureDevicePointOfInterestForPoint:aPoint];
            if ([device lockForConfiguration:nil]) {
                [device setFocusPointOfInterest:focusPoint];
                [device setFocusMode:AVCaptureFocusModeAutoFocus];
                if ([device isExposureModeSupported:AVCaptureExposureModeAutoExpose]) {
                    [device setExposureMode:AVCaptureExposureModeAutoExpose];
                }
                [device unlockForConfiguration];
            }
        }
    }
}
The documentation is available here:
https://developer.apple.com/library/ios/documentation/AVFoundation/Reference/AVCaptureVideoPreviewLayer_Class/index.html#//apple_ref/occ/instm/AVCaptureVideoPreviewLayer/captureDevicePointOfInterestForPoint:
Swift implementation:
CameraFocusSquare view:
class CameraFocusSquare: UIView, CAAnimationDelegate {

    internal let kSelectionAnimation: String = "selectionAnimation"
    fileprivate var _selectionBlink: CABasicAnimation?

    convenience init(touchPoint: CGPoint) {
        self.init()
        self.updatePoint(touchPoint)
        self.backgroundColor = UIColor.clear
        self.layer.borderWidth = 2.0
        self.layer.borderColor = UIColor.orange.cgColor
        initBlink()
    }

    override init(frame: CGRect) {
        super.init(frame: frame)
    }

    fileprivate func initBlink() {
        // create the blink animation
        self._selectionBlink = CABasicAnimation(keyPath: "borderColor")
        self._selectionBlink!.toValue = (UIColor.white.cgColor as AnyObject)
        self._selectionBlink!.repeatCount = 3  // number of blinks
        self._selectionBlink!.duration = 0.4   // this is duration per blink
        self._selectionBlink!.delegate = self
    }

    required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    /**
     Updates the location of the view based on the incoming touchPoint.
     */
    func updatePoint(_ touchPoint: CGPoint) {
        let squareWidth: CGFloat = 100
        let frame: CGRect = CGRect(x: touchPoint.x - squareWidth / 2, y: touchPoint.y - squareWidth / 2, width: squareWidth, height: squareWidth)
        self.frame = frame
    }

    /**
     This unhides the view and initiates the animation by adding it to the layer.
     */
    func animateFocusingAction() {
        if let blink = _selectionBlink {
            // make the view visible
            self.alpha = 1.0
            self.isHidden = false
            // initiate the animation
            self.layer.add(blink, forKey: kSelectionAnimation)
        }
    }

    /**
     Hides the view after the animation stops. Since the animation is automatically removed, we don't need to do anything else here.
     */
    public func animationDidStop(_ anim: CAAnimation, finished flag: Bool) {
        if flag {
            // hide the view
            self.alpha = 0.0
            self.isHidden = true
        }
    }
}
Gesture action:
open func tapToFocus(_ gesture: UILongPressGestureRecognizer) {
    if gesture.state == UIGestureRecognizerState.began {
        let touchPoint: CGPoint = gesture.location(in: self.previewView)
        if let fsquare = self.focusSquare {
            fsquare.updatePoint(touchPoint)
        } else {
            self.focusSquare = CameraFocusSquare(touchPoint: touchPoint)
            self.previewView.addSubview(self.focusSquare!)
            self.focusSquare?.setNeedsDisplay()
        }
        self.focusSquare?.animateFocusingAction()
        let convertedPoint: CGPoint = self.previewLayer!.captureDevicePointOfInterest(for: touchPoint)
        let currentDevice: AVCaptureDevice = self.videoDeviceInput!.device
        if currentDevice.isFocusPointOfInterestSupported && currentDevice.isFocusModeSupported(AVCaptureFocusMode.autoFocus) {
            do {
                try currentDevice.lockForConfiguration()
                currentDevice.focusPointOfInterest = convertedPoint
                currentDevice.focusMode = AVCaptureFocusMode.autoFocus
                if currentDevice.isExposureModeSupported(AVCaptureExposureMode.continuousAutoExposure) {
                    currentDevice.exposureMode = AVCaptureExposureMode.continuousAutoExposure
                }
                currentDevice.isSubjectAreaChangeMonitoringEnabled = true
                currentDevice.unlockForConfiguration()
            } catch {
                // lockForConfiguration failed; ignore the tap rather than crash
            }
        }
    }
}
@Anil's answer is a great start, but it didn't work for me. I wanted the user to be able to keep selecting a focus point, instead of doing so only once (which is what his solution does). Thanks to @Anil for pointing me in the right direction.
There are some differences with my solution:
- I wanted to be able to reuse the focus square and its animation, rather than use them only once.
- I wanted the animation to disappear after it completed (something I couldn't get @Anil's solution to do).
- Instead of using initWithFrame:, I implemented my own initWithTouchPoint:.
- I have a method specifically for animating the focus action.
- I also have a method for updating the location of the frame.
- The size of the frame lives inside CameraFocusSquare, which makes it easier to find and update as needed.
CameraFocusSquare.h
@import UIKit;

@interface CameraFocusSquare : UIView

- (instancetype)initWithTouchPoint:(CGPoint)touchPoint;
- (void)updatePoint:(CGPoint)touchPoint;
- (void)animateFocusingAction;

@end
CameraFocusSquare.m
#import "CameraFocusSquare.h"
#implementation CameraFocusSquare {
CABasicAnimation *_selectionBlink;
}
/**
This is the init method for the square. It sets the frame for the view and sets border parameters. It also creates the blink animation.
*/
- (instancetype)initWithTouchPoint:(CGPoint)touchPoint {
self = [self init];
if (self) {
[self updatePoint:touchPoint];
self.backgroundColor = [UIColor clearColor];
self.layer.borderWidth = 2.0f;
self.layer.borderColor = [UIColor orangeColor].CGColor;
// create the blink animation
_selectionBlink = [CABasicAnimation
animationWithKeyPath:#"borderColor"];
_selectionBlink.toValue = (id)[UIColor whiteColor].CGColor;
_selectionBlink.repeatCount = 3; // number of blinks
_selectionBlink.duration = 0.4; // this is duration per blink
_selectionBlink.delegate = self;
}
return self;
}
/**
Updates the location of the view based on the incoming touchPoint.
*/
- (void)updatePoint:(CGPoint)touchPoint {
CGFloat squareWidth = 50;
CGRect frame = CGRectMake(touchPoint.x - squareWidth/2, touchPoint.y - squareWidth/2, squareWidth, squareWidth);
self.frame = frame;
}
/**
This unhides the view and initiates the animation by adding it to the layer.
*/
- (void)animateFocusingAction {
// make the view visible
self.alpha = 1.0f;
self.hidden = NO;
// initiate the animation
[self.layer addAnimation:_selectionBlink forKey:#"selectionAnimation"];
}
/**
Hides the view after the animation stops. Since the animation is automatically removed, we don't need to do anything else here.
*/
- (void)animationDidStop:(CAAnimation *)animation finished:(BOOL)flag {
// hide the view
self.alpha = 0.0f;
self.hidden = YES;
}
#end
I initiate all of this on top of a view. This allows me greater flexibility and separates my UI code from my controller code (think MVC).
PreviewView.h
@import UIKit;

@interface PreviewView : UIView

- (IBAction)tapToFocus:(UITapGestureRecognizer *)gestureRecognizer;

@end
PreviewView.m
#import "PreviewView.h"
#import "CameraFocusSquare.h"
#implementation PreviewView {
CameraFocusSquare *_focusSquare;
}
- (IBAction)tapToFocus:(UITapGestureRecognizer *)gestureRecognizer {
CGPoint touchPoint = [gestureRecognizer locationOfTouch:0 inView:self];
if (!_focusSquare) {
_focusSquare = [[CameraFocusSquare alloc] initWithTouchPoint:touchPoint];
[self addSubview:_focusSquare];
[_focusSquare setNeedsDisplay];
}
else {
[_focusSquare updatePoint:touchPoint];
}
[_focusSquare animateFocusingAction];
}
#end
Finally, in my UIViewController subclass, I have my UITapGestureRecognizer created and attached to the view. I also implement my tap-to-focus code here.
CameraViewController.m
- (void)viewDidLoad {
    // do other initialization stuff here

    // create the tap-to-focus gesture
    UITapGestureRecognizer *tapToFocusRecognizer = [[UITapGestureRecognizer alloc] initWithTarget:self action:@selector(tapToFocus:)];
    tapToFocusRecognizer.numberOfTapsRequired = 1;
    tapToFocusRecognizer.numberOfTouchesRequired = 1;
    [self.previewView addGestureRecognizer:tapToFocusRecognizer];
}

- (IBAction)tapToFocus:(UITapGestureRecognizer *)tapGestureRecognizer {
    if (!_captureDevice) {
        return;
    }
    if (![_captureDevice isFocusPointOfInterestSupported]) {
        return;
    }
    if (![_captureDevice isFocusModeSupported:AVCaptureFocusModeAutoFocus]) {
        return;
    }
    [self.previewView tapToFocus:tapGestureRecognizer];

    NSError *error;
    if (![_captureDevice lockForConfiguration:&error]) {
        NSLog(@"Error trying to lock configuration of camera. %@", [error localizedDescription]);
        return;
    }
    CGPoint touchPoint = [tapGestureRecognizer locationOfTouch:0 inView:self.previewView];
    // focusPointOfInterest expects normalized coordinates in the range (0,0) to (1,1)
    CGFloat touchX = touchPoint.x / self.previewView.frame.size.width;
    CGFloat touchY = touchPoint.y / self.previewView.frame.size.height;
    _captureDevice.focusMode = AVCaptureFocusModeAutoFocus;
    if ([_captureDevice isExposureModeSupported:AVCaptureExposureModeAutoExpose]) {
        _captureDevice.exposureMode = AVCaptureExposureModeAutoExpose;
    }
    _captureDevice.focusPointOfInterest = CGPointMake(touchX, touchY);
    if ([_captureDevice isExposurePointOfInterestSupported]) {
        _captureDevice.exposurePointOfInterest = CGPointMake(touchX, touchY);
    }
    [_captureDevice unlockForConfiguration];
}
Hope this helps people so they can move on to more important code!
Here's a basic Swift view that will show an animated focus square. Just add it to your camera view and hook it up to the focus callback from a tap gesture recognizer.
@objc func didTapToFocus(gesture: UITapGestureRecognizer) {
    let pointInViewCoordinates = gesture.location(in: gesture.view)
    let pointInCameraCoordinates = cameraView.videoPreviewLayer.captureDevicePointConverted(fromLayerPoint: pointInViewCoordinates)
    camera.focusOn(pointInCameraCoordinates: pointInCameraCoordinates)
    cameraView.showFocusBox(at: pointInViewCoordinates)
}
Focus view:
final class CameraFocusBoxView: UIView {

    // MARK: - Instantiation

    init() {
        super.init(frame: .zero)
        backgroundColor = .clear
        layer.addSublayer(focusBoxLayer)
    }

    // MARK: - API

    /// This zooms/fades in a focus square and blinks it a few times, then slowly fades it out
    func showBox(at point: CGPoint) {
        focusBoxLayer.removeAllAnimations()

        let scaleKey = "zoom in focus box"
        let fadeInKey = "fade in focus box"
        let pulseKey = "pulse focus box"
        let fadeOutKey = "fade out focus box"

        guard focusBoxLayer.animation(forKey: scaleKey) == nil,
            focusBoxLayer.animation(forKey: fadeInKey) == nil,
            focusBoxLayer.animation(forKey: pulseKey) == nil,
            focusBoxLayer.animation(forKey: fadeOutKey) == nil
            else { return }

        CATransaction.begin()
        CATransaction.setDisableActions(true)
        focusBoxLayer.position = point
        CATransaction.commit()

        let scale = CABasicAnimation(keyPath: "transform.scale")
        scale.fromValue = 1
        scale.toValue = 0.375
        scale.duration = 0.3
        scale.isRemovedOnCompletion = false
        scale.fillMode = .forwards

        let opacityFadeIn = CABasicAnimation(keyPath: "opacity")
        opacityFadeIn.fromValue = 0
        opacityFadeIn.toValue = 1
        opacityFadeIn.duration = 0.3
        opacityFadeIn.isRemovedOnCompletion = false
        opacityFadeIn.fillMode = .forwards

        let pulsing = CABasicAnimation(keyPath: "borderColor")
        pulsing.toValue = UIColor(white: 1, alpha: 0.5).cgColor
        pulsing.repeatCount = 2
        pulsing.duration = 0.2
        pulsing.beginTime = CACurrentMediaTime() + 0.3 // wait for the fade in to occur

        let opacityFadeOut = CABasicAnimation(keyPath: "opacity")
        opacityFadeOut.fromValue = 1
        opacityFadeOut.toValue = 0
        opacityFadeOut.duration = 0.5
        opacityFadeOut.beginTime = CACurrentMediaTime() + 2 // seconds
        opacityFadeOut.isRemovedOnCompletion = false
        opacityFadeOut.fillMode = .forwards

        focusBoxLayer.add(scale, forKey: scaleKey)
        focusBoxLayer.add(opacityFadeIn, forKey: fadeInKey)
        focusBoxLayer.add(pulsing, forKey: pulseKey)
        focusBoxLayer.add(opacityFadeOut, forKey: fadeOutKey)
    }

    // MARK: - Private Properties

    private lazy var focusBoxLayer: CALayer = {
        let box = CALayer()
        box.bounds = CGRect(x: 0, y: 0, width: 200, height: 200)
        box.borderWidth = 2
        box.borderColor = UIColor.white.cgColor
        box.opacity = 0
        return box
    }()

    // MARK: - Unsupported Initializers

    required init?(coder aDecoder: NSCoder) { fatalError("init(coder:) has not been implemented") }
}
Related
I am trying to stick a ball to a spinner when they collide, joined at the contact point. However, it seems that didBeginContact is being called before the contact starts. The image below shows them both spinning together, but with a big gap between them.
Code is below:
#import "GameScene.h"
#implementation GameScene
#synthesize _flowIsON;
NSString *const kFlowTypeRed = #"RED_FLOW_PARTICLE";
const float kRED_DELAY_BETWEEN_PARTICLE_DROP = 0.01; //delay for particle drop in seconds
static const uint32_t kRedParticleCategory = 0x1 << 0;
static const uint32_t kSpinnnerCategory = 0x1 << 1;
NSString *const kStartBtn = #"START_BTN";
NSString *const kLever = #"Lever";
NSString *const START_BTN_TEXT = #"Start Game";
CFTimeInterval lastTime;
-(void)didMoveToView:(SKView *)view {
    _bkgNode = (SKSpriteNode *)[self.scene childNodeWithName:@"Background"];
    [self initializeScene];
}

-(void)touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event {
    for (UITouch *touch in touches) {
        CGPoint location = [touch locationInNode:self];
        SKNode *node = [self nodeAtPoint:location];
        if ([node.name isEqualToString:kStartBtn]) {
            [node removeFromParent];
            //initalize to ON
            _flowIsON = YES;
            //[self initializeScene];
        }
    }
}

- (void)touchesMoved:(NSSet *)touches withEvent:(UIEvent *)event {
}

-(void)update:(CFTimeInterval)currentTime {
    float deltaTimeInSeconds = currentTime - lastTime;
    //NSLog(@"Time is %f and flow is %d", deltaTimeInSeconds, _flowIsON);
    if ((deltaTimeInSeconds > kRED_DELAY_BETWEEN_PARTICLE_DROP)) {
        //TBD
        SKAction *rotation = [SKAction rotateByAngle:M_PI/8.0 duration:0];
        [_spinner runAction:rotation];
        //only if its been past 1 second do we set the lasttime to the current time
        lastTime = currentTime;
    }
}
- (void)initializeScene {
    self.physicsWorld.contactDelegate = self;

    //create ball
    SKSpriteNode *ball = [SKSpriteNode spriteNodeWithImageNamed:@"Ball"];
    ball.size = CGSizeMake(50, 50);
    ball.position = CGPointMake(320, 1050);
    ball.zPosition = 1;
    ball.physicsBody = [SKPhysicsBody bodyWithCircleOfRadius:25];
    ball.physicsBody.restitution = 0.0;
    ball.physicsBody.categoryBitMask = kRedParticleCategory;
    ball.physicsBody.contactTestBitMask = kSpinnnerCategory;
    ball.physicsBody.collisionBitMask = kSpinnnerCategory;
    ball.name = @"Ball";
    NSLog(@"Ball size is %f", ball.size.width);
    [self addChild:ball];

    //Create spinner
    _spinner = [SKSpriteNode spriteNodeWithImageNamed:@"Spinner"];
    _spinner.size = CGSizeMake(300, 300);
    _spinner.position = CGPointMake(320, 500);
    _spinner.zPosition = 1;
    _spinner.physicsBody = [SKPhysicsBody bodyWithCircleOfRadius:150];
    _spinner.physicsBody.affectedByGravity = NO;
    _spinner.physicsBody.allowsRotation = YES;
    _spinner.physicsBody.dynamic = NO;
    _spinner.physicsBody.restitution = 0.0;
    _spinner.physicsBody.categoryBitMask = kSpinnnerCategory;
    _spinner.physicsBody.contactTestBitMask = kRedParticleCategory;
    _spinner.physicsBody.collisionBitMask = kRedParticleCategory;
    _spinner.name = @"Spinner";
    [self addChild:_spinner];

    //create pipe
    // CGPoint center = CGPointMake(400, 600);
    //
    // UIBezierPath *bezierPath = [UIBezierPath bezierPath];
    // [bezierPath addArcWithCenter:center radius:400 startAngle:1.825777 endAngle:2.011118 clockwise:YES];
    // [bezierPath addLineToPoint:center];
    // [bezierPath closePath];
    //
    // SKShapeNode *shapeNode = [SKShapeNode shapeNodeWithPath:bezierPath.CGPath];
    // shapeNode.strokeColor = [UIColor whiteColor];
    // shapeNode.fillColor = [UIColor whiteColor];
    // [self addChild:shapeNode];
}
#pragma mark - SKPhysicsContactDelegate Methods

- (void)didBeginContact:(SKPhysicsContact *)contact {
    if ([contact.bodyA.node.name isEqualToString:@"Ball"] && [contact.bodyB.node.name isEqualToString:@"Spinner"]) {
        [self connectNode1:(SKSpriteNode *)contact.bodyA.node toNode2:(SKSpriteNode *)contact.bodyB.node withContact:contact];
    }
}

- (void)didEndContact:(SKPhysicsContact *)contact {
    //NSLog(@"didEndContact called");
}

- (void)connectNode1:(SKSpriteNode *)node1 toNode2:(SKSpriteNode *)node2 withContact:(SKPhysicsContact *)contact
{
    SKPhysicsJointFixed *joint = [SKPhysicsJointFixed jointWithBodyA:node1.physicsBody
                                                               bodyB:node2.physicsBody
                                                              anchor:node2.position];
    [self.physicsWorld addJoint:joint];
}

@end
If I comment out the didBeginContact method, you can see the images are correctly sized, because when they collide they rest on each other perfectly.
Why is contact.contactPoint not the same as the point at which both bodies are visibly colliding? Any idea how to fix this?
didBeginContact is not called too early. The problem is that actions are evaluated earlier in the scene cycle: first didEvaluateActions, then didSimulatePhysics. So even though your SKActions may look correct, a physics evaluation happens afterwards. I suggest not using actions to make rotation corrections when working with the physics engine. Perhaps use constraints instead, since those are applied after the physics update.
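To make the ordering point concrete, here are two minimal sketches (my own illustrations of the suggestion above, not the answerer's code): either move the rotation to didSimulatePhysics so it lands after the physics pass, or hand the rotation to the physics engine entirely via angularVelocity.
// Sketch 1: rotate after the physics pass instead of via an SKAction in update:
- (void)didSimulatePhysics {
    _spinner.zRotation += M_PI / 8.0;
}

// Sketch 2: let the physics engine own the rotation; a dynamic, gravity-free
// body driven by angularVelocity keeps joints and rendering consistent.
// _spinner.physicsBody.dynamic = YES;
// _spinner.physicsBody.affectedByGravity = NO;
// _spinner.physicsBody.angularVelocity = M_PI; // radians per second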
I am trying to zoom an image view in and out. Here is the code:
- (void)pinch:(UIPinchGestureRecognizer *)gesture
{
    if (handSelected == YES)
    {
        if (gesture.state == UIGestureRecognizerStateEnded || gesture.state == UIGestureRecognizerStateChanged)
        {
            NSLog(@"gesture.scale = %f", gesture.scale);

            CGFloat currentScale = self.imgHand.frame.size.width / self.imgHand.bounds.size.width;
            CGFloat newScale = currentScale * gesture.scale;

            if (newScale < 1.0) {
                newScale = 1.0;
            }
            if (newScale > 4.0) {
                newScale = 4.0;
            }

            CGAffineTransform transform = CGAffineTransformMakeScale(newScale, newScale);
            self.imgHand.transform = transform;
            gesture.scale = 1;
        }
    }
}
- (void)viewDidLoad
{
    [super viewDidLoad];
    [self adjustRingPressed:self];

    self.view.multipleTouchEnabled = YES;
    self.imgHand.multipleTouchEnabled = YES;

    UIPinchGestureRecognizer *gst = [[UIPinchGestureRecognizer alloc] initWithTarget:self.imgHand action:@selector(pinch:)];
    [gst setDelegate:self];
    [self.imgHand addGestureRecognizer:gst];
}
It seems that my pinch code never runs.
Try adding:
self.imgHand.userInteractionEnabled = YES;
By default, UIImageView has userInteractionEnabled set to NO.
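Putting it together, here is a minimal sketch of a working setup, reusing the question's imgHand outlet and pinch: method. Note that the recognizer's target should be the object that actually implements pinch: (the view controller), which is my assumption about where that method lives:
- (void)viewDidLoad
{
    [super viewDidLoad];
    // UIImageView ignores touches unless this is enabled
    self.imgHand.userInteractionEnabled = YES;
    self.imgHand.multipleTouchEnabled = YES;

    // the target must be the object that implements pinch: (here, the controller)
    UIPinchGestureRecognizer *gst = [[UIPinchGestureRecognizer alloc] initWithTarget:self
                                                                              action:@selector(pinch:)];
    [self.imgHand addGestureRecognizer:gst];
}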
Also make the handler follow the standard gesture-recognizer template, like the code below:
- (IBAction)handlePinch:(UIPinchGestureRecognizer *)recognizer
{
    recognizer.view.transform = CGAffineTransformScale(recognizer.view.transform, recognizer.scale, recognizer.scale);
    recognizer.scale = 1;
}
I have this code that works perfectly on an iPad 2 but scales incorrectly on a Retina iPad.
I ran the app unchanged on both iPads and the behaviour is totally different.
On the Retina iPad the images snap back to their original position and the transform doesn't take place.
The code takes a group of views, adds them to a temporary view, resizes the temporary view, and then adds those views back, so I can resize all of them at the same time.
- (IBAction)scaleParts:(UIPinchGestureRecognizer *)sender {
    if (sender.state == UIGestureRecognizerStateBegan) {
        self.tempCanvasView = [[UIView alloc] initWithFrame:self.canvas.bounds];
        self.tempCanvasView.backgroundColor = [UIColor redColor];
        for (UIView *view in [self.canvas subviews]) {
            CGRect currentFrame = view.bounds;
            CGRect newFrame = [view convertRect:currentFrame toView:self.tempCanvasView];
            view.bounds = newFrame;
            [self.tempCanvasView addSubview:view];
        }
        [self.canvas addSubview:self.tempCanvasView];
    } else if (sender.state == UIGestureRecognizerStateChanged) {
        self.tempCanvasView.transform = CGAffineTransformScale(self.tempCanvasView.transform, sender.scale, sender.scale);
        sender.scale = 1.0;
    } else if (sender.state == UIGestureRecognizerStateEnded) {
        for (UIView *view in [self.tempCanvasView subviews]) {
            CGRect currentFrame = view.bounds;
            CGRect newFrame = [view convertRect:currentFrame toView:self.canvas];
            view.frame = newFrame;
            [self.canvas addSubview:view];
        }
        [self.tempCanvasView removeFromSuperview];
        self.tempCanvasView = nil;
    }
}
Maybe you need to detect which screen scale the app is running on (scale = 1 for a non-Retina iPad, 2 for a Retina iPad):
CGFloat scale = [[UIScreen mainScreen] scale];
After that:
if (scale == 1) {
    // code from above
} else {
    // code from above, with a small modification
}
Here is a correct method to do it:
SWIFT:
extension UIView {
    func convertRectCorrectly(rect: CGRect, toView view: UIView) -> CGRect {
        if UIScreen.mainScreen().scale == 1 {
            return self.convertRect(rect, toView: view)
        }
        else if self == view {
            return rect
        }
        else {
            var rectInParent = self.convertRect(rect, toView: self.superview)
            rectInParent.origin.x /= UIScreen.mainScreen().scale
            rectInParent.origin.y /= UIScreen.mainScreen().scale
            let superViewRect = self.superview!.convertRectCorrectly(self.superview!.frame, toView: view)
            rectInParent.origin.x += superViewRect.origin.x
            rectInParent.origin.y += superViewRect.origin.y
            return rectInParent
        }
    }
}
I have a problem scaling a UIImageView that is placed inside a UIScrollView. I have googled and checked all the related questions on Stack Overflow, and tried all the answers posted there; nothing worked for me.
First, I place the UIImageView inside the UIScrollView in a nib file, take an image from the camera roll, and fill the image view. Then I use a UIRotationGestureRecognizer to rotate the image.
Here is the code I am using:
- (void)viewDidLoad
{
    [super viewDidLoad];
    NSLog(@"%@", [[UIDevice currentDevice] model]);
    // Do any additional setup after loading the view, typically from a nib.
    self.imagePicker = [[[UIImagePickerController alloc] init] autorelease];
    self.picChosenImageView.layer.shouldRasterize = YES;
    self.picChosenImageView.layer.rasterizationScale = [UIScreen mainScreen].scale;
    self.picChosenImageView.layer.contents = (id)[UIImage imageNamed:@"test"].CGImage;
    self.picChosenImageView.layer.shadowColor = [UIColor blackColor].CGColor;
    self.picChosenImageView.layer.shadowOpacity = 0.8f;
    self.picChosenImageView.layer.shadowRadius = 8;
    self.picChosenImageView.layer.shadowPath = [UIBezierPath bezierPathWithRect:self.picChosenImageView.bounds].CGPath;

    UIRotationGestureRecognizer *rotationRecognizer = [[[UIRotationGestureRecognizer alloc] initWithTarget:self
                                                                                                    action:@selector(handleRotate:)] autorelease];
    rotationRecognizer.delegate = self;
    [self.picChosenImageView addGestureRecognizer:rotationRecognizer];

    self.containerView.delegate = self;
    self.containerView.contentSize = self.picChosenImageView.layer.frame.size;
    self.containerView.maximumZoomScale = 4.0f;
    self.containerView.minimumZoomScale = 1.0f;
    angle = 0.0f;
    useRotation = 0.0;
    isRotationStarted = FALSE;
    isZoomingStarted = FALSE;
}
-(void)lockZoom
{
    maximumZoomScale = self.containerView.maximumZoomScale;
    minimumZoomScale = self.containerView.minimumZoomScale;
    self.containerView.maximumZoomScale = 1.0;
    self.containerView.minimumZoomScale = 1.0;
    self.containerView.clipsToBounds = NO;
    self.containerView.scrollEnabled = NO;
}

-(void)unlockZoom
{
    self.containerView.maximumZoomScale = maximumZoomScale;
    self.containerView.minimumZoomScale = minimumZoomScale;
    self.containerView.clipsToBounds = YES;
    self.containerView.scrollEnabled = YES;
}
#pragma mark - ScrollView delegate methods

- (UIView *)viewForZoomingInScrollView:(UIScrollView *)scrollView
{
    return self.picChosenImageView;
}

- (void)scrollViewDidZoom:(UIScrollView *)scrollView
{
    CGRect frame = self.picChosenImageView.frame;
    frame.origin = CGPointZero;
    self.picChosenImageView.frame = frame;
    //self.picChosenImageView.transform = prevTransform;
}

-(void)scrollViewWillBeginZooming:(UIScrollView *)scrollView withView:(UIView *)view
{
    if (!isZoomingStarted)
    {
        self.picChosenImageView.transform = CGAffineTransformRotate(self.picChosenImageView.transform, angle);
        NSLog(@"The zooming started");
        isZoomingStarted = TRUE;
        CGSize contentSize = self.containerView.bounds.size;
        CGRect contentFrame = self.containerView.bounds;
        NSLog(@"frame on start: %@", NSStringFromCGRect(contentFrame));
        NSLog(@"size on start: %@", NSStringFromCGSize(contentSize));
        //prevTransform = self.picChosenImageView.transform;
    }
}

-(void)scrollViewDidEndZooming:(UIScrollView *)scrollView withView:(UIView *)view atScale:(float)scale
{
    if (isZoomingStarted)
    {
        self.picChosenImageView.transform = CGAffineTransformRotate(self.picChosenImageView.transform, angle);
        isZoomingStarted = FALSE;
        CGSize contentSize = self.containerView.contentSize;
        CGRect contentFrame = self.containerView.bounds;
        NSLog(@"frame on end: %@", NSStringFromCGRect(contentFrame));
        NSLog(@"size on end: %@", NSStringFromCGSize(contentSize));
    }
}
#pragma mark - GestureRecognizer methods

- (void)handleRotate:(UIRotationGestureRecognizer *)recognizer
{
    if (isZoomingStarted == FALSE)
    {
        if ([recognizer state] == UIGestureRecognizerStateBegan)
        {
            angle = 0.0f;
            [self lockZoom];
        }
        useRotation += recognizer.rotation;
        while (useRotation < -M_PI)
        {
            useRotation += M_PI * 2;
        }
        while (useRotation > M_PI)
        {
            useRotation -= M_PI * 2;
        }
        NSLog(@"The rotated value is %f", RADIANS_TO_DEGREES(useRotation));
        self.picChosenImageView.transform = CGAffineTransformRotate([self.picChosenImageView transform],
                                                                    recognizer.rotation);
        [recognizer setRotation:0];
        if ([recognizer state] == UIGestureRecognizerStateEnded)
        {
            angle = useRotation;
            useRotation = 0.0f;
            isRotationStarted = FALSE;
            self.containerView.hidden = NO;
            //prevTransform = self.picChosenImageView.transform;
            [self unlockZoom];
        }
    }
}
My problem: I can zoom in and out successfully, and I can rotate the UIImageView as I want. But after rotating the image view to a certain angle, when I try to zoom in, the image view snaps back to its original position (rotates itself back to zero degrees) and only then does the zooming happen. I want to retain the rotation while zooming. I tried saving the previous transform and assigning it back in the scrollViewDidZoom and scrollViewWillBeginZooming delegate methods; none of that worked. Please help me spot the mistake I am overlooking.
Try using CGAffineTransformScale instead of just resizing the frame for zooming:
anImage.transform = CGAffineTransformScale(anImage.transform, 2.0, 2.0);
Changing the transform for scaling might fix your rotation issue.
Hope this helps.
I had the same problem. UIScrollView takes control over the UIImageView and applies a transform without rotation.
So I don't hand the image view to the scroll view for zooming; instead, I add a UIPinchGestureRecognizer for scaling.
func viewForZoomingInScrollView(scrollView: UIScrollView) -> UIView? {
    return nil
}
Dragging still works :)
// viewDidLoad
let pinchGestureRecognizer = UIPinchGestureRecognizer(target: self, action: #selector(pinchRecognized))
scrollView.addGestureRecognizer(pinchGestureRecognizer)

func pinchRecognized(sender: UIPinchGestureRecognizer) {
    if sender.state == .Began || sender.state == .Changed {
        let scale = sender.scale
        imageView.transform = CGAffineTransformScale(imageView.transform, scale, scale)
        sender.scale = 1
    }
}
I'm aware that UIRotationGestureRecognizer is already part of iOS, but that gesture requires two fingers. How can I implement a similar gesture recognizer that requires only one finger? There's a game in the App Store, Gyrotate, with a pretty good implementation of this. Any clues are appreciated. Thanks.
Kirby Turner has a complete one finger rotation gesture recognizer here.
Here's the code; it works in my simulator. Mark this as answered if it is what you were looking for.
// Note: POINT(), distance(), dotproduct() and centerPoint() are helpers
// defined elsewhere (see the sketch after this snippet).

// On new touch, start a new array of points
- (void)touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event
{
    self.points = [NSMutableArray array];
    CGPoint pt = [[touches anyObject] locationInView:self];
    [self.points addObject:[NSValue valueWithCGPoint:pt]];
}

// Add each point to the array
- (void)touchesMoved:(NSSet *)touches withEvent:(UIEvent *)event
{
    CGPoint pt = [[touches anyObject] locationInView:self];
    [self.points addObject:[NSValue valueWithCGPoint:pt]];
    [self setNeedsDisplay];
}

// At the end of touches, determine whether a circle was drawn
- (void)touchesEnded:(NSSet *)touches withEvent:(UIEvent *)event
{
    if (!self.points) return;
    if (self.points.count < 3) return;

    // Test 1: The start and end points must be within 60 pixels of each other
    CGRect tcircle;
    if (distance(POINT(0), POINT(self.points.count - 1)) < 60.0f)
        tcircle = [self centeredRectangle];
    else
        return; // start and end are too far apart to be a circle

    // Test 2: Count the distance traveled in degrees. Must fall within 45 degrees of 2 PI
    CGPoint center = CGPointMake(CGRectGetMidX(tcircle), CGRectGetMidY(tcircle));
    float distance = ABS(acos(dotproduct(centerPoint(POINT(0), center), centerPoint(POINT(1), center))));
    for (int i = 1; i < (self.points.count - 1); i++)
        distance += ABS(acos(dotproduct(centerPoint(POINT(i), center), centerPoint(POINT(i+1), center))));
    if ((ABS(distance - 2 * M_PI) < (M_PI / 4.0f))) circle = tcircle;

    [self setNeedsDisplay];
}
I implemented IQStickerView with one-finger rotation, scale, resize, and close features.
Features:
1) One-finger rotation and scale.
2) One-finger resize.
3) Enable/disable rotation, scale, and resize with properties.
4) Auto-manages multiple IQStickerViews.
5) Can also work inside a UIScrollView.
6) Fast responsiveness.
The GitHub repository is here: https://github.com/hackiftekhar/IQStickerView
Try exploring the UIGestureRecognizer class. You should be able to build a custom recognizer by subclassing it, using UIGestureRecognizerDelegate where needed.
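As a starting point, here is a minimal sketch of that subclassing approach (my own illustration, not a complete library): a one-finger recognizer that reports the rotation around the center of the view it is attached to.
// OneFingerRotationGestureRecognizer — a hypothetical subclass for illustration.
#import <UIKit/UIGestureRecognizerSubclass.h>

@interface OneFingerRotationGestureRecognizer : UIGestureRecognizer
@property (nonatomic, readonly) CGFloat rotation; // radians since the last change event
@end

@implementation OneFingerRotationGestureRecognizer {
    CGFloat _previousAngle;
}

// Angle of the touch around the attached view's center
- (CGFloat)angleForTouch:(UITouch *)touch {
    CGPoint center = CGPointMake(CGRectGetMidX(self.view.bounds), CGRectGetMidY(self.view.bounds));
    CGPoint point = [touch locationInView:self.view];
    return atan2(point.y - center.y, point.x - center.x);
}

- (void)touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event {
    if (touches.count != 1) {
        self.state = UIGestureRecognizerStateFailed;
        return;
    }
    _previousAngle = [self angleForTouch:[touches anyObject]];
    self.state = UIGestureRecognizerStateBegan;
}

- (void)touchesMoved:(NSSet *)touches withEvent:(UIEvent *)event {
    CGFloat angle = [self angleForTouch:[touches anyObject]];
    _rotation = angle - _previousAngle;
    _previousAngle = angle;
    self.state = UIGestureRecognizerStateChanged;
}

- (void)touchesEnded:(NSSet *)touches withEvent:(UIEvent *)event {
    self.state = UIGestureRecognizerStateEnded;
}

@end
The target's action can then apply the delta with something like view.transform = CGAffineTransformRotate(view.transform, recognizer.rotation);.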
Implemented with a pan gesture recognizer. This version attaches the recognizer to a separate UIView (a rotate button), but it should also work with the recognizer attached directly to the view you want to rotate.
- (void)gestureRotateButtonPan:(UIPanGestureRecognizer *)gestureRecognizer {
    switch (gestureRecognizer.state) {
        case UIGestureRecognizerStatePossible:
            break;
        case UIGestureRecognizerStateBegan:
        {
            CGPoint location = [gestureRecognizer translationInView:[gestureRecognizer view]];
            _touchRotateStartPoint = [self convertPoint:location fromView:[gestureRecognizer view]];
        }
            break;
        case UIGestureRecognizerStateChanged:
        {
            CGPoint imageLocation = CGPointMake(self.mainImageView.transform.tx + self.mainImageView.center.x,
                                                self.mainImageView.transform.ty + self.mainImageView.center.y);
            CGPoint buttonLocation = [gestureRecognizer translationInView:self];
            buttonLocation.x += _touchRotateStartPoint.x;
            buttonLocation.y += _touchRotateStartPoint.y;

            CGFloat currentRotation = atan2(buttonLocation.y - imageLocation.y, buttonLocation.x - imageLocation.x)
                                    - atan2(_touchRotateStartPoint.y - imageLocation.y, _touchRotateStartPoint.x - imageLocation.x);
            CGFloat rotation = -(_lastRotation - currentRotation);

            CGAffineTransform currentTransform = self.mainImageView.transform;
            CGAffineTransform rotatedTransform = CGAffineTransformRotate(currentTransform, rotation);
            self.mainImageView.transform = rotatedTransform;
            [self setNeedsDisplay];
        }
            break;
        case UIGestureRecognizerStateCancelled:
        case UIGestureRecognizerStateEnded:
            _lastRotation = 0.0;
            break;
        case UIGestureRecognizerStateFailed:
            break;
        default:
            break;
    }
}
One-finger rotation on a UIView using a pan gesture recognizer.
Fully functional code with Swift 5:
import UIKit

class ViewController: UIViewController {

    // Variable for rotating
    private var deltaAngle: CGFloat = 0

    let squareView: UIView = {
        let anyView = UIView()
        anyView.backgroundColor = .red
        anyView.isUserInteractionEnabled = true
        anyView.isMultipleTouchEnabled = true
        return anyView
    }()

    let rotateButton: UIButton = {
        let button = UIButton()
        button.backgroundColor = .black
        button.setImage(UIImage(systemName: "rotate.right"), for: .normal)
        return button
    }()

    override func viewDidLoad() {
        super.viewDidLoad()
        squareView.frame = CGRect(x: 50, y: 50, width: 100, height: 100)
        rotateButton.frame = CGRect(x: 0, y: squareView.frame.height - 30, width: 30, height: 30)
        squareView.center = view.center
        view.addSubview(squareView)
        squareView.addSubview(rotateButton)
        let panToRotate = UIPanGestureRecognizer(target: self, action: #selector(handleRotateGesture(_:)))
        rotateButton.addGestureRecognizer(panToRotate)
    }

    @objc func handleRotateGesture(_ recognizer: UIPanGestureRecognizer) {
        let touchLocation = recognizer.location(in: squareView.superview)
        let center = squareView.center
        switch recognizer.state {
        case .began:
            self.deltaAngle = atan2(touchLocation.y - center.y, touchLocation.x - center.x) - atan2(squareView.transform.b, squareView.transform.a)
        case .changed:
            let angle = atan2(touchLocation.y - center.y, touchLocation.x - center.x)
            let angleDiff = self.deltaAngle - angle
            squareView.transform = CGAffineTransform(rotationAngle: -angleDiff)
        default: break
        }
    }
}
}