I have made a mess of my translation from Obj-C to Swift, so I'd really appreciate a refactor/code-layout review. The curly braces are really throwing me. Are there any Xcode plugins or similar tools to help me better manage my code blocks?
Some of my functions and calculations may not be very efficient either, so suggestions for those areas would be great too, for example if you have used or seen better filter algorithms.
P.S. Thanks, Martin.
import UIKit
import Foundation
import AVFoundation
import CoreMedia
import CoreVideo
let minFramesForFilterToSettle = 10
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
let captureSession = AVCaptureSession()
// If we find a device we'll store it here for later use
var captureDevice : AVCaptureDevice?
var validFrameCounter: Int = 0
var detector: Detector!
var filter: Filter!
// var currentState = CurrentState.stateSampling // Is this initialized correctly?
override func viewDidLoad() {
super.viewDidLoad()
self.detector = Detector()
self.filter = Filter()
// startCameraCapture() // call to an unused function.
captureSession.sessionPreset = AVCaptureSessionPresetHigh
let devices = AVCaptureDevice.devices()
// Loop through all the capture devices on this phone
for device in devices {
// Make sure this particular device supports video
if (device.hasMediaType(AVMediaTypeVideo)) {
// Finally check the position and confirm we've got the back camera
if(device.position == AVCaptureDevicePosition.Front) {
captureDevice = device as? AVCaptureDevice
if captureDevice != nil {
//println("Capture device found")
beginSession()
}
}
}
}
} // end of viewDidLoad ???
// configure device for camera and focus mode // maybe not needed since we don't use focus?
func configureDevice() {
if let device = captureDevice {
device.lockForConfiguration(nil)
//device.focusMode = .Locked
device.unlockForConfiguration()
}
}
// start capturing frames
func beginSession() {
// Create the AVCapture Session
configureDevice()
var err : NSError? = nil
captureSession.addInput(AVCaptureDeviceInput(device: captureDevice, error: &err))
if err != nil {
println("error: \(err?.localizedDescription)")
}
// Automatic Switch ON torch mode
if captureDevice!.hasTorch {
// lock your device for configuration
captureDevice!.lockForConfiguration(nil)
// if the torch is currently on, turn it off; otherwise turn it on
captureDevice!.torchMode = captureDevice!.torchActive ? AVCaptureTorchMode.Off : AVCaptureTorchMode.On
// sets the torch intensity to 100%
captureDevice!.setTorchModeOnWithLevel(1.0, error: nil)
// unlock your device
captureDevice!.unlockForConfiguration()
}
// Create a AVCaptureInput with the camera device
var deviceInput = AVCaptureDeviceInput.deviceInputWithDevice(captureDevice, error: &err) as? AVCaptureInput
if deviceInput == nil {
println("error: \(err?.localizedDescription)")
}
// Set the output
var videoOutput : AVCaptureVideoDataOutput = AVCaptureVideoDataOutput()
// create a queue to run the capture on
var captureQueue : dispatch_queue_t = dispatch_queue_create("captureQueue", nil)
// setup ourself up as the capture delegate
videoOutput.setSampleBufferDelegate(self, queue: captureQueue)
// configure the pixel format
videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String : Int(kCVPixelFormatType_32BGRA)] // kCVPixelBufferPixelFormatTypeKey is a CFString btw.
// set the minimum acceptable frame rate to 10 fps
captureDevice!.activeVideoMinFrameDuration = CMTimeMake(1, 10)
// and the size of the frames we want - we'll use the smallest frame size available
captureSession.sessionPreset = AVCaptureSessionPresetLow
// Add the input and output
captureSession.addInput(deviceInput)
captureSession.addOutput(videoOutput)
// Start the session
captureSession.startRunning()
// we're now sampling from the camera
enum CurrentState {
case statePaused
case stateSampling
}
var currentState = CurrentState.statePaused
func setState(state: CurrentState){
switch state
{
case .statePaused:
// what goes here? Something like this?
UIApplication.sharedApplication().idleTimerDisabled = false
case .stateSampling:
// what goes here? Something like this?
UIApplication.sharedApplication().idleTimerDisabled = true // singletons
}
}
// we're now sampling from the camera
currentState = CurrentState.stateSampling
// stop the app from sleeping
UIApplication.sharedApplication().idleTimerDisabled = true
// update our UI on a timer every 0.1 seconds
NSTimer.scheduledTimerWithTimeInterval(0.1, target: self, selector: Selector("update"), userInfo: nil, repeats: true)
func stopCameraCapture() {
captureSession.stopRunning()
}
// pragma mark Pause and Resume of detection
func pause() {
if currentState == CurrentState.statePaused {
return
}
// switch off the torch
if captureDevice!.isTorchModeSupported(AVCaptureTorchMode.On) {
captureDevice!.lockForConfiguration(nil)
captureDevice!.torchMode = AVCaptureTorchMode.Off
captureDevice!.unlockForConfiguration()
}
currentState = CurrentState.statePaused
// let the application go to sleep if the phone is idle
UIApplication.sharedApplication().idleTimerDisabled = false
}
func resume() {
if currentState != CurrentState.statePaused {
return
}
// switch on the torch
if captureDevice!.isTorchModeSupported(AVCaptureTorchMode.On) {
captureDevice!.lockForConfiguration(nil)
captureDevice!.torchMode = AVCaptureTorchMode.On
captureDevice!.unlockForConfiguration()
}
currentState = CurrentState.stateSampling
// stop the app from sleeping
UIApplication.sharedApplication().idleTimerDisabled = true
}
// beginning of paste
// r,g,b values are from 0 to 1 // h = [0,360], s = [0,1], v = [0,1]
// if s == 0, then h = -1 (undefined)
func RGBtoHSV(r : Float, g : Float, b : Float, inout h : Float, inout s : Float, inout v : Float) {
let rgbMin = min(r, g, b)
let rgbMax = max(r, g, b)
let delta = rgbMax - rgbMin
v = rgbMax
h = 0
// start of calculation
if (rgbMax != 0) {
s = delta / rgbMax
}
else{
// r = g = b = 0
s = 0
h = -1
return
}
if r == rgbMax {
h = (g - b) / delta
}
else if (g == rgbMax) {
h = 2 + (b - r ) / delta
}
else {
h = 4 + (r - g) / delta
}
h *= 60 // convert to degrees
if (h < 0) {
h += 360
}
}
// process the frame of video
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
// if we're paused don't do anything
if currentState == CurrentState.statePaused {
// reset our frame counter
self.validFrameCounter = 0
return
}
// this is the image buffer
var cvimgRef:CVImageBufferRef = CMSampleBufferGetImageBuffer(sampleBuffer)
// Lock the image buffer
CVPixelBufferLockBaseAddress(cvimgRef, 0)
// access the data
var width: size_t = CVPixelBufferGetWidth(cvimgRef)
var height:size_t = CVPixelBufferGetHeight(cvimgRef)
// get the raw image bytes
var buf = UnsafeMutablePointer<UInt8>(CVPixelBufferGetBaseAddress(cvimgRef))
var bprow: size_t = CVPixelBufferGetBytesPerRow(cvimgRef)
var r: Float = 0
var g: Float = 0
var b: Float = 0
for var y = 0; y < height; y++ {
for var x = 0; x < width * 4; x += 4 {
b += Float(buf[x])
g += Float(buf[x + 1])
r += Float(buf[x + 2])
}
// step the pointer on to the next row
buf += bprow
}
// unlock the buffer now that we've read it
CVPixelBufferUnlockBaseAddress(cvimgRef, 0)
// average the sums and normalise to the 0...1 range RGBtoHSV expects
r /= 255 * Float(width * height)
g /= 255 * Float(width * height)
b /= 255 * Float(width * height)
}
// convert from rgb to hsv colourspace
var h = Float()
var s = Float()
var v = Float()
RGBtoHSV(r, g, b, &h, &s, &v)
// do a sanity check for blackness
if s > 0.5 && v > 0.5 {
// increment the valid frame count
validFrameCounter++
// filter the hue value - the filter is a simple band pass filter that removes any DC component and any high frequency noise
var filtered: Float = filter.processValue(h)
// have we collected enough frames for the filter to settle?
if validFrameCounter > minFramesForFilterToSettle {
// add the new value to the detector
detector.addNewValue(filtered, atTime: CACurrentMediaTime())
}
} else {
validFrameCounter = 0
// clear the detector - we only really need to do this once, just before we start adding valid samples
detector.reset()
}
}
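On the filter question: the comment in captureOutput describes a simple band-pass filter that removes the DC component and high-frequency noise. Your Filter class isn't shown, so here is only a minimal sketch (the class name and coefficients are illustrative, not your implementation): one cheap way to get that behaviour is to subtract a slow exponential moving average (the DC estimate) from a fast one (the de-noised signal).
class SimpleBandPassFilter {
    private var slowAverage: Float = 0   // slow EMA, tracks the DC component
    private var fastAverage: Float = 0   // fast EMA, drops high-frequency noise
    private var primed = false
    func processValue(value: Float) -> Float {
        if !primed {
            // seed both averages so the first samples don't swing wildly
            slowAverage = value
            fastAverage = value
            primed = true
        }
        slowAverage = 0.99 * slowAverage + 0.01 * value
        fastAverage = 0.80 * fastAverage + 0.20 * value
        return fastAverage - slowAverage // band-passed sample
    }
}
The 0.99/0.01 and 0.80/0.20 smoothing factors would need tuning against your actual frame rate (you cap it at 10 fps above).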
You can actually call it like this:
RGBtoHSV(r: r, g: g, b: b, h: &h, s: &s, v: &v)
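If you're refactoring anyway, a tuple-returning version avoids threading three inout variables through every call. A minimal sketch of the same algorithm (rgbToHSV is a hypothetical replacement name):
func rgbToHSV(r: Float, g: Float, b: Float) -> (h: Float, s: Float, v: Float) {
    let maxV = max(r, g, b)
    let delta = maxV - min(r, g, b)
    if maxV == 0 || delta == 0 {
        return (h: -1, s: 0, v: maxV)  // r == g == b, so hue is undefined
    }
    var h: Float
    if r == maxV {
        h = (g - b) / delta            // between yellow and magenta
    } else if g == maxV {
        h = 2 + (b - r) / delta        // between cyan and yellow
    } else {
        h = 4 + (r - g) / delta        // between magenta and cyan
    }
    h *= 60                            // convert to degrees
    if h < 0 { h += 360 }
    return (h: h, s: delta / maxV, v: maxV)
}
At the call site that becomes something like let (h, s, v) = rgbToHSV(r: r, g: g, b: b), with no temporaries to declare first.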
-(void)update:(NSTimeInterval)currentTime {
if(_gameState == STARTING) {
_startedTime = currentTime;
_gameState = PLAYING;
}
if(_gameState == PLAYING) {
int timeLeftRounded = ceil(LEVEL_TIME+(_startedTime - currentTime));
timerLabel.text = [NSString stringWithFormat:@"Time: %d", timeLeftRounded];
}
}
I get an error on the line
int timeLeftRounded = ceil(LEVEL_TIME+(_startedTime - currentTime));
The error message says I should delete the last ), but that's not right and I'm still getting the error.
I have a tic-tac-toe game here in Swift and I need a way to disable tap gesture recognition on spaces that have already been played, so that on their turn the user cannot simply tap places that have been played.
I have tried putting the line
ticTacImages[spot].removeGestureRecognizer(UITapGestureRecognizer(target: self, action: "imageClicked:"))
in my imageClicked and setImageForSpot functions and nothing happens. What am I doing wrong here?
The code involved:
for imageView in ticTacImages {
imageView.userInteractionEnabled = true
imageView.addGestureRecognizer(UITapGestureRecognizer(target: self, action: "imageClicked:"))
}
}
// Gesture recognizer method
func imageClicked(reco: UITapGestureRecognizer) {
var imageViewTapped = reco.view as UIImageView
println(plays[imageViewTapped.tag])
println(aiDeciding)
println(done)
opening1.hidden = true
opening2.hidden = true
opening3.hidden = true
if plays[imageViewTapped.tag] == nil && !aiDeciding && !done {
setImageForSpot(imageViewTapped.tag, player:.UserPlayer)
}
checkForWin()
let delay = 1 * Double(NSEC_PER_SEC)
let time = dispatch_time(DISPATCH_TIME_NOW, Int64(delay))
//During delay
for imageView in ticTacImages {
imageView.userInteractionEnabled = false
}
dispatch_after(time, dispatch_get_main_queue(), {
//After delay
for imageView in self.ticTacImages {
imageView.userInteractionEnabled = true
}
self.aiTurn()
})
}
var varChanger: Int?
var playerMark: String?
func setImageForSpot(spot:Int,player:Player){
if varChanger == 1 {
playerMark = player == .UserPlayer ? "blue_x" : "blue_o"
}
else if varChanger == 2 {
playerMark = player == .UserPlayer ? "green_x" : "green_o"
}
else if varChanger == 3 {
playerMark = player == .UserPlayer ? "purple_x" : "purple_o"
}
else if varChanger == 4 {
playerMark = player == .UserPlayer ? "pink_x" : "pink_o"
}
else if varChanger == 5 {
playerMark = player == .UserPlayer ? "yellow_x" : "yellow_o"
}
else {
playerMark = player == .UserPlayer ? "red_x" : "red_o"
}
println("setting spot \(player.toRaw()) spot \(spot)")
plays[spot] = player.toRaw()
ticTacImages[spot].image = UIImage(named: playerMark)
}
You can only removeGestureRecognizer() for a gesture recognizer that has already been added. In your example you're creating a new one before removing it — instead you should keep track of the old one, or call ticTacImages[spot].gestureRecognizers to get an array of the ones which have been added.
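A sketch of both options, reusing the ticTacImages setup from the question (Swift 1.2-era syntax; the cast may differ or be unnecessary in later versions):
// Option 1 (simplest): once a spot is played, stop it accepting taps.
// Add this at the end of setImageForSpot:
ticTacImages[spot].userInteractionEnabled = false

// Option 2: remove the recognizers that were actually added,
// by asking the view for them instead of constructing new ones.
if let recognizers = ticTacImages[spot].gestureRecognizers {
    for recognizer in recognizers {
        ticTacImages[spot].removeGestureRecognizer(recognizer as! UIGestureRecognizer)
    }
}
Note that with option 1, the dispatch_after block in imageClicked re-enables every image view, so it would need to skip spots that are already set in plays.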
I want to query the orientation the iPhone is currently in. Using
[UIDevice currentDevice].orientation
works as long as the device isn't orientation-locked. If it is locked, however, it always responds with the locked orientation, not with the actual orientation of the device.
Is there a high-level way to get the actual device orientation?
You can also use CoreMotion.
Orientation detection algorithm:
If abs(y) < abs(x), your iPhone is in a landscape position; look at the sign of x to detect right or left.
Otherwise your iPhone is in a portrait position; look at the sign of y to detect up or upside-down.
If you are interested in face-up or face-down, look at the value of z.
import CoreMotion
var uMM: CMMotionManager!
override func viewWillAppear(p: Bool) {
super.viewWillAppear(p)
uMM = CMMotionManager()
uMM.accelerometerUpdateInterval = 0.2
// Using the main queue is not recommended, so create a new operation queue and pass it to startAccelerometerUpdatesToQueue.
// Dispatch UI code to the main thread using dispatch_async in the handler.
uMM.startAccelerometerUpdatesToQueue( NSOperationQueue() ) { p, _ in
if p != nil {
println(
abs( p.acceleration.y ) < abs( p.acceleration.x )
? p.acceleration.x > 0 ? "Right" : "Left"
: p.acceleration.y > 0 ? "Down" : "Up"
)
}
}
}
override func viewDidDisappear(p: Bool) {
super.viewDidDisappear(p)
uMM.stopAccelerometerUpdates()
}
That functionality is correct. If it always returned the device orientation even when locked, the orientation-changed notifications would fire, which would defeat the purpose of the lock.
To answer your question: there is no way to read the raw values from the accelerometer without using private APIs.
Edit:
After reviewing the documentation, it seems that the UIAccelerometer class provides this data even when the orientation is locked. This change was applied in iOS 4 and above. Even though you can use this data, you still need to process it to determine the orientation. This is not an easy task, as you need to monitor the changes constantly and compare them against older values.
Also, take a look at this guide for handling motion events. This may provide you with another route to determining the orientation.
Set up your view controller or whatever to conform to UIAccelerometerDelegate, and start listening for changes (you can set it to 10 Hz).
#define kAccelerometerFrequency 10.0 //Hz
-(void)viewDidAppear:(BOOL)animated {
DLog(#"viewDidAppear");
[[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
UIAccelerometer* a = [UIAccelerometer sharedAccelerometer];
a.updateInterval = 1 / kAccelerometerFrequency;
a.delegate = self;
}
-(void)viewWillDisappear:(BOOL)animated {
DLog(#"viewWillDisappear");
UIAccelerometer* a = [UIAccelerometer sharedAccelerometer];
a.delegate = nil;
[[UIDevice currentDevice] endGeneratingDeviceOrientationNotifications];
}
#ifdef DEBUG
+(NSString*)orientationToText:(const UIInterfaceOrientation)ORIENTATION {
switch (ORIENTATION) {
case UIInterfaceOrientationPortrait:
return @"UIInterfaceOrientationPortrait";
case UIInterfaceOrientationPortraitUpsideDown:
return @"UIInterfaceOrientationPortraitUpsideDown";
case UIInterfaceOrientationLandscapeLeft:
return @"UIInterfaceOrientationLandscapeLeft";
case UIInterfaceOrientationLandscapeRight:
return @"UIInterfaceOrientationLandscapeRight";
}
return @"Unknown orientation!";
}
#endif
#pragma mark UIAccelerometerDelegate
-(void)accelerometer:(UIAccelerometer *)accelerometer didAccelerate:(UIAcceleration *)acceleration {
UIInterfaceOrientation orientationNew;
if (acceleration.x >= 0.75) {
orientationNew = UIInterfaceOrientationLandscapeLeft;
}
else if (acceleration.x <= -0.75) {
orientationNew = UIInterfaceOrientationLandscapeRight;
}
else if (acceleration.y <= -0.75) {
orientationNew = UIInterfaceOrientationPortrait;
}
else if (acceleration.y >= 0.75) {
orientationNew = UIInterfaceOrientationPortraitUpsideDown;
}
else {
// Consider same as last time
return;
}
if (orientationNew == orientationLast)
return;
NSLog(#"Going from %# to %#!", [[self class] orientationToText:orientationLast], [[self class] orientationToText:orientationNew]);
orientationLast = orientationNew;
}
#pragma mark -
You need to define UIInterfaceOrientation orientationLast as a member variable and you're set.
Handling all 6 orientations
Though we don't often care about the FaceUp / FaceDown orientations, they're still important.
Taking them into account leads to a much more appropriate sensitivity for orientation changes, while leaving them out can lead to metastability and hysteresis.
Here's how I handled it:
- (void)startMonitoring
{
[self.motionManager startAccelerometerUpdatesToQueue:self.opQueue withHandler:^(CMAccelerometerData * _Nullable accelerometerData, NSError * _Nullable error) {
if (error != nil)
{
NSLog(#"Accelerometer error: %#", error);
}
else
{
float const threshold = 40.0;
BOOL (^isNearValue) (float value1, float value2) = ^BOOL(float value1, float value2)
{
return fabsf(value1 - value2) < threshold;
};
BOOL (^isNearValueABS) (float value1, float value2) = ^BOOL(float value1, float value2)
{
return isNearValue(fabsf(value1), fabsf(value2));
};
float yxAtan = (atan2(accelerometerData.acceleration.y, accelerometerData.acceleration.x)) * 180 / M_PI;
float zyAtan = (atan2(accelerometerData.acceleration.z, accelerometerData.acceleration.y)) * 180 / M_PI;
float zxAtan = (atan2(accelerometerData.acceleration.z, accelerometerData.acceleration.x)) * 180 / M_PI;
UIDeviceOrientation orientation = self.orientation;
if (isNearValue(-90.0, yxAtan) && isNearValueABS(180.0, zyAtan))
{
orientation = UIDeviceOrientationPortrait;
}
else if (isNearValueABS(180.0, yxAtan) && isNearValueABS(180.0, zxAtan))
{
orientation = UIDeviceOrientationLandscapeLeft;
}
else if (isNearValueABS(0.0, yxAtan) && isNearValueABS(0.0, zxAtan))
{
orientation = UIDeviceOrientationLandscapeRight;
}
else if (isNearValue(90.0, yxAtan) && isNearValueABS(0.0, zyAtan))
{
orientation = UIDeviceOrientationPortraitUpsideDown;
}
else if (isNearValue(-90.0, zyAtan) && isNearValue(-90.0, zxAtan))
{
orientation = UIDeviceOrientationFaceUp;
}
else if (isNearValue(90.0, zyAtan) && isNearValue(90.0, zxAtan))
{
orientation = UIDeviceOrientationFaceDown;
}
if (self.orientation != orientation)
{
dispatch_async(dispatch_get_main_queue(), ^{
[self orientationDidChange:orientation];
});
}
}
}];
}
Additionally, I've added a threshold value of 40.0 (instead of 45.0). This makes changes less sensitive, preventing hysteresis at inflection points.
If you only want to react to changes among the main 4 orientations, just do this:
if (UIDeviceOrientationIsPortrait(orientation) || UIDeviceOrientationIsLandscape(orientation))
{
// Do something
}
The UIAccelerometer class continues to function when the device orientation is locked. You'll have to work out your own method of turning its values into orientation readings, but it shouldn't be especially complicated.
Have a play with Apple's AccelerometerGraph sample app to see what values the accelerometer outputs in different orientations.
My solution uses CoreMotion; it works even when the device's orientation is locked.
let motionManager: CMMotionManager = CMMotionManager()
In the viewDidLoad method:
motionManager.deviceMotionUpdateInterval = 0.01
if motionManager.accelerometerAvailable{
let queue = NSOperationQueue()
motionManager.startAccelerometerUpdatesToQueue(queue, withHandler:
{data, error in
guard let data = data else{
return
}
let angle = (atan2(data.acceleration.y,data.acceleration.x))*180/M_PI;
print(angle)
if(fabs(angle)<=45){
self.orientation = AVCaptureVideoOrientation.LandscapeLeft
print("landscape left")
}else if((fabs(angle)>45)&&(fabs(angle)<135)){
if(angle>0){
self.orientation = AVCaptureVideoOrientation.PortraitUpsideDown
print("portrait upside Down")
}else{
self.orientation = AVCaptureVideoOrientation.Portrait
print("portrait")
}
}else{
self.orientation = AVCaptureVideoOrientation.LandscapeRight
print("landscape right")
}
}
)
} else {
print("Accelerometer is not available")
}
Hope it helps.
Most of the answers use the accelerometer, which reports the overall acceleration: user-generated motion plus gravity.
To get the device orientation, it is more accurate to use the gravity reading alone. Using gravity avoids the edge case where the user accelerates the device in a particular direction. To access gravity, we have to use the startDeviceMotionUpdates API instead.
let motionManager = CMMotionManager()
motionManager.startDeviceMotionUpdates(to: OperationQueue()) { (data, error) in
guard let gravity = data?.gravity else { return }
let newDeviceOrientation: UIDeviceOrientation
if abs(gravity.y) < abs(gravity.x) {
newDeviceOrientation = gravity.x > 0 ? .landscapeRight : .landscapeLeft
} else {
newDeviceOrientation = gravity.y > 0 ? .portraitUpsideDown : .portrait
}
}
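One caveat to add (my note, not part of the original snippet): the CMMotionManager must be kept alive, for example as a property, or the updates stop when it is deallocated; and the updates should be stopped once you are done with them.
// Stop the updates started above when they are no longer needed.
motionManager.stopDeviceMotionUpdates()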
Here is an example of detecting device rotation and returning the UIDeviceOrientation.
This solution uses CoreMotion and works in all cases.
Example
let orientationManager = APOrientationManager()
orientationManager.delegate = self
/// start detecting rotation
orientationManager.startMeasuring()
/// get current interface orientation
let orientation = orientationManager.currentInterfaceOrientation()
print(orientation.rawValue)
/// stop detecting rotation
orientationManager.stopMeasuring()
orientationManager.delegate = nil
Conform to the delegate:
extension ViewController: APOrientationManagerDelegate {
func didChange(deviceOrientation: UIDeviceOrientation) {
/// update UI in main thread
}
}
APOrientationManager.swift
import Foundation
import CoreMotion
import AVFoundation
import UIKit
protocol APOrientationManagerDelegate: class {
func didChange(deviceOrientation: UIDeviceOrientation)
}
class APOrientationManager {
private let motionManager = CMMotionManager()
private let queue = OperationQueue()
private var deviceOrientation: UIDeviceOrientation = .unknown
weak var delegate: APOrientationManagerDelegate?
init() {
motionManager.accelerometerUpdateInterval = 1.0
motionManager.deviceMotionUpdateInterval = 1.0
motionManager.gyroUpdateInterval = 1.0
motionManager.magnetometerUpdateInterval = 1.0
}
func startMeasuring() {
guard motionManager.isAccelerometerAvailable else {
return
}
motionManager.startAccelerometerUpdates(to: queue) { [weak self] (accelerometerData, error) in
guard let strongSelf = self else {
return
}
guard let accelerometerData = accelerometerData else {
return
}
let acceleration = accelerometerData.acceleration
let xx = -acceleration.x
let yy = acceleration.y
let z = acceleration.z
let angle = atan2(yy, xx)
var deviceOrientation = strongSelf.deviceOrientation
let absoluteZ = fabs(z)
if deviceOrientation == .faceUp || deviceOrientation == .faceDown {
if absoluteZ < 0.845 {
if angle < -2.6 {
deviceOrientation = .landscapeRight
} else if angle > -2.05 && angle < -1.1 {
deviceOrientation = .portrait
} else if angle > -0.48 && angle < 0.48 {
deviceOrientation = .landscapeLeft
} else if angle > 1.08 && angle < 2.08 {
deviceOrientation = .portraitUpsideDown
}
} else if z < 0 {
deviceOrientation = .faceUp
} else if z > 0 {
deviceOrientation = .faceDown
}
} else {
if z > 0.875 {
deviceOrientation = .faceDown
} else if z < -0.875 {
deviceOrientation = .faceUp
} else {
switch deviceOrientation {
case .landscapeLeft:
if angle < -1.07 {
deviceOrientation = .portrait
}
if angle > 1.08 {
deviceOrientation = .portraitUpsideDown
}
case .landscapeRight:
if angle < 0 && angle > -2.05 {
deviceOrientation = .portrait
}
if angle > 0 && angle < 2.05 {
deviceOrientation = .portraitUpsideDown
}
case .portraitUpsideDown:
if angle > 2.66 {
deviceOrientation = .landscapeRight
}
if angle < 0.48 {
deviceOrientation = .landscapeLeft
}
case .portrait:
if angle > -0.47 {
deviceOrientation = .landscapeLeft
}
if angle < -2.64 {
deviceOrientation = .landscapeRight
}
default:
if angle > -0.47 {
deviceOrientation = .landscapeLeft
}
if angle < -2.64 {
deviceOrientation = .landscapeRight
}
}
}
}
if strongSelf.deviceOrientation != deviceOrientation {
strongSelf.deviceOrientation = deviceOrientation
strongSelf.delegate?.didChange(deviceOrientation: deviceOrientation)
}
}
}
func stopMeasuring() {
motionManager.stopAccelerometerUpdates()
}
func currentInterfaceOrientation() -> AVCaptureVideoOrientation {
switch deviceOrientation {
case .portrait:
return .portrait
case .landscapeRight:
return .landscapeLeft
case .landscapeLeft:
return .landscapeRight
case .portraitUpsideDown:
return .portraitUpsideDown
default:
return .portrait
}
}
}
Use of CMMotionManager may help, but not in the way shown above; that logic is not stable. I have tested it thoroughly and found that looking at the raw acceleration.x/y/z values does not reliably determine the orientation.
Instead, I found a way to derive the orientation from an angle, i.e.
float angle = (atan2(accelerometerData.acceleration.y,accelerometerData.acceleration.x))*180/M_PI;
And for the orientation:
if(fabs(angle<=45)currOrientation=UIDeviceOrientationLandscapeRight;
else if((fabs(angle)>45)&&(fabs(angle)<135))currOrientation=((angle>0)?UIDeviceOrientationPortraitUpsideDown:UIDeviceOrientationPortrait);
else currOrientation = UIDeviceOrientationLandscapeLeft;
This might come in handy for someone, though it doesn't help me with the 2 other orientations, UIDeviceOrientationFaceUp and UIDeviceOrientationFaceDown.
Using Satachito's great answer, here is code which will also detect whether the device is face up or face down.
import CoreMotion
var mm: CMMotionManager!
init() {
self.mm = CMMotionManager()
self.mm.accelerometerUpdateInterval = 0.2
}
public func startOrientationUpdates() {
// Using the main queue is not recommended, so create a new operation queue and pass it to startAccelerometerUpdates(to:).
// Dispatch UI code to the main thread using dispatch_async in the handler.
self.mm.startAccelerometerUpdates( to: OperationQueue() ) { p, _ in
if let p = p {
if(p.acceleration.x > -0.3 && p.acceleration.x < 0.3 && p.acceleration.z < -0.95) {
print("face up")
}
else if(p.acceleration.x > -0.3 && p.acceleration.x < 0.3 && p.acceleration.z > 0.95) {
print("face down")
}
else {
print(
abs( p.acceleration.y ) < abs( p.acceleration.x )
? p.acceleration.x > 0 ? "Right" : "Left"
: p.acceleration.y > 0 ? "Down" : "Up"
)
}
}
}
}
public func endOrientationUpdates() {
self.mm.stopAccelerometerUpdates()
}
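Hypothetical usage from a view controller, assuming init() and the two methods above are wrapped in a small class (the OrientationWatcher name is mine, not from the answer):
let orientationWatcher = OrientationWatcher() // hypothetical wrapper owning mm

override func viewWillAppear(_ animated: Bool) {
    super.viewWillAppear(animated)
    orientationWatcher.startOrientationUpdates()
}

override func viewDidDisappear(_ animated: Bool) {
    super.viewDidDisappear(animated)
    orientationWatcher.endOrientationUpdates()
}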
I am using the accelerometer to scroll multiple subviews in a UIScrollView. I want the view (portrait orientation) to scroll to the right when the user flicks the iPhone to the right, and to the left when the device is flicked to the left.
I thought I could do that just by noting positive or negative x-acceleration values, but I see that the values are usually a mixture of positive and negative. I have set the floor at 1.5 g to eliminate non-shake movement, and am looking at the x values over a duration of 0.5 seconds.
I'm sure there is a trigonometric method for determining the overall direction of a flick, and that you have to measure values over the duration of the flick motion. I'm also sure that someone has already figured this one out.
Any ideas out there?
Thanks
I developed a solution which gives me better feedback than the proposed solution (for left and right shakes only).
The way I did it here is quite sensitive (it recognizes a small shake), but the sensitivity can be adapted by changing tresholdFirstMove and tresholdBackMove (increase them for lower sensitivity).
In Swift :
(in your viewController. And add "import CoreMotion")
var startedLeftTilt = false
var startedRightTilt = false
var dateLastShake = NSDate(timeIntervalSinceNow: -2)
var dateStartedTilt = NSDate(timeIntervalSinceNow: -2)
var motionManager = CMMotionManager()
let tresholdFirstMove = 3.0
let tresholdBackMove = 0.5
override func viewDidLoad() {
// your code
motionManager.gyroUpdateInterval = 0.01
}
override func viewDidAppear(animated: Bool) {
super.viewDidAppear(animated)
motionManager.startGyroUpdatesToQueue(NSOperationQueue.currentQueue(), withHandler: { (gyroData, error) -> Void in
self.handleGyroData(gyroData.rotationRate)
})
}
private func handleGyroData(rotation: CMRotationRate) {
if fabs(rotation.z) > tresholdFirstMove && fabs(dateLastShake.timeIntervalSinceNow) > 0.3
{
if !startedRightTilt && !startedLeftTilt
{
dateStartedTilt = NSDate()
if (rotation.z > 0)
{
startedLeftTilt = true
startedRightTilt = false
}
else
{
startedRightTilt = true
startedLeftTilt = false
}
}
}
if fabs(dateStartedTilt.timeIntervalSinceNow) >= 0.3
{
startedRightTilt = false
startedLeftTilt = false
}
else
{
if (fabs(rotation.z) > tresholdBackMove)
{
if startedLeftTilt && rotation.z < 0
{
dateLastShake = NSDate()
startedRightTilt = false
startedLeftTilt = false
println("\\\n Shaked left\n/")
}
else if startedRightTilt && rotation.z > 0
{
dateLastShake = NSDate()
startedRightTilt = false
startedLeftTilt = false
println("\\\n Shaked right\n/")
}
}
}
}
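One addition of mine that the answer leaves out: stop the gyro updates when the view goes away, otherwise the handler keeps firing.
override func viewWillDisappear(animated: Bool) {
    super.viewWillDisappear(animated)
    motionManager.stopGyroUpdates() // counterpart to startGyroUpdatesToQueue
}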
OK, I worked out a solution. When I detect a shake motion (acceleration greater than 1.5 on the x axis), I start a timer and set a BOOL to true. While the BOOL is true I add up the acceleration values. When the timer expires, I stop adding acceleration values and determine the direction of the shake from the sign of the total acceleration.
- (void)accelerometer:(UIAccelerometer *)acel didAccelerate:(UIAcceleration *)aceler {
if (fabsf(aceler.x) > 1.5)
{
shake = YES;
NSTimeInterval myInterval = .75;
[NSTimer scheduledTimerWithTimeInterval:myInterval target:self selector:@selector(endShake) userInfo:nil repeats:NO];
return;
}
if(shake)
{
totalG += aceler.x;
}
}
- (void) endShake {
shake = NO;
int direction;
if (totalG < 0) direction = 1;
if (totalG > 0) direction = -1;
[self changePageByShake:direction];
totalG = 0;
}