> REQ Swift refactor / code layout / review - swift

I have made a mess of my translation from Obj-C to Swift so I'd really appreciate a refactor/code layout review. The curly braces are really throwing me. Are there any Xcode plugins or something to help me better manage my code blocks?
Some of my functions and calculations may not be very efficient either, so if you have any suggestions for those areas that would be great too. For example, if you have used or seen better filter algorithms, etc.
p.s. thanks Martin.
import UIKit
import Foundation
import AVFoundation
import CoreMedia
import CoreVideo
let minFramesForFilterToSettle = 10
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
let captureSession = AVCaptureSession()
// If we find a device we'll store it here for later use
var captureDevice : AVCaptureDevice?
var validFrameCounter: Int = 0
var detector: Detector!
var filter: Filter!
// var currentState = CurrentState.stateSampling // Is this initialized correctly?
override func viewDidLoad() {
super.viewDidLoad()
self.detector = Detector()
self.filter = Filter()
// startCameraCapture() // call to un-used function.
captureSession.sessionPreset = AVCaptureSessionPresetHigh
let devices = AVCaptureDevice.devices()
// Loop through all the capture devices on this phone
for device in devices {
// Make sure this particular device supports video
if (device.hasMediaType(AVMediaTypeVideo)) {
// Finally check the position and confirm we've got the back camera
if(device.position == AVCaptureDevicePosition.Front) {
captureDevice = device as? AVCaptureDevice
if captureDevice != nil {
//println("Capture device found")
beginSession()
}
}
}
}
} // end of viewDidLoad ???
// configure device for camera and focus mode // maybe not needed since we don't use focus?
func configureDevice() {
if let device = captureDevice {
device.lockForConfiguration(nil)
//device.focusMode = .Locked
device.unlockForConfiguration()
}
}
// start capturing frames
func beginSession() {
// Create the AVCapture Session
configureDevice()
var err : NSError? = nil
captureSession.addInput(AVCaptureDeviceInput(device: captureDevice, error: &err))
if err != nil {
println("error: \(err?.localizedDescription)")
}
// Automatic Switch ON torch mode
if captureDevice!.hasTorch {
// lock your device for configuration
captureDevice!.lockForConfiguration(nil)
// check if your torchMode is on or off. If on turns it off otherwise turns it on
captureDevice!.torchMode = captureDevice!.torchActive ? AVCaptureTorchMode.Off : AVCaptureTorchMode.On
// sets the torch intensity to 100%
captureDevice!.setTorchModeOnWithLevel(1.0, error: nil)
// unlock your device
captureDevice!.unlockForConfiguration()
}
// Create a AVCaptureInput with the camera device
var deviceInput : AVCaptureInput = AVCaptureDeviceInput.deviceInputWithDevice(captureDevice, error: &err) as! AVCaptureInput
if deviceInput == nil! {
println("error: \(err?.localizedDescription)")
}
// Set the output
var videoOutput : AVCaptureVideoDataOutput = AVCaptureVideoDataOutput()
// create a queue to run the capture on
var captureQueue : dispatch_queue_t = dispatch_queue_create("captureQueue", nil)
// setup ourself up as the capture delegate
videoOutput.setSampleBufferDelegate(self, queue: captureQueue)
// configure the pixel format
videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String : Int(kCVPixelFormatType_32BGRA)] // kCVPixelBufferPixelFormatTypeKey is a CFString btw.
// set the minimum acceptable frame rate to 10 fps
captureDevice!.activeVideoMinFrameDuration = CMTimeMake(1, 10)
// and the size of the frames we want - we'll use the smallest frame size available
captureSession.sessionPreset = AVCaptureSessionPresetLow
// Add the input and output
captureSession.addInput(deviceInput)
captureSession.addOutput(videoOutput)
// Start the session
captureSession.startRunning()
// we're now sampling from the camera
enum CurrentState {
case statePaused
case stateSampling
}
var currentState = CurrentState.statePaused
func setState(state: CurrentState){
switch state
{
case .statePaused:
// what goes here? Something like this?
UIApplication.sharedApplication().idleTimerDisabled = false
case .stateSampling:
// what goes here? Something like this?
UIApplication.sharedApplication().idleTimerDisabled = true // singletons
}
}
// we're now sampling from the camera
currentState = CurrentState.stateSampling
// stop the app from sleeping
UIApplication.sharedApplication().idleTimerDisabled = true
// update our UI on a timer every 0.1 seconds
NSTimer.scheduledTimerWithTimeInterval(0.1, target: self, selector: Selector("update"), userInfo: nil, repeats: true)
func stopCameraCapture() {
captureSession.stopRunning()
captureSession = nil
}
// pragma mark Pause and Resume of detection
func pause() {
if currentState == CurrentState.statePaused {
return
}
// switch off the torch
if captureDevice!.isTorchModeSupported(AVCaptureTorchMode.On) {
captureDevice!.lockForConfiguration(nil)
captureDevice!.torchMode = AVCaptureTorchMode.Off
captureDevice!.unlockForConfiguration()
}
currentState = CurrentState.statePaused
// let the application go to sleep if the phone is idle
UIApplication.sharedApplication().idleTimerDisabled = false
}
func resume() {
if currentState != CurrentState.statePaused {
return
}
// switch on the torch
if captureDevice!.isTorchModeSupported(AVCaptureTorchMode.On) {
captureDevice!.lockForConfiguration(nil)
captureDevice!.torchMode = AVCaptureTorchMode.On
captureDevice!.unlockForConfiguration()
}
currentState = CurrentState.stateSampling
// stop the app from sleeping
UIApplication.sharedApplication().idleTimerDisabled = true
}
// beginning of paste
// r,g,b values are from 0 to 1 // h = [0,360], s = [0,1], v = [0,1]
// if s == 0, then h = -1 (undefined)
func RGBtoHSV(r : Float, g : Float, b : Float, inout h : Float, inout s : Float, inout v : Float) {
let rgbMin = min(r, g, b)
let rgbMax = max(r, g, b)
let delta = rgbMax - rgbMin
v = rgbMax
s = delta/rgbMax
h = Float(0.0)
// start of calculation
if (rgbMax != 0) {
s = delta / rgbMax
}
else{
// r = g = b = 0
s = 0
h = -1
return
}
if r == rgbMax {
h = (g - b) / delta
}
else if (g == rgbMax) {
h = 2 + (b - r ) / delta
}
else{
h = 4 + (r - g) / delta
h = 60
}
if (h < 0) {
h += 360
}
}
// process the frame of video
func captureOutput(captureOutput:AVCaptureOutput, didOutputSampleBuffer sampleBuffer:CMSampleBuffer, fromConnection connection:AVCaptureConnection) {
// if we're paused don't do anything
if currentState == CurrentState.statePaused {
// reset our frame counter
self.validFrameCounter = 0
return
}
// this is the image buffer
var cvimgRef:CVImageBufferRef = CMSampleBufferGetImageBuffer(sampleBuffer)
// Lock the image buffer
CVPixelBufferLockBaseAddress(cvimgRef, 0)
// access the data
var width: size_t = CVPixelBufferGetWidth(cvimgRef)
var height:size_t = CVPixelBufferGetHeight(cvimgRef)
// get the raw image bytes
let buf = UnsafeMutablePointer<UInt8>(CVPixelBufferGetBaseAddress(cvimgRef))
var bprow: size_t = CVPixelBufferGetBytesPerRow(cvimgRef)
var r = 0
var g = 0
var b = 0
for var y = 0; y < height; y++ {
for var x = 0; x < width * 4; x += 4 {
b+=buf[x](UnsafeMutablePointer(UInt8)) // fix
g+=buf[x + 1](UnsafeMutablePointer(Float)) // fix
r+=buf[x + 2](UnsafeMutablePointer(Int)) // fix
}
buf += bprow()
}
r /= 255 * (width*height)
g /= 255 * (width*height)
b /= 255 * (width*height)
}
// convert from rgb to hsv colourspace
var h = Float()
var s = Float()
var v = Float()
RGBtoHSV(r, g, b, &h, &s, &v)
// do a sanity check for blackness
if s > 0.5 && v > 0.5 {
// increment the valid frame count
validFrameCounter++
// filter the hue value - the filter is a simple band pass filter that removes any DC component and any high frequency noise
var filtered: Float = filter.processValue(h)
// have we collected enough frames for the filter to settle?
if validFrameCounter > minFramesForFilterToSettle {
// add the new value to the detector
detector.addNewValue(filtered, atTime: CACurrentMediaTime())
}
} else {
validFrameCounter = 0
// clear the detector - we only really need to do this once, just before we start adding valid samples
detector.reset()
}
}
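The Filter and Detector classes aren't shown in the post, so as a point of reference only, here is a minimal sketch (my own code, in current Swift, not the poster's) of the kind of band-pass behaviour the processValue comment describes: a one-pole high-pass to remove the DC component, followed by a short moving average to suppress high-frequency noise.

// Not the poster's Filter class (it isn't shown above); a minimal sketch of the
// band-pass behaviour the comment describes: a one-pole high-pass removes the
// DC component, then a short moving average suppresses high-frequency noise.
final class SimpleBandPassFilter {
    private var previousInput: Float = 0
    private var previousHighPass: Float = 0
    private var window: [Float] = []
    private let windowSize: Int
    private let alpha: Float

    init(windowSize: Int = 5, alpha: Float = 0.95) {
        self.windowSize = windowSize
        self.alpha = alpha
    }

    func processValue(_ value: Float) -> Float {
        // High-pass: y[n] = alpha * (y[n-1] + x[n] - x[n-1]) removes slow drift / DC offset.
        let highPassed = alpha * (previousHighPass + value - previousInput)
        previousInput = value
        previousHighPass = highPassed

        // Low-pass: average the last windowSize high-passed samples.
        window.append(highPassed)
        if window.count > windowSize { window.removeFirst() }
        return window.reduce(0, +) / Float(window.count)
    }
}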

You can actually call it like this:
RGBtoHSV(r: r, g: g, b: b, h: &h, s: &s, v: &v)
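A side note on RGBtoHSV itself: the stray h = 60 in the last branch looks like it was meant to be h *= 60 applied after every branch (the usual conversion works in units of 60 degrees). If you later move off Swift 1.x, the conversion is also arguably easier to manage as a function returning a tuple rather than writing through inout parameters. A rough sketch in current Swift:

// A rough sketch of the RGB-to-HSV conversion as a tuple-returning function.
// r, g, b are expected in 0...1; h comes back in degrees [0, 360), s and v in 0...1.
func rgbToHSV(r: Float, g: Float, b: Float) -> (h: Float, s: Float, v: Float) {
    let rgbMax = max(r, g, b)
    let rgbMin = min(r, g, b)
    let delta = rgbMax - rgbMin
    let v = rgbMax

    guard rgbMax != 0, delta != 0 else {
        // Black or pure grey: saturation is 0 and hue is undefined (returned as 0 here).
        return (0, 0, v)
    }
    let s = delta / rgbMax

    var h: Float
    if r == rgbMax {
        h = (g - b) / delta          // between yellow and magenta
    } else if g == rgbMax {
        h = 2 + (b - r) / delta      // between cyan and yellow
    } else {
        h = 4 + (r - g) / delta      // between magenta and cyan
    }
    h *= 60                          // convert to degrees
    if h < 0 { h += 360 }
    return (h, s, v)
}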

Related

Perform an action every 3 times in a for loop

I have a function for setting up a timer; how can I perform an action every 3 iterations of a for loop (1, 2, 3 and 4 = 1, 5 = 2, 6 = 3), even with 6 or more iterations?
func duration(interval:Double,rep:Int){
let queue = DispatchQueue.main
let timer = DispatchSource.makeTimerSource(queue: queue)
var num = 0
for i in 0...12{
if (i%3 == 0){
perform something
num ++
}else if (i%2 == 0){
perform something
num ++
}else{
perform something
num ++
}
if num == 12{
timer.cancel()
}
}
}
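As an aside, the DispatchSourceTimer in the snippet above is created but never scheduled or resumed, so it will never fire. A minimal sketch of a working GCD timer, assuming the goal is simply to run an action on every third tick and stop after 12 ticks:

import Foundation

// Sketch only: schedule and resume the timer source, count ticks, and run the
// "every 3rd time" action inside the event handler. Names are illustrative.
func startTimer(interval: Double, repetitions: Int) -> DispatchSourceTimer {
    let timer = DispatchSource.makeTimerSource(queue: .main)
    var tick = 0

    timer.schedule(deadline: .now() + interval, repeating: interval)
    timer.setEventHandler {
        tick += 1
        if tick % 3 == 0 {
            print("every 3rd tick: \(tick)")   // perform the periodic action here
        }
        if tick >= repetitions {
            timer.cancel()
        }
    }
    timer.resume()
    return timer
}

// Usage with hypothetical values: fire once per second, stop after 12 ticks.
let timer = startTimer(interval: 1.0, repetitions: 12)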
The SwiftUI way to do this is a Timer publisher.
Example:
struct ContentView: View {
@State private var number = 0
@State private var multiple: Int?
private let startDate = Date()
private let timer = Timer.publish(every: 1, on: .main, in: .common).autoconnect()
var body: some View {
VStack {
Text("Number: \(number)")
Text("Multiple: \(multiple != nil ? String(multiple!) : "no")")
}
.onReceive(timer) { date in
let interval = Int(date.timeIntervalSince(startDate))
print(interval)
number = interval
if interval.isMultiple(of: 3) {
// multiple of 3
multiple = 3
} else if interval.isMultiple(of: 2) {
// multiple of 2 (careful - 6 will be ignored because it is already a multiple of 3)
multiple = 2
} else {
// neither multiple of 2 or 3
multiple = nil
}
if interval == 12 {
timer.upstream.connect().cancel()
}
}
}
}
Result:

Getting a value from UnsafeMutablePointer<Int16> in Swift for audio data purposes

I'm working on converting this code to Swift, which helps me get audio data for visualizations. The Obj-C code I'm working with, which works well, is:
while (reader.status == AVAssetReaderStatusReading) {
AVAssetReaderTrackOutput *trackOutput = (AVAssetReaderTrackOutput *)[reader.outputs objectAtIndex:0];
self.sampleBufferRef = [trackOutput copyNextSampleBuffer];
if (self.sampleBufferRef) {
CMBlockBufferRef blockBufferRef = CMSampleBufferGetDataBuffer(self.sampleBufferRef);
size_t bufferLength = CMBlockBufferGetDataLength(blockBufferRef);
void *data = malloc(bufferLength);
CMBlockBufferCopyDataBytes(blockBufferRef, 0, bufferLength, data);
SInt16 *samples = (SInt16 *)data;
int sampleCount = bufferLength / bytesPerInputSample;
for (int i=0; i<sampleCount; i+=100) {
Float32 sample = (Float32) *samples++;
sample = decibel(sample);
sample = minMaxX(sample,noiseFloor,0);
tally += sample;
for (int j=1; j<channelCount; j++)
samples++;
tallyCount++;
if (tallyCount == downsampleFactor) {
sample = tally / tallyCount;
maximum = maximum > sample ? maximum : sample;
[fullSongData appendBytes:&sample length:sizeof(sample)];//tried dividing the sample by 2
tally = 0;
tallyCount = 0;
outSamples++;
}
}
CMSampleBufferInvalidate(self.sampleBufferRef);
CFRelease(self.sampleBufferRef);
free(data);
}
}
In Swift, this is the part I'm trying to write:
while (reader.status == AVAssetReaderStatus.Reading) {
var trackOutput = reader.outputs[0] as! AVAssetReaderTrackOutput
self.sampleBufferRef = trackOutput.copyNextSampleBuffer()
if (self.sampleBufferRef != nil) {
let blockBufferRef = CMSampleBufferGetDataBuffer(self.sampleBufferRef)
let bufferLength = CMBlockBufferGetDataLength(blockBufferRef)
var data = NSMutableData(length: bufferLength)
CMBlockBufferCopyDataBytes(blockBufferRef, 0, bufferLength, data!.mutableBytes)
var samples = UnsafeMutablePointer<Int16>(data!.mutableBytes)
var sampleCount = Int32(bufferLength)/bytesPerInputSample
for var i = 0; i < Int(sampleCount); i++ {
var sampleValue = CGFloat(samples[i]) etc. etc.
However, when I println() sampleValue, it just comes out as (Opaque Value) in the console. I can't figure out how to actually read the sampleValue.
I'm new at trying to read audio data for visualization purposes. Any help on getting a buffer of audio data to work with would be helpful. Thank you.
Use stride?
let bytesPerInputSample = 4 // assumption ;)
var samplePtr = data.mutableBytes
for _ in stride(from: 0, to: data.length, by: bytesPerInputSample) {
let currentSample = Data(bytes: samplePtr, count: bytesPerInputSample)
// do whatever is needed with current sample
//...
// increase ptr by size of sample
samplePtr = samplePtr + bytesPerInputSample
}
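If the goal is just to see real sample values instead of "(Opaque Value)", the copied bytes can be reinterpreted as Int16 directly. A minimal sketch, assuming 16-bit interleaved PCM; the data constant here stands in for the NSMutableData filled by CMBlockBufferCopyDataBytes in the question:

import Foundation

// Sketch only: reinterpret the raw bytes as Int16 samples and print every 100th one.
let data = NSMutableData(length: 1024)!                  // stand-in for the copied audio bytes
let sampleCount = data.length / MemoryLayout<Int16>.size
let samples = data.mutableBytes.assumingMemoryBound(to: Int16.self)

for i in stride(from: 0, to: sampleCount, by: 100) {
    let sampleValue = Float(samples[i])                  // a real amplitude value, not an opaque pointer
    print(sampleValue)
}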

Exception 'NSInvalidArgumentException', reason: 'Attempted to add a SKNode which already has a parent'

I can't figure out this problem. I'm deleting a certain SpriteNode, then re-adding it sometimes on a condition; however, it crashes every time I call addChild(). I know a SpriteNode can only have one parent, so I don't understand this. Here is the relevant code:
override func touchesBegan(touches: NSSet, withEvent event:UIEvent) {
var touch: UITouch = touches.anyObject() as UITouch
var location = touch.locationInNode(self)
var node = self.nodeAtPoint(location)
for var i=0; i < tileNodeArray.count; i++
{
if (node == tileNodeArray[i]) {
flippedTilesCount++;
flippedTilesArray.append(tileNodeArray[i])
let removeAction = SKAction.removeFromParent()
tileNodeArray[i].runAction(removeAction)
if flippedTilesCount == 2
{
var helperNode1 = newMemoLabelNode("first",x: 0,y: 0,aka: "first")
var helperNode2 = newMemoLabelNode("second",x: 0,y: 0,aka: "second")
for var k = 0; k < labelNodeArray.count ;k++
{
if labelNodeArray[k].position == flippedTilesArray[0].position
{
helperNode1 = labelNodeArray[k]
}
if labelNodeArray[k].position == flippedTilesArray[1].position
{
helperNode2 = labelNodeArray[k]
}
}
if helperNode1.name == helperNode2.name
{
erasedTiles = erasedTiles + 2;
}
else
{
for var j = 0; j < flippedTilesArray.count ;j++
{
let waitAction = SKAction.waitForDuration(1.0)
flippedTilesArray[j].runAction(waitAction)
//self.addChild(flippedTilesArray[j]);
}
}
flippedTilesCount = 0;
flippedTilesArray = []
println("erased tiles:")
println(erasedTiles)
}
}
}
}
Appreciate your help!
I would recommend that you not use SKAction.removeFromParent but instead remove the node directly by calling:
tileNodeArray[i].removeFromParent()
instead of:
let removeAction = SKAction.removeFromParent()
tileNodeArray[i].runAction(removeAction)
The problem might be that the SKActions don't wait for each other to finish. For example, if you run the waitAction, the other actions keep running.
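A small sketch of that sequencing idea (the function name is illustrative, not from the question): remove the tile directly, wait, then re-add it in a completion block, guarding against the node already having a parent, which is exactly what raises the NSInvalidArgumentException:

import SpriteKit

// Sketch only: remove the tile, wait one second, then re-add it if it is still parentless.
func readdTileAfterDelay(_ tile: SKSpriteNode, to scene: SKScene) {
    tile.removeFromParent()
    let wait = SKAction.wait(forDuration: 1.0)
    scene.run(wait) {
        if tile.parent == nil {        // adding a node that already has a parent throws
            scene.addChild(tile)
        }
    }
}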

iOS device orientation disregarding orientation lock

I want to query the orientation the iPhone is currently in. Using
[UIDevice currentDevice].orientation
works as long as the device isn't orientation-locked. If it is locked, however, it always responds with the locked orientation, not with the actual orientation of the device.
Is there a high-level way to get the actual device orientation?
You can also use Core Motion.
Orientation detection algorithm:
If abs(y) < abs(x), your iPhone is in a landscape position; look at the sign of x to detect right or left.
Otherwise, your iPhone is in a portrait position; look at the sign of y to detect up or upside-down.
If you are interested in face-up or face-down, look at the value of z.
import CoreMotion
var uMM: CMMotionManager!
override func
viewWillAppear( p: Bool ) {
super.viewWillAppear( p )
uMM = CMMotionManager()
uMM.accelerometerUpdateInterval = 0.2
// Using main queue is not recommended. So create new operation queue and pass it to startAccelerometerUpdatesToQueue.
// Dispatch UI code to the main thread using dispatch_async in the handler.
uMM.startAccelerometerUpdatesToQueue( NSOperationQueue() ) { p, _ in
if p != nil {
println(
abs( p.acceleration.y ) < abs( p.acceleration.x )
? p.acceleration.x > 0 ? "Right" : "Left"
: p.acceleration.y > 0 ? "Down" : "Up"
)
}
}
}
override func
viewDidDisappear( p: Bool ) {
super.viewDidDisappear( p )
uMM.stopAccelerometerUpdates()
}
That functionality is correct. If it always returned the device orientation, even if it was locked, the orientation changed notifications would fire. This would defeat the purpose of the lock.
To answer your question, there is no way to read the raw values from the accelerometer without using private APIs.
Edit:
After reviewing the documentation, it seems that the UIAccelerometer class provides this data, even when the orientation is locked. This change was applied in iOS 4 and above. Even though you can use this data, you still need to process it to determine the orientation. This is not an easy task as you need to monitor the changes constantly and compare them to older values.
Also, take a look at this guide for handling motion events. This may provide you with another route to determining the orientation.
Set up your view controller (or whatever) to adopt UIAccelerometerDelegate, and start listening for changes (you can set it to 10 Hz).
#define kAccelerometerFrequency 10.0 //Hz
-(void)viewDidAppear:(BOOL)animated {
DLog(#"viewDidAppear");
[[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
UIAccelerometer* a = [UIAccelerometer sharedAccelerometer];
a.updateInterval = 1 / kAccelerometerFrequency;
a.delegate = self;
}
-(void)viewWillDisappear:(BOOL)animated {
DLog(#"viewWillDisappear");
UIAccelerometer* a = [UIAccelerometer sharedAccelerometer];
a.delegate = nil;
[[UIDevice currentDevice] endGeneratingDeviceOrientationNotifications];
}
#ifdef DEBUG
+(NSString*)orientationToText:(const UIInterfaceOrientation)ORIENTATION {
switch (ORIENTATION) {
case UIInterfaceOrientationPortrait:
return #"UIInterfaceOrientationPortrait";
case UIInterfaceOrientationPortraitUpsideDown:
return #"UIInterfaceOrientationPortraitUpsideDown";
case UIInterfaceOrientationLandscapeLeft:
return #"UIInterfaceOrientationLandscapeLeft";
case UIInterfaceOrientationLandscapeRight:
return #"UIInterfaceOrientationLandscapeRight";
}
return #"Unknown orientation!";
}
#endif
#pragma mark UIAccelerometerDelegate
-(void)accelerometer:(UIAccelerometer *)accelerometer didAccelerate:(UIAcceleration *)acceleration {
UIInterfaceOrientation orientationNew;
if (acceleration.x >= 0.75) {
orientationNew = UIInterfaceOrientationLandscapeLeft;
}
else if (acceleration.x <= -0.75) {
orientationNew = UIInterfaceOrientationLandscapeRight;
}
else if (acceleration.y <= -0.75) {
orientationNew = UIInterfaceOrientationPortrait;
}
else if (acceleration.y >= 0.75) {
orientationNew = UIInterfaceOrientationPortraitUpsideDown;
}
else {
// Consider same as last time
return;
}
if (orientationNew == orientationLast)
return;
NSLog(#"Going from %# to %#!", [[self class] orientationToText:orientationLast], [[self class] orientationToText:orientationNew]);
orientationLast = orientationNew;
}
#pragma mark -
You need to define UIInterfaceOrientation orientationLast as a member variable and you're set.
Handling all 6 orientations
Though we don't often care about FaceUp / FaceDown orientations, they're still important.
Taking them into account leads to a much more appropriate sensitivity for orientation changes, while leaving them out can lead to metastability & hysteresis.
Here's how I handled it -
- (void)startMonitoring
{
[self.motionManager startAccelerometerUpdatesToQueue:self.opQueue withHandler:^(CMAccelerometerData * _Nullable accelerometerData, NSError * _Nullable error) {
if (error != nil)
{
NSLog(#"Accelerometer error: %#", error);
}
else
{
float const threshold = 40.0;
BOOL (^isNearValue) (float value1, float value2) = ^BOOL(float value1, float value2)
{
return fabsf(value1 - value2) < threshold;
};
BOOL (^isNearValueABS) (float value1, float value2) = ^BOOL(float value1, float value2)
{
return isNearValue(fabsf(value1), fabsf(value2));
};
float yxAtan = (atan2(accelerometerData.acceleration.y, accelerometerData.acceleration.x)) * 180 / M_PI;
float zyAtan = (atan2(accelerometerData.acceleration.z, accelerometerData.acceleration.y)) * 180 / M_PI;
float zxAtan = (atan2(accelerometerData.acceleration.z, accelerometerData.acceleration.x)) * 180 / M_PI;
UIDeviceOrientation orientation = self.orientation;
if (isNearValue(-90.0, yxAtan) && isNearValueABS(180.0, zyAtan))
{
orientation = UIDeviceOrientationPortrait;
}
else if (isNearValueABS(180.0, yxAtan) && isNearValueABS(180.0, zxAtan))
{
orientation = UIDeviceOrientationLandscapeLeft;
}
else if (isNearValueABS(0.0, yxAtan) && isNearValueABS(0.0, zxAtan))
{
orientation = UIDeviceOrientationLandscapeRight;
}
else if (isNearValue(90.0, yxAtan) && isNearValueABS(0.0, zyAtan))
{
orientation = UIDeviceOrientationPortraitUpsideDown;
}
else if (isNearValue(-90.0, zyAtan) && isNearValue(-90.0, zxAtan))
{
orientation = UIDeviceOrientationFaceUp;
}
else if (isNearValue(90.0, zyAtan) && isNearValue(90.0, zxAtan))
{
orientation = UIDeviceOrientationFaceDown;
}
if (self.orientation != orientation)
{
dispatch_async(dispatch_get_main_queue(), ^{
[self orientationDidChange:orientation];
});
}
}
}];
}
Additionally, I've added a threshold value of 40.0 (instead of 45.0). This makes changes less sensitive, preventing hysteresis at inflection points.
If you only want to react to changes of the main 4 orientations, just do this
if (UIDeviceOrientationIsPortrait(orientation) || UIDeviceOrientationIsLandscape(orientation))
{
// Do something
}
The UIAccelerometer class continues to function when the device orientation is locked. You'll have to work out your own methods of turning its variables into orientation values, but it shouldn't be especially complicated.
Have a play with Apple's AcceleromoterGraph sample app to see what values the accelerometer outputs in different orientations.
My solution uses Core Motion; it works even when the device has its orientation locked.
let motionManager: CMMotionManager = CMMotionManager()
In the viewDidLoad method:
motionManager.deviceMotionUpdateInterval = 0.01
if motionManager.accelerometerAvailable{
let queue = NSOperationQueue()
motionManager.startAccelerometerUpdatesToQueue(queue, withHandler:
{data, error in
guard let data = data else{
return
}
let angle = (atan2(data.acceleration.y,data.acceleration.x))*180/M_PI;
print(angle)
if(fabs(angle)<=45){
self.orientation = AVCaptureVideoOrientation.LandscapeLeft
print("landscape left")
}else if((fabs(angle)>45)&&(fabs(angle)<135)){
if(angle>0){
self.orientation = AVCaptureVideoOrientation.PortraitUpsideDown
print("portrait upside Down")
}else{
self.orientation = AVCaptureVideoOrientation.Portrait
print("portrait")
}
}else{
self.orientation = AVCaptureVideoOrientation.LandscapeRight
print("landscape right")
}
}
)
} else {
print("Accelerometer is not available")
}
hope it helps.
Most of the answers use the accelerometer, which reports the overall acceleration (user + gravity).
But to get the device orientation, it is more accurate to use the gravity component alone. Using gravity avoids the edge case where the user is accelerating in a particular direction. To access gravity, we have to use the startDeviceMotionUpdates API instead.
let motionManager = CMMotionManager()
motionManager.startDeviceMotionUpdates(to: OperationQueue()) { (data, error) in
guard let gravity = data?.gravity else { return }
let newDeviceOrientation: UIDeviceOrientation
if abs(gravity.y) < abs(gravity.x) {
newDeviceOrientation = gravity.x > 0 ? .landscapeRight : .landscapeLeft
} else {
newDeviceOrientation = gravity.y > 0 ? .portraitUpsideDown : .portrait
}
}
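If you also need the face-up and face-down cases, a common extension (not shown in the snippet above) is to check whether abs(gravity.z) dominates before the x/y comparison: gravity.z is close to -1 when the screen faces up and close to +1 when it faces down.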
Here is an example of detecting device rotation and returning the UIDeviceOrientation.
This solution uses Core Motion and works in all cases.
Example
let orientationManager = APOrientationManager()
orientationManager.delegate = self
/// start detect rotation
orientationManager.startMeasuring()
/// get current interface orientation
let orientation = orientationManager.currentInterfaceOrientation()
print(orientation.rawValue)
/// stop detect rotation
orientationManager.stopMeasuring()
orientationManager.delegate = nil
Conform to the delegate:
extension ViewController: APOrientationManagerDelegate {
func didChange(deviceOrientation: UIDeviceOrientation) {
/// update UI in main thread
}
}
APOrientationManager.swift
import Foundation
import CoreMotion
import AVFoundation
import UIKit
protocol APOrientationManagerDelegate: class {
func didChange(deviceOrientation: UIDeviceOrientation)
}
class APOrientationManager {
private let motionManager = CMMotionManager()
private let queue = OperationQueue()
private var deviceOrientation: UIDeviceOrientation = .unknown
weak var delegate: APOrientationManagerDelegate?
init() {
motionManager.accelerometerUpdateInterval = 1.0
motionManager.deviceMotionUpdateInterval = 1.0
motionManager.gyroUpdateInterval = 1.0
motionManager.magnetometerUpdateInterval = 1.0
}
func startMeasuring() {
guard motionManager.isDeviceMotionAvailable else {
return
}
motionManager.startAccelerometerUpdates(to: queue) { [weak self] (accelerometerData, error) in
guard let strongSelf = self else {
return
}
guard let accelerometerData = accelerometerData else {
return
}
let acceleration = accelerometerData.acceleration
let xx = -acceleration.x
let yy = acceleration.y
let z = acceleration.z
let angle = atan2(yy, xx)
var deviceOrientation = strongSelf.deviceOrientation
let absoluteZ = fabs(z)
if deviceOrientation == .faceUp || deviceOrientation == .faceDown {
if absoluteZ < 0.845 {
if angle < -2.6 {
deviceOrientation = .landscapeRight
} else if angle > -2.05 && angle < -1.1 {
deviceOrientation = .portrait
} else if angle > -0.48 && angle < 0.48 {
deviceOrientation = .landscapeLeft
} else if angle > 1.08 && angle < 2.08 {
deviceOrientation = .portraitUpsideDown
}
} else if z < 0 {
deviceOrientation = .faceUp
} else if z > 0 {
deviceOrientation = .faceDown
}
} else {
if z > 0.875 {
deviceOrientation = .faceDown
} else if z < -0.875 {
deviceOrientation = .faceUp
} else {
switch deviceOrientation {
case .landscapeLeft:
if angle < -1.07 {
deviceOrientation = .portrait
}
if angle > 1.08 {
deviceOrientation = .portraitUpsideDown
}
case .landscapeRight:
if angle < 0 && angle > -2.05 {
deviceOrientation = .portrait
}
if angle > 0 && angle < 2.05 {
deviceOrientation = .portraitUpsideDown
}
case .portraitUpsideDown:
if angle > 2.66 {
deviceOrientation = .landscapeRight
}
if angle < 0.48 {
deviceOrientation = .landscapeLeft
}
case .portrait:
if angle > -0.47 {
deviceOrientation = .landscapeLeft
}
if angle < -2.64 {
deviceOrientation = .landscapeRight
}
default:
if angle > -0.47 {
deviceOrientation = .landscapeLeft
}
if angle < -2.64 {
deviceOrientation = .landscapeRight
}
}
}
}
if strongSelf.deviceOrientation != deviceOrientation {
strongSelf.deviceOrientation = deviceOrientation
strongSelf.delegate?.didChange(deviceOrientation: deviceOrientation)
}
}
}
func stopMeasuring() {
motionManager.stopAccelerometerUpdates()
}
func currentInterfaceOrientation() -> AVCaptureVideoOrientation {
switch deviceOrientation {
case .portrait:
return .portrait
case .landscapeRight:
return .landscapeLeft
case .landscapeLeft:
return .landscapeRight
case .portraitUpsideDown:
return .portraitUpsideDown
default:
return .portrait
}
}
}
Using CMMotionManager may help, but not in the way shown above; that logic is not stable. I have tested thoroughly and found that looking at the raw acceleration.x/y/z values does not reliably determine the orientation.
Instead, I found a way to determine the orientation from the angle, i.e.
float angle = (atan2(accelerometerData.acceleration.y,accelerometerData.acceleration.x))*180/M_PI;
And for the orientation:
if(fabs(angle)<=45)currOrientation=UIDeviceOrientationLandscapeRight;
else if((fabs(angle)>45)&&(fabs(angle)<135))currOrientation=((angle>0)?UIDeviceOrientationPortraitUpsideDown:UIDeviceOrientationPortrait);
else currOrientation = UIDeviceOrientationLandscapeLeft;
This might come in handy for someone, though it doesn't help me find the two other orientations, i.e. UIDeviceOrientationFaceUp and UIDeviceOrientationFaceDown.
Building on Satachito's great answer, here is code which will also detect whether the device is face up or face down:
import CoreMotion
var mm: CMMotionManager!
init() {
self.mm = CMMotionManager()
self.mm.accelerometerUpdateInterval = 0.2
}
public func startOrientationUpdates() {
// Using main queue is not recommended. So create new operation queue and pass it to startAccelerometerUpdatesToQueue.
// Dispatch UI code to the main thread using dispatch_async in the handler.
self.mm.startAccelerometerUpdates( to: OperationQueue() ) { p, _ in
if let p = p {
if(p.acceleration.x > -0.3 && p.acceleration.x < 0.3 && p.acceleration.z < -0.95) {
print("face up")
}
else if(p.acceleration.x > -0.3 && p.acceleration.x < 0.3 && p.acceleration.z > 0.95) {
print("face down")
}
else {
print(
abs( p.acceleration.y ) < abs( p.acceleration.x )
? p.acceleration.x > 0 ? "Right" : "Left"
: p.acceleration.y > 0 ? "Down" : "Up"
)
}
}
}
}
public func endOrientationUpdates() {
self.mm.stopAccelerometerUpdates()
}

How to determine the direction of an iPhone shake

I am using the accelerometer to scroll multiple subViews in a UIScrollVIew. I want the view (portrait orientation) to scroll to the right when the user flicks the iPhone to the right, and scroll to the left when the device is flicked to the left.
I thought I could do that just by noting positive or negative x acceleration values, but I see that the values are usually a mixture of positive and negative values. I have set the floor at 1.5g to eliminate non-shake movement, and am looking at the x values over the duration of .5 seconds.
I'm sure there is a trigonometrical method for determining the overall direction of a flick, and that you have to measure values over the duration of the flick motion. I'm also sure that someone has already figured this one out.
Any ideas out there?
Thanks
I developed a solution which gives me better feedback than the proposed solution (it only handles left and right shakes).
The way I did it here is quite sensitive (it recognizes a small shake), but the sensitivity can be adapted by changing tresholdFirstMove and tresholdBackMove (increase them for lower sensitivity).
In Swift :
(in your viewController. And add "import CoreMotion")
var startedLeftTilt = false
var startedRightTilt = false
var dateLastShake = NSDate(timeIntervalSinceNow: -2)
var dateStartedTilt = NSDate(timeIntervalSinceNow: -2)
var motionManager = CMMotionManager()
let tresholdFirstMove = 3.0
let tresholdBackMove = 0.5
override func viewDidLoad() {
// your code
motionManager.gyroUpdateInterval = 0.01
}
override func viewDidAppear(animated: Bool) {
super.viewDidAppear(animated)
motionManager.startGyroUpdatesToQueue(NSOperationQueue.currentQueue(), withHandler: { (gyroData, error) -> Void in
self.handleGyroData(gyroData.rotationRate)
})
}
private func handleGyroData(rotation: CMRotationRate) {
if fabs(rotation.z) > tresholdFirstMove && fabs(dateLastShake.timeIntervalSinceNow) > 0.3
{
if !startedRightTilt && !startedLeftTilt
{
dateStartedTilt = NSDate()
if (rotation.z > 0)
{
startedLeftTilt = true
startedRightTilt = false
}
else
{
startedRightTilt = true
startedLeftTilt = false
}
}
}
if fabs(dateStartedTilt.timeIntervalSinceNow) >= 0.3
{
startedRightTilt = false
startedLeftTilt = false
}
else
{
if (fabs(rotation.z) > tresholdBackMove)
{
if startedLeftTilt && rotation.z < 0
{
dateLastShake = NSDate()
startedRightTilt = false
startedLeftTilt = false
println("\\\n Shaked left\n/")
}
else if startedRightTilt && rotation.z > 0
{
dateLastShake = NSDate()
startedRightTilt = false
startedLeftTilt = false
println("\\\n Shaked right\n/")
}
}
}
}
OK, worked out a solution. When I detect a shake motion (acceleration greater than 1.5 on the x axis), I start a timer and set a BOOL to true. While the BOOL is true I add acceleration values. When the timer expires, I stop adding acceleration values and determine direction of the shake by the sign of the total acceleration.
- (void)accelerometer:(UIAccelerometer *)acel didAccelerate:(UIAcceleration *)aceler {
if (fabsf(aceler.x) > 1.5)
{
shake = YES;
NSTimeInterval myInterval = .75;
[NSTimer scheduledTimerWithTimeInterval:myInterval target:self selector:@selector(endShake) userInfo:nil repeats:NO];
return;
}
if(shake)
{
totalG += aceler.x;
}
}
- (void) endShake {
shake = NO;
int direction;
if (totalG < 0) direction = 1;
if (totalG > 0) direction = -1;
[self changePageByShake:direction];
totalG = 0;
}
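For completeness, a rough Swift sketch of the same accumulate-then-decide idea using CMMotionManager (UIAccelerometer has long been deprecated). The class and property names are my own, not from the answer above:

import CoreMotion
import Foundation

// Sketch only: once |x| exceeds 1.5 g, accumulate x for 0.75 s, then report the
// shake direction from the sign of the accumulated value.
final class ShakeDirectionDetector {
    private let motionManager = CMMotionManager()
    private var isShaking = false
    private var totalX = 0.0

    var onShake: ((Int) -> Void)?      // +1 or -1, mirroring the direction variable above

    func start() {
        motionManager.accelerometerUpdateInterval = 1.0 / 60.0
        motionManager.startAccelerometerUpdates(to: .main) { [weak self] data, _ in
            guard let self = self, let x = data?.acceleration.x else { return }
            if !self.isShaking, abs(x) > 1.5 {
                self.isShaking = true
                self.totalX = 0
                DispatchQueue.main.asyncAfter(deadline: .now() + 0.75) {
                    self.onShake?(self.totalX < 0 ? 1 : -1)
                    self.isShaking = false
                }
            } else if self.isShaking {
                self.totalX += x
            }
        }
    }

    func stop() {
        motionManager.stopAccelerometerUpdates()
    }
}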