How to reset HIDIdleTime on macOS 10.14 - swift

For the past couple of days I've been trying to write an application that would reset the IORegistry > IOHIDSystem > HIDIdleTime entry. The end goal would be to prevent other applications that read this value from marking the user as idle (it's not only about power management or preventing sleep). Assume that sandboxing is disabled and the application has all necessary permissions (such as accessibility access).
Here are my attempts at doing this (unsuccessful so far):
Attempt 1 - move the mouse cursor to simulate activity:
Variant 1:
let mouseCursorPosition = CGPoint(x: Int.random(in: 0...500), y: Int.random(in: 0...500))
CGWarpMouseCursorPosition(mouseCursorPosition)
Variant 2:
CGDisplayMoveCursorToPoint(CGMainDisplayID(), mouseCursorPosition)
Variant 3 (using CGEvent by itself or together with one of the 2 variants above):
let moveEvent = CGEvent(mouseEventSource: nil,
                        mouseType: .mouseMoved,
                        mouseCursorPosition: mouseCursorPosition,
                        mouseButton: .left)
moveEvent?.post(tap: .cghidEventTap)
Variant 4 (using IOHIDSetMouseLocation / IOHIDPostEvent):
func moveCursor() {
    let service = IOServiceGetMatchingService(kIOMasterPortDefault, IOServiceMatching("IOHIDSystem"))
    if service == 0 { return }

    var connect: io_connect_t = 0
    let result = IOServiceOpen(service, mach_task_self_, UInt32(kIOHIDParamConnectType), &connect)
    IOObjectRelease(service)

    if result == kIOReturnSuccess {
        let cursorX = Int16.random(in: 0...100)
        let cursorY = Int16.random(in: 0...100)
        IOHIDSetMouseLocation(connect, Int32(cursorX), Int32(cursorY))
        let cursorLocation = IOGPoint(x: cursorX, y: cursorY)
        var event = NXEventData()
        IOHIDPostEvent(connect, UInt32(NX_MOUSEMOVED), cursorLocation, &event, 0, 0, 0)
    }
}
NOTE: I've since learned that, starting with macOS 10.12, IOHIDPostEvent doesn't reset HIDIdleTime (source: https://github.com/tekezo/Karabiner-Elements/issues/385). I also tried simulating keypresses, without success.
Attempt 2 - overwrite the value directly in the IORegistry
func overwriteValue() -> Bool {
    var iterator: io_iterator_t = 0
    defer { IOObjectRelease(iterator) }
    guard IOServiceGetMatchingServices(kIOMasterPortDefault, IOServiceMatching("IOHIDSystem"), &iterator) == kIOReturnSuccess else { return false }

    let entry: io_registry_entry_t = IOIteratorNext(iterator)
    defer { IOObjectRelease(entry) }
    guard entry != 0 else { return false }

    var value: NSInteger = 0
    let convertedValue: CFNumber = CFNumberCreate(kCFAllocatorDefault, CFNumberType.nsIntegerType, &value)
    let result = IORegistryEntrySetCFProperty(entry, "HIDIdleTime" as CFString, convertedValue)
    if result != kIOReturnSuccess { return false }
    return true
}
While this seems to work (the function above returns true), the value is then overwritten by the system, which keeps track of the actual idle time in memory. I got a bit of insight into this from the IOHIDSystem source code released by Apple here. I'm currently using this script to easily monitor system idle time and test solutions.
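For reference, the value can also be read back programmatically. Here is a minimal Swift sketch of my own (not from the linked script), using standard IOKit calls; HIDIdleTime is reported in nanoseconds:
import Foundation
import IOKit

// Sketch: read HIDIdleTime (in nanoseconds) back out of the IORegistry.
func hidIdleTimeSeconds() -> Double? {
    let entry = IOServiceGetMatchingService(kIOMasterPortDefault, IOServiceMatching("IOHIDSystem"))
    guard entry != 0 else { return nil }
    defer { IOObjectRelease(entry) }
    guard let raw = IORegistryEntryCreateCFProperty(entry, "HIDIdleTime" as CFString, kCFAllocatorDefault, 0)?.takeRetainedValue(),
          let nanoseconds = (raw as? NSNumber)?.uint64Value else { return nil }
    return Double(nanoseconds) / 1_000_000_000
}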
Any suggestions are greatly appreciated. If at all possible, I'm trying to avoid writing my own virtual driver (although I'm open to hooking into an existing one and simulating events if at all possible).

The thing is that the registry property isn't a normal property, but is generated on the fly every time properties are queried (see _idleTimeSerializerCallback in the source).
Long story short, you need to force lastUndimEvent to be reset, which you can do with external method 6 of an IOHIDParamUserClient.
I don't speak Swift, but here is some C code that does precisely that:
// clang -o t t.c -Wall -O3 -framework CoreFoundation -framework IOKit
#include <stdio.h>
#include <stdint.h>
#include <mach/mach.h>
#include <CoreFoundation/CoreFoundation.h>
extern const mach_port_t kIOMasterPortDefault;
typedef mach_port_t io_object_t;
typedef io_object_t io_service_t;
typedef io_object_t io_connect_t;
kern_return_t IOObjectRelease(io_object_t object);
CFMutableDictionaryRef IOServiceMatching(const char *name) CF_RETURNS_RETAINED;
io_service_t IOServiceGetMatchingService(mach_port_t master, CFDictionaryRef matching CF_RELEASES_ARGUMENT);
kern_return_t IOServiceOpen(io_service_t service, task_t task, uint32_t type, io_connect_t *client);
kern_return_t IOServiceClose(io_connect_t client);
kern_return_t IOConnectCallScalarMethod(io_connect_t client, uint32_t selector, const uint64_t *in, uint32_t inCnt, uint64_t *out, uint32_t *outCnt);
const uint32_t kIOHIDParamConnectType = 1;
const uint32_t kIOHIDActivityUserIdle = 3;
const uint32_t kIOHIDActivityReport = 0;
const uint32_t kIOHIDParam_extSetStateForSelector = 6;
#define LOG(str, args...) do { fprintf(stderr, str "\n", ##args); } while(0)
int hid_reset(void)
{
    int retval = -1;
    kern_return_t ret = 0;
    io_service_t service = MACH_PORT_NULL;
    io_connect_t client = MACH_PORT_NULL;

    service = IOServiceGetMatchingService(kIOMasterPortDefault, IOServiceMatching("IOHIDSystem"));
    LOG("service: %x", service);
    if(!MACH_PORT_VALID(service)) goto out;

    ret = IOServiceOpen(service, mach_task_self(), kIOHIDParamConnectType, &client);
    LOG("client: %x, %s", client, mach_error_string(ret));
    if(ret != KERN_SUCCESS || !MACH_PORT_VALID(client)) goto out;

    uint64_t in[] = { kIOHIDActivityUserIdle, kIOHIDActivityReport };
    ret = IOConnectCallScalarMethod(client, kIOHIDParam_extSetStateForSelector, in, 2, NULL, NULL);
    LOG("extSetStateForSelector: %s", mach_error_string(ret));
    if(ret != KERN_SUCCESS) goto out;

    retval = 0;
out:;
    if(MACH_PORT_VALID(client)) IOServiceClose(client);
    if(MACH_PORT_VALID(service)) IOObjectRelease(service);
    return retval;
}

int main(void)
{
    return hid_reset();
}
It works for me on High Sierra as non-root, haven't tested it elsewhere. I do run a non-standard system configuration though, so if you get an error saying (iokit/common) not permitted on the external method, it's likely you're hitting mac_iokit_check_hid_control and might need additional entitlements, accessibility clearance, or something like that.
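Since the question asked for Swift, here is a rough, untested translation of the C code above; it hard-codes the same magic numbers (connect type 1, selector 6, scalars 3 and 0):
import IOKit

// Sketch: reset HID idle time via external method 6 of IOHIDParamUserClient.
func hidReset() -> Bool {
    let service = IOServiceGetMatchingService(kIOMasterPortDefault, IOServiceMatching("IOHIDSystem"))
    guard service != 0 else { return false }
    defer { IOObjectRelease(service) }

    var client: io_connect_t = 0
    // kIOHIDParamConnectType = 1
    guard IOServiceOpen(service, mach_task_self_, 1, &client) == kIOReturnSuccess else { return false }
    defer { IOServiceClose(client) }

    // in = { kIOHIDActivityUserIdle (3), kIOHIDActivityReport (0) }
    let input: [UInt64] = [3, 0]
    // kIOHIDParam_extSetStateForSelector = 6
    return IOConnectCallScalarMethod(client, 6, input, 2, nil, nil) == kIOReturnSuccess
}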

For Wine, we've discovered that we needed to use two different functions to get the full effects we were looking for. One is deprecated, but I could find no non-deprecated replacement. Maybe one of them will be enough for your purposes:
#include <IOKit/pwr_mgt/IOPMLib.h>     /* IOPMAssertionDeclareUserActivity */
#include <CoreServices/CoreServices.h> /* UpdateSystemActivity (Carbon-era) */

/* This wakes from display sleep, but doesn't affect the screen saver. */
static IOPMAssertionID assertion;
IOPMAssertionDeclareUserActivity(CFSTR("Wine user input"), kIOPMUserActiveLocal, &assertion);

/* This prevents the screen saver, but doesn't wake from display sleep. */
/* It's deprecated, but there's no better alternative. */
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
UpdateSystemActivity(UsrActivity);
#pragma clang diagnostic pop

Related

Binary operator < cannot be applied to Clong in Swift

I am trying to implement the following code in Swift, but my i variable refuses to talk to my MAXADDRS. It says "binary operator < cannot be applied to CLong" in Swift. If I use CInt the problem goes away, but then I get an error on the variable i when assigning theAddr = ip_addrs[i].
InitAddresses();
GetIPAddresses();
GetHWAddresses();
var i = CLong()
var deviceIP = NSString()
for (i=0; i < MAXADDRS; ++i)
{
var localHost = 0x7F000001; // 127.0.0.1
var theAddr = CLong()
theAddr = ip_addrs[i]
if (theAddr == 0) {return}
if (theAddr == localHost){continue}
NSLog("Name: %s MAC: %s IP: %s\n", if_names[i], hw_addrs[i], ip_names[i]);
//decided what adapter you want details for
if (strncmp(if_names[i], "en", 2) == 0)
{
NSLog("Adapter en has a IP of %s", ip_names[i]);
}
}
// Do any additional setup after loading the view, typically from a nib.
}
The MAXADDRS it is being compared against comes from the following Objective-C header:
Source files here
http://www.chrisandtennille.com/code/IPAddress.h
http://www.chrisandtennille.com/code/IPAddress.c
My bridging header
#include "IPAddress.h"
#include "IPAddress.c"
#define MAXADDRS 32
is imported to Swift as
public var MAXADDRS: Int32 { get }
On the other hand, CLong is an alias for Int ("The C 'long' type.")
Therefore you need to convert all values to a common type. Since array subscripting requires an Int index, converting MAXADDRS to Int might be the easiest solution:
var i = 0 // Int
for (i=0; i < Int(MAXADDRS); ++i) {
}
or more simply:
for i in 0 ..< Int(MAXADDRS) {
}

How to listen to global hotkeys with Swift in a macOS app?

I'm trying to have a handler in my Mac OS X app written in Swift for a global (system-wide) hotkey combo but I just cannot find proper documentation for it. I've read that I'd have to mess around in some legacy Carbon API for it, is there no better way? Can you show me some proof of concept Swift code? Thanks in advance!
Since Swift 2.0, you can now pass a function pointer to C APIs.
var hotKeyRef: EventHotKeyRef? = nil

var gMyHotKeyID = EventHotKeyID()
gMyHotKeyID.signature = OSType("swat".fourCharCodeValue)
gMyHotKeyID.id = UInt32(keyCode)

var eventType = EventTypeSpec()
eventType.eventClass = OSType(kEventClassKeyboard)
eventType.eventKind = OSType(kEventHotKeyPressed)

// Install handler.
InstallEventHandler(GetApplicationEventTarget(), { (nextHandler, theEvent, userData) -> OSStatus in
    var hkCom = EventHotKeyID()
    GetEventParameter(theEvent, EventParamName(kEventParamDirectObject), EventParamType(typeEventHotKeyID), nil, sizeof(EventHotKeyID), nil, &hkCom)
    // Check that hkCom is indeed your hotkey ID and handle it.
    return noErr
}, 1, &eventType, nil, nil)

// Register hotkey (keyCode and modifierKeys are supplied by you).
let status = RegisterEventHotKey(UInt32(keyCode), UInt32(modifierKeys), gMyHotKeyID, GetApplicationEventTarget(), 0, &hotKeyRef)
I don't believe you can do this in 100% Swift today. You'll need to call InstallEventHandler() or CGEventTapCreate(), and both of those require a CFunctionPointer, which can't be created in Swift. Your best plan is to use established ObjC solutions such as DDHotKey and bridge to Swift.
You can try using NSEvent.addGlobalMonitorForEventsMatchingMask(handler:), but that only makes copies of events. You can't consume them. That means the hotkey will also be passed along to the currently active app, which can cause problems. Here's an example, but I recommend the ObjC approach; it's almost certainly going to work better.
let keycode = UInt16(kVK_ANSI_X)
let keymask: NSEventModifierFlags = .CommandKeyMask | .AlternateKeyMask | .ControlKeyMask
func handler(event: NSEvent!) {
if event.keyCode == self.keycode &&
event.modifierFlags & self.keymask == self.keymask {
println("PRESSED")
}
}
// ... to set it up ...
let options = NSDictionary(object: kCFBooleanTrue, forKey: kAXTrustedCheckOptionPrompt.takeUnretainedValue() as NSString) as CFDictionaryRef
let trusted = AXIsProcessTrustedWithOptions(options)
if (trusted) {
NSEvent.addGlobalMonitorForEventsMatchingMask(.KeyDownMask, handler: self.handler)
}
This also requires that accessibility services be approved for this app. It also doesn't capture events that are sent to your own application, so you have to either capture them with your responder chain, or use addLocalMonitorForEventsMatchingMask(handler:) to add a local handler.
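Using the current API names, a local-monitor sketch might look like this (untested; keycode is the same hypothetical property as in the snippet above):
// Sketch: a local monitor sees events delivered to your own app.
// Returning nil consumes the event; returning it passes it on.
let localMonitor = NSEvent.addLocalMonitorForEvents(matching: .keyDown) { event in
    if event.keyCode == keycode && event.modifierFlags.contains([.command, .option, .control]) {
        print("PRESSED (locally)")
        return nil
    }
    return event
}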
The following code works for me for Swift 5.0.1. This solution is the combination of the solution from the accepted answer by Charlie Monroe and the recommendation by Rob Napier to use DDHotKey.
DDHotKey seems to work out of the box but it had one limitation that I had to change: the eventKind is hardcoded to kEventHotKeyReleased while I needed both kEventHotKeyPressed and kEventHotKeyReleased event types.
eventSpec.eventKind = kEventHotKeyReleased;
If you want to handle both Pressed and Released events, just add a second InstallEventHandler call which registers the other event kind.
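Alternatively, since InstallEventHandler takes a list of EventTypeSpecs, both kinds can be registered in a single call; a rough, untested sketch:
var eventTypes = [
    EventTypeSpec(eventClass: OSType(kEventClassKeyboard), eventKind: OSType(kEventHotKeyPressed)),
    EventTypeSpec(eventClass: OSType(kEventClassKeyboard), eventKind: OSType(kEventHotKeyReleased))
]
InstallEventHandler(GetApplicationEventTarget(), { (nextHandler, theEvent, userData) -> OSStatus in
    // GetEventKind(theEvent) tells kEventHotKeyPressed and kEventHotKeyReleased apart.
    NSLog("hotkey event kind: %u", GetEventKind(theEvent))
    return noErr
}, 2, eventTypes, nil, nil)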
This is the complete example of the code that registers the "Command + R" key for the kEventHotKeyReleased type.
import Carbon
extension String {
/// This converts string to UInt as a fourCharCode
public var fourCharCodeValue: Int {
var result: Int = 0
if let data = self.data(using: String.Encoding.macOSRoman) {
data.withUnsafeBytes({ (rawBytes) in
let bytes = rawBytes.bindMemory(to: UInt8.self)
for i in 0 ..< data.count {
result = result << 8 + Int(bytes[i])
}
})
}
return result
}
}
class HotkeySolution {
static
func getCarbonFlagsFromCocoaFlags(cocoaFlags: NSEvent.ModifierFlags) -> UInt32 {
let flags = cocoaFlags.rawValue
var newFlags: Int = 0
if ((flags & NSEvent.ModifierFlags.control.rawValue) > 0) {
newFlags |= controlKey
}
if ((flags & NSEvent.ModifierFlags.command.rawValue) > 0) {
newFlags |= cmdKey
}
if ((flags & NSEvent.ModifierFlags.shift.rawValue) > 0) {
newFlags |= shiftKey;
}
if ((flags & NSEvent.ModifierFlags.option.rawValue) > 0) {
newFlags |= optionKey
}
if ((flags & NSEvent.ModifierFlags.capsLock.rawValue) > 0) {
newFlags |= alphaLock
}
return UInt32(newFlags);
}
static func register() {
var hotKeyRef: EventHotKeyRef?
let modifierFlags: UInt32 =
getCarbonFlagsFromCocoaFlags(cocoaFlags: NSEvent.ModifierFlags.command)
let keyCode = kVK_ANSI_R
var gMyHotKeyID = EventHotKeyID()
gMyHotKeyID.id = UInt32(keyCode)
// Not sure what "swat" vs "htk1" do.
gMyHotKeyID.signature = OSType("swat".fourCharCodeValue)
// gMyHotKeyID.signature = OSType("htk1".fourCharCodeValue)
var eventType = EventTypeSpec()
eventType.eventClass = OSType(kEventClassKeyboard)
eventType.eventKind = OSType(kEventHotKeyReleased)
// Install handler.
InstallEventHandler(GetApplicationEventTarget(), {
(nextHandler, theEvent, userData) -> OSStatus in
// var hkCom = EventHotKeyID()
// GetEventParameter(theEvent,
// EventParamName(kEventParamDirectObject),
// EventParamType(typeEventHotKeyID),
// nil,
// MemoryLayout<EventHotKeyID>.size,
// nil,
// &hkCom)
NSLog("Command + R Released!")
return noErr
/// Check that hkCom is indeed your hotkey ID and handle it.
}, 1, &eventType, nil, nil)
// Register hotkey.
let status = RegisterEventHotKey(UInt32(keyCode),
modifierFlags,
gMyHotKeyID,
GetApplicationEventTarget(),
0,
&hotKeyRef)
assert(status == noErr)
}
}
A quick Swift 3 update for the setup:
let opts = NSDictionary(object: kCFBooleanTrue, forKey: kAXTrustedCheckOptionPrompt.takeUnretainedValue() as NSString) as CFDictionary
guard AXIsProcessTrustedWithOptions(opts) == true else { return }
NSEvent.addGlobalMonitorForEvents(matching: .keyDown, handler: self.handler)
I maintain this Swift package that makes it easy to both add global keyboard shortcuts to your app and also let the user set their own.
import SwiftUI
import KeyboardShortcuts
// Declare the shortcut for strongly-typed access.
extension KeyboardShortcuts.Name {
static let toggleUnicornMode = Self("toggleUnicornMode")
}
@main
struct YourApp: App {
    @StateObject private var appState = AppState()
var body: some Scene {
WindowGroup {
// …
}
Settings {
SettingsScreen()
}
}
}
@MainActor
final class AppState: ObservableObject {
init() {
// Register the listener.
KeyboardShortcuts.onKeyUp(for: .toggleUnicornMode) { [self] in
isUnicornMode.toggle()
}
}
}
// Present a view where the user can set the shortcut they want.
struct SettingsScreen: View {
var body: some View {
Form {
HStack(alignment: .firstTextBaseline) {
Text("Toggle Unicorn Mode:")
KeyboardShortcuts.Recorder(for: .toggleUnicornMode)
}
}
}
}
SwiftUI is used in this example, but it also supports Cocoa.
Take a look at the HotKey library. You can simply use Carthage to add it to your own app.
There is a pretty hacky, but also pretty simple, workaround if your app has a menu (a programmatic sketch follows the steps):
add a new MenuItem (maybe call it something like "Dummy for Hotkey")
in the attributes inspector, conveniently enter your hotkey in the Key Equivalent field
set Allowed when Hidden, Enabled and Hidden to true
link it with an IBAction to do whatever your hotkey is supposed to do
done!
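If you'd rather do it in code than in Interface Builder, a sketch of the same idea (the action selector and menu position here are hypothetical):
// Hypothetical: a hidden menu item that fires on Cmd+Shift+D while the app is active.
let item = NSMenuItem(title: "Dummy for Hotkey",
                      action: #selector(AppDelegate.dummyHotkeyFired(_:)),
                      keyEquivalent: "d")
item.keyEquivalentModifierMask = [.command, .shift]
item.isHidden = true
item.allowsKeyEquivalentWhenHidden = true // macOS 10.13+
NSApp.mainMenu?.item(at: 0)?.submenu?.addItem(item)
Note that, like the Interface Builder variant, this only fires while your app is active; it is not a true global hotkey.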

How to determine binary image architecture at runtime?

Crash log contains "Binary Images" section with information about architecture (armv6/armv7) and identifier of all loaded modules. How to determine this information at runtime? (at least, just for application executable)
NSBundle has method executableArchitectures, but how to determine which architecture is running?
Alright, time for the long answer. The mach headers of the dyld images in the application contain the information you are looking for. I have added an example that I only tested to work and nothing else, so I would not recommend pasting it directly into production code. What it does is get the mach headers of all currently loaded dyld images and print output very similar to the Binary Images section of a crash log. The methods I call are not thread safe. The one thing I am missing is the end address of each binary image, because I did not bother looking up how to find that.
Main.m
#import <UIKit/UIKit.h>
#include <string.h>
#import <mach-o/loader.h>
#import <mach-o/dyld.h>
#import <mach-o/arch.h>
void printImage(const struct mach_header *header)
{
    uint8_t *header_ptr = (uint8_t*)header;
    typedef struct load_command load_command;
    const NXArchInfo *info = NXGetArchInfoFromCpuType(header->cputype, header->cpusubtype);
    //Print the architecture, e.g. armv7
    printf("%s ", info->name);

    header_ptr += sizeof(struct mach_header);
    load_command *command = (load_command*)header_ptr;
    for(uint32_t i = 0; i < header->ncmds; ++i)
    {
        if(command->cmd == LC_UUID)
        {
            struct uuid_command ucmd = *(struct uuid_command*)header_ptr;
            CFUUIDRef cuuid = CFUUIDCreateFromUUIDBytes(kCFAllocatorDefault, *((CFUUIDBytes*)ucmd.uuid));
            CFStringRef suuid = CFUUIDCreateString(kCFAllocatorDefault, cuuid);
            CFStringEncoding encoding = CFStringGetFastestEncoding(suuid);
            //Print UUID
            printf("<%s> ", CFStringGetCStringPtr(suuid, encoding));
            CFRelease(cuuid);
            CFRelease(suuid);
            break;
        }
        header_ptr += command->cmdsize;
        command = (load_command*)header_ptr;
    }
}
void printBinaryImages()
{
    printf("Binary Images:\n");
    //Get count of all currently loaded dyld images
    uint32_t count = _dyld_image_count();
    for(uint32_t i = 0; i < count; i++)
    {
        //Name of image (includes full path)
        const char *dyld = _dyld_get_image_name(i);
        //Get name of file
        int slength = strlen(dyld);
        int j;
        for(j = slength - 1; j >= 0; --j)
            if(dyld[j] == '/') break;
        //strndup only available in iOS 4.3
        char *name = strndup(dyld + ++j, slength - j);
        printf("%s ", name);
        free(name);
        const struct mach_header *header = _dyld_get_image_header(i);
        //print address range
        printf("0x%X - ??? ", (uint32_t)header);
        printImage(header);
        //print file path
        printf("%s\n", dyld);
    }
    printf("\n");
}
int main(int argc, char *argv[])
{
    NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
    printBinaryImages();
    [pool release];
    return 0;
}
Example output:
Binary Images:
TestBed 0x1000 - ??? i386 <E96D079C-E035-389D-AA12-71E968C76BFE> /Users/username/Library/Application Support/iPhone Simulator/4.3/Applications/6F64D9F8-9179-4E21-AE32-4D4604BE77E5/TestBed.app/TestBed
UIKit 0x8000 - ??? i386 <72030911-362F-3E47-BAF3-ACD2CB6F88C0> /Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator4.3.sdk/System/Library/Frameworks/UIKit.framework/UIKit
Foundation 0x772000 - ??? i386 <EB718CBD-1D57-3D31-898D-7CFA9C172A46> /Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator4.3.sdk/System/Library/Frameworks/Foundation.framework/Foundation
CoreGraphics 0xA10000 - ??? i386 <D168A716-71F2-337A-AE0B-9DCF51AE9181> /Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator4.3.sdk/System/Library/Frameworks/CoreGraphics.framework/CoreGraphics
libSystem.dylib 0xCAA000 - ??? i386 <8DF0AFCD-FFA5-3049-88E2-7410F8398749> /Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator4.3.sdk/usr/lib/libSystem.dylib
...
For a quick answer about architecture alone since you are building your application you can check some preprocessor defines to determine the current architecture that your application is built for. Make sure you check for the highest version of arm available first because each newer version defines all older versions.
#if __arm__
#import <arm/arch.h>
#ifdef __ARM_ARCH_6K__
//This is armv6
#endif //__ARM_ARCH_6K__
#endif //__arm__
We can use the sysctl / sysctlbyname system calls to get or set system information.
Sample code:
#import <sys/sysctl.h>
#import <mach/machine.h>
int32_t value = 0;
size_t length = sizeof(value);
sysctlbyname("hw.cputype", &value, &length, NULL, 0);
if (value == CPU_TYPE_ARM64) {
// arm64
}
else if (value == CPU_TYPE_ARM) {
// armv7/armv7s
}
else if (value == CPU_TYPE_X86) {
// simulator
}
I just listed the most common architectures as of 2016. Look at "hw.cpusubtype" for more detail, e.g. CPU_SUBTYPE_ARM_V6, CPU_SUBTYPE_ARM_V7, CPU_SUBTYPE_ARM_V7S.
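The same query is only a few lines of Swift, if that's your context (a sketch; the CPU_TYPE_* / CPU_SUBTYPE_* constants are C macros from <mach/machine.h> and may not all be imported into Swift, so the raw values are printed for comparison):
import Darwin

var cpuType: Int32 = 0
var cpuSubtype: Int32 = 0
var size = MemoryLayout<Int32>.size

sysctlbyname("hw.cputype", &cpuType, &size, nil, 0)
size = MemoryLayout<Int32>.size
sysctlbyname("hw.cpusubtype", &cpuSubtype, &size, nil, 0)

// Compare against the values in <mach/machine.h>.
print("hw.cputype = \(cpuType), hw.cpusubtype = \(cpuSubtype)")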

Detecting if iOS app is run in debugger

I set up my application to either send debugging output to the console or to a log file. Now, I'd like to decide within the code whether it is run in the debugger (or simulator) and thus has a console window where I can read the output directly, or whether there is no console window, in which case the output should be redirected to a file.
Is there a way to determine if the app runs in the debugger?
There's a function from Apple to detect whether a program is being debugged in the Technical Q&A 1361 (entry in Mac library and entry in iOS library; they are identical).
Code from the Technical Q&A:
#include <assert.h>
#include <stdbool.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/sysctl.h>
static bool AmIBeingDebugged(void)
// Returns true if the current process is being debugged (either
// running under the debugger or has a debugger attached post facto).
{
    int junk;
    int mib[4];
    struct kinfo_proc info;
    size_t size;

    // Initialize the flags so that, if sysctl fails for some bizarre
    // reason, we get a predictable result.
    info.kp_proc.p_flag = 0;

    // Initialize mib, which tells sysctl the info we want, in this case
    // we're looking for information about a specific process ID.
    mib[0] = CTL_KERN;
    mib[1] = KERN_PROC;
    mib[2] = KERN_PROC_PID;
    mib[3] = getpid();

    // Call sysctl.
    size = sizeof(info);
    junk = sysctl(mib, sizeof(mib) / sizeof(*mib), &info, &size, NULL, 0);
    assert(junk == 0);

    // We're being debugged if the P_TRACED flag is set.
    return ( (info.kp_proc.p_flag & P_TRACED) != 0 );
}
Also pay attention to this note at the end of the Q&A:
Important: Because the definition of the kinfo_proc structure (in <sys/sysctl.h>) is conditionalized by __APPLE_API_UNSTABLE, you should restrict use of the above code to the debug build of your program.
It is possible to instruct the debugger to set environment variables when it launches a process it is about to debug. This can be done in Xcode by going to the menu item Product->Edit Scheme. Then under the Debug scheme's Arguments tab add a new environment variable. The variable should be named "debugger" with the value "true". Then the following code snippet can be used to determine if the debugger launched your process:
NSDictionary* env = [NSProcessInfo processInfo].environment;
if ([env[@"debugger"] isEqual:@"true"]) {
    NSLog(@"debugger yes");
}
else {
    NSLog(@"debugger no");
}
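The same check in Swift would be (assuming the "debugger" variable configured in the scheme as described above):
import Foundation

if ProcessInfo.processInfo.environment["debugger"] == "true" {
    print("debugger yes")
} else {
    print("debugger no")
}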
For those who are looking for a simpler solution - this works perfectly:
func isDebuggerAttached() -> Bool {
    return getppid() != 1
}
It relies on the fact that an app launched normally is a child of launchd (PID 1), while an app launched by Xcode has the debugger as its parent process.
The simplest solution actually is
_isDebugging = isatty(STDERR_FILENO);
It isn't exactly the same as telling whether the app is running under debugger, but good enough (even better?) to determine whether the log should be written to disk.
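A Swift rendering of the same check, for what it's worth:
import Darwin

// stderr is a TTY when running with Xcode's console attached.
let isDebugging = isatty(STDERR_FILENO) != 0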
Based on an answer in a duplicate thread that showed how HockeyApp-iOS does it in Objective-C, here's a Swift 5 version:
let isDebuggerAttached: Bool = {
    var debuggerIsAttached = false

    var name: [Int32] = [CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid()]
    var info: kinfo_proc = kinfo_proc()
    var info_size = MemoryLayout<kinfo_proc>.size

    let success = name.withUnsafeMutableBytes { (nameBytePtr: UnsafeMutableRawBufferPointer) -> Bool in
        guard let nameBytesBlindMemory = nameBytePtr.bindMemory(to: Int32.self).baseAddress else { return false }
        return -1 != sysctl(nameBytesBlindMemory, 4, &info/*UnsafeMutableRawPointer!*/, &info_size/*UnsafeMutablePointer<Int>!*/, nil, 0)
    }

    // The original HockeyApp code checks for this; you could just as well remove these lines:
    if !success {
        debuggerIsAttached = false
    }

    if !debuggerIsAttached && (info.kp_proc.p_flag & P_TRACED) != 0 {
        debuggerIsAttached = true
    }

    return debuggerIsAttached
}()
Always good to have different solutions, so here are my two cents:
The idea is to check the stderr filehandle (this is where NSLog prints to). This solution has reliably been working since at least iOS 4 and keeps doing so in iOS 9, both on the simulator and device.
#import <sys/ioctl.h>
#import <sys/param.h>

#if TARGET_IPHONE_SIMULATOR
#import <sys/conf.h>
#else
// Not sure why <sys/conf.h> is missing on the iPhoneOS.platform.
// It's there on iPhoneSimulator.platform, though. We need it for D_DISK, only:
#if ! defined(D_DISK)
#define D_DISK 2
#endif
#endif

BOOL isDebuggerAttachedToConsole(void)
{
    // We use the type of the stderr file descriptor
    // to guess if a debugger is attached.
    int fd = STDERR_FILENO;

    // is the file handle open?
    if (fcntl(fd, F_GETFD, 0) < 0) {
        return NO;
    }

    // get the path of stderr's file handle
    char buf[MAXPATHLEN + 1];
    if (fcntl(fd, F_GETPATH, buf) >= 0) {
        if (strcmp(buf, "/dev/null") == 0)
            return NO;
        if (strncmp(buf, "/dev/tty", 8) == 0)
            return YES;
    }

    // On the device, without attached Xcode, the type is D_DISK (otherwise it's D_TTY)
    int type;
    if (ioctl(fd, FIODTYPE, &type) < 0) {
        return NO;
    }
    return type != D_DISK;
}
I usually go for a much simpler solution: is the binary compiled with optimizations? A debug build is not optimized and logs are nice; a release build should have optimizations and not as many logs. You can check for this with the __OPTIMIZE__ symbol.
For logging I use this setup for log functions:
#ifdef __OPTIMIZE__
#define CWLog(...)
#define CWLogDebug(...)
#define CWLogInfo(...)
#else
#define CWLog(...) NSLog(__VA_ARGS__)
#define CWLogDebug( s, ... ) NSLog( @"DEBUG <%p %@:(%d)> %@", self, [[NSString stringWithUTF8String:__FILE__] lastPathComponent], __LINE__, [NSString stringWithFormat:(s), ##__VA_ARGS__] )
#ifndef LOG_INFO
#define CWLogInfo(...)
#else
#define CWLogInfo( s, ... ) NSLog( @"INFO <%p %@:(%d)> %@", self, [[NSString stringWithUTF8String:__FILE__] lastPathComponent], __LINE__, [NSString stringWithFormat:(s), ##__VA_ARGS__] )
#endif
#endif

#define CWLogWarning( s, ... ) NSLog( @"WARNING <%p %@:(%d)> %@", self, [[NSString stringWithUTF8String:__FILE__] lastPathComponent], __LINE__, [NSString stringWithFormat:(s), ##__VA_ARGS__] )
#define CWLogError( s, ... ) NSLog( @"ERROR <%p %@:(%d)> %@", self, [[NSString stringWithUTF8String:__FILE__] lastPathComponent], __LINE__, [NSString stringWithFormat:(s), ##__VA_ARGS__] )
Why not use a conditional compilation block in Swift?
#if DEBUG
// Do something.
#endif
Any objection?
If you want a runtime constant, you can define one:
#if DEBUG
public let IS_RUNNING_IN_DEBUGGER: Bool = true
#else
public let IS_RUNNING_IN_DEBUGGER: Bool = false
#endif
The same approach can be used in Objective-C and elsewhere.

Transmission of float values over TCP/IP and data corruption

I have an extremely strange bug.
I have two applications that communicate over TCP/IP.
Application A is the server, and application B is the client.
Application A sends a bunch of float values to application B every 100 milliseconds.
The bug is the following: sometimes some of the float values received by application B are not the same as the values transmitted by application A.
Initially, I thought there was a problem with the Ethernet or TCP/IP drivers (some sort of data corruption). I then tested the code on other Windows machines, but the problem persisted.
I then tested the code on Linux (Ubuntu 10.04.1 LTS) and the problem is still there!!!
The values are logged just before they are sent and just after they are received.
The code is pretty straightforward: the message protocol has a 4 byte header like this:
//message header
struct MESSAGE_HEADER {
    unsigned short type;
    unsigned short length;
};

//orientation message
struct ORIENTATION_MESSAGE : MESSAGE_HEADER
{
    float azimuth;
    float elevation;
    float speed_az;
    float speed_elev;
};

//any message
struct MESSAGE : MESSAGE_HEADER {
    char buffer[512];
};

//receive specific size of bytes from the socket
static int receive(SOCKET socket, void *buffer, size_t size) {
    int r;
    do {
        r = recv(socket, (char *)buffer, size, 0);
        if (r == 0 || r == SOCKET_ERROR) break;
        buffer = (char *)buffer + r;
        size -= r;
    } while (size);
    return r;
}

//send specific size of bytes to a socket
static int send(SOCKET socket, const void *buffer, size_t size) {
    int r;
    do {
        r = send(socket, (const char *)buffer, size, 0);
        if (r == 0 || r == SOCKET_ERROR) break;
        buffer = (char *)buffer + r;
        size -= r;
    } while (size);
    return r;
}

//get message from socket
static bool receive(SOCKET socket, MESSAGE &msg) {
    int r = receive(socket, &msg, sizeof(MESSAGE_HEADER));
    if (r == SOCKET_ERROR || r == 0) return false;
    if (ntohs(msg.length) == 0) return true;
    r = receive(socket, msg.buffer, ntohs(msg.length));
    if (r == SOCKET_ERROR || r == 0) return false;
    return true;
}

//send message
static bool send(SOCKET socket, const MESSAGE &msg) {
    int r = send(socket, &msg, ntohs(msg.length) + sizeof(MESSAGE_HEADER));
    if (r == SOCKET_ERROR || r == 0) return false;
    return true;
}
When I receive the 'orientation' message, sometimes the 'azimuth' value is different from the one sent by the server!
Shouldn't the data be the same all the time? Doesn't TCP/IP guarantee delivery of the data uncorrupted? Could it be that an exception in the math co-processor affects the TCP/IP stack? Is it a problem that I receive a small number of bytes first (4 bytes) and then the message body?
EDIT:
The problem is in the endianness swapping routine. The following code swaps the endianness of a specific float, then swaps it again and prints the bytes:
#include <iostream>
using namespace std;
float ntohf(float f)
{
    float r;
    unsigned char *s = (unsigned char *)&f;
    unsigned char *d = (unsigned char *)&r;
    d[0] = s[3];
    d[1] = s[2];
    d[2] = s[1];
    d[3] = s[0];
    return r;
}

int main() {
    unsigned long l = 3206974079;
    float f1 = (float &)l;
    float f2 = ntohf(ntohf(f1));
    unsigned char *c1 = (unsigned char *)&f1;
    unsigned char *c2 = (unsigned char *)&f2;
    printf("%02X %02X %02X %02X\n", c1[0], c1[1], c1[2], c1[3]);
    printf("%02X %02X %02X %02X\n", c2[0], c2[1], c2[2], c2[3]);
    getchar();
    return 0;
}
The output is:
7F 8A 26 BF
7F CA 26 BF
I.e. the float assignment probably normalizes the value, producing a different value from the original.
Any input on this is welcomed.
EDIT2:
Thank you all for your replies. It seems the problem is that the swapped float, when returned via the 'return' statement, is pushed onto the CPU's floating-point stack. The caller then pops it back off, and the value gets altered on the way through: the byte-swapped bit pattern (0x7F8A26BF) happens to be a signaling NaN, and loading it through the FPU quietens it by setting the quiet bit, turning it into 0x7FCA26BF. The usual fix is to byte-swap the 32-bit integer representation and only reinterpret the bytes as a float once they are back in host byte order.
TCP tries to deliver unaltered bytes, but unless the machines have similar CPUs and operating systems, there's no guarantee that the floating-point representation on one system is identical to that on the other. You need a mechanism for ensuring this, such as XDR or Google's protobuf.
You're sending binary data over the network, using implementation-defined padding for the struct layout, so this will only work if you're using the same hardware, OS and compiler for both application A and application B.
If that's OK, though, I can't see anything wrong with your code. One potential issue is that you're using ntohs to extract the length of the message, and that length is the total length minus the header length, so you need to make sure you're setting it properly. It needs to be done as
msg.length = htons(sizeof(ORIENTATION_MESSAGE) - sizeof(MESSAGE_HEADER));
but you don't show the code that sets up the message...