How do I set the audio session property for AudioQueue recording? - iphone

How can I set the property for AudioQueue recording? I want to record sound on the device, and someone told me to use kAudioSessionCategory_PlayAndRecord:
UInt32 sessionCategory = kAudioSessionCategory_PlayAndRecord;
AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(sessionCategory), &sessionCategory);
But when I add this to my simple code, it doesn't work. How can I set the property?
#include <AudioToolbox/AudioQueue.h>
#include <AudioToolbox/AudioFile.h>
#include <AudioToolbox/AudioConverter.h>
#include <AudioToolbox/AudioToolbox.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* strlen */
#include <errno.h>
#include <sys/stat.h>
#include <sys/select.h>
#define AUDIO_BUFFERS 3
typedef struct AQCallbackStruct
{
AudioStreamBasicDescription mDataFormat;
AudioQueueRef queue;
AudioQueueBufferRef mBuffers[AUDIO_BUFFERS];
AudioFileID outputFile;
unsigned long frameSize;
long long recPtr;
int run;
} AQCallbackStruct;
static void AQInputCallback(
void *aqr,
AudioQueueRef inQ,
AudioQueueBufferRef inQB,
const AudioTimeStamp *timestamp,
UInt32 inNumPackets,
const AudioStreamPacketDescription *inPacketDesc)
{
AQCallbackStruct *aqc = (AQCallbackStruct *) aqr;
/* For a CBR format such as linear PCM the queue may pass 0 packet
descriptions, so derive the packet count from the byte size */
if (inNumPackets == 0 && aqc->mDataFormat.mBytesPerPacket != 0)
inNumPackets = inQB->mAudioDataByteSize / aqc->mDataFormat.mBytesPerPacket;
/* Write data to file */
if (AudioFileWritePackets (aqc->outputFile, false, inQB->mAudioDataByteSize,
inPacketDesc, aqc->recPtr, &inNumPackets, inQB->mAudioData) == noErr)
{
aqc->recPtr += inNumPackets;
}
/* Don't re-queue the sound buffers if we're supposed to stop recording */
if (!aqc->run)
return;
AudioQueueEnqueueBuffer (aqc->queue, inQB, 0, NULL);
}
int main(int argc, char *argv[])
{
AQCallbackStruct aqc;
AudioFileTypeID fileFormat;
CFURLRef filename;
struct timeval tv;
int i;
if (argc < 3)
{
fprintf(stderr, "Syntax: %s [filename.aif] [seconds]", argv[0]);
exit(EXIT_FAILURE);
}
// How? (see the session-setup sketch after this listing)
//UInt32 sessionCategory = kAudioSessionCategory_PlayAndRecord;
//AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(sessionCategory), &sessionCategory);
aqc.mDataFormat.mFormatID = kAudioFormatLinearPCM;
aqc.mDataFormat.mSampleRate = 44100.0;
aqc.mDataFormat.mChannelsPerFrame = 2;
aqc.mDataFormat.mBitsPerChannel = 16;
aqc.mDataFormat.mBytesPerPacket =
aqc.mDataFormat.mBytesPerFrame =
aqc.mDataFormat.mChannelsPerFrame * sizeof (short int);
aqc.mDataFormat.mFramesPerPacket = 1;
aqc.mDataFormat.mFormatFlags =
kLinearPCMFormatFlagIsBigEndian
| kLinearPCMFormatFlagIsSignedInteger
| kLinearPCMFormatFlagIsPacked;
aqc.frameSize = 735; /* frames per buffer: 44100 / 60 */
AudioQueueNewInput (&aqc.mDataFormat, AQInputCallback, &aqc, NULL,
kCFRunLoopCommonModes, 0, &aqc.queue);
/* Create output file */
fileFormat = kAudioFileAIFFType;
filename = CFURLCreateFromFileSystemRepresentation (NULL, (const UInt8*)argv[1], strlen (argv[1]), false);
AudioFileCreateWithURL (
filename,
fileFormat,
&aqc.mDataFormat,
kAudioFileFlags_EraseFile,
&aqc.outputFile
);
/* Initialize the recording buffers */
for(i=0; i<AUDIO_BUFFERS; i++)
{
AudioQueueAllocateBuffer (aqc.queue, aqc.frameSize * aqc.mDataFormat.mBytesPerFrame, &aqc.mBuffers[i]); /* size is in bytes, not frames */
AudioQueueEnqueueBuffer (aqc.queue, aqc.mBuffers[i], 0, NULL);
}
aqc.recPtr = 0;
aqc.run = 1;
AudioQueueStart (aqc.queue, NULL);
/* Hang around for a while while the recording takes place */
tv.tv_sec = atoi(argv[2]); /* tv_sec is integral */
tv.tv_usec = 0;
select(0, NULL, NULL, NULL, &tv);
/* Shut down recording: stop re-queuing buffers before stopping the queue */
aqc.run = 0;
AudioQueueStop (aqc.queue, true);
AudioQueueDispose (aqc.queue, true);
AudioFileClose (aqc.outputFile);
exit(EXIT_SUCCESS);
}
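A note on the commented-out session calls above: with the C-based Audio Session API (deprecated since iOS 7 in favor of AVAudioSession), AudioSessionSetProperty has no effect unless the session is first initialized and then made active. A minimal sketch of the expected call order, to run before AudioQueueNewInput (error handling omitted; check each OSStatus in real code):

#include <AudioToolbox/AudioToolbox.h>

/* Initialize the session once, before setting any session property */
AudioSessionInitialize(NULL, NULL, NULL, NULL);

UInt32 sessionCategory = kAudioSessionCategory_PlayAndRecord;
AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
                        sizeof(sessionCategory), &sessionCategory);

/* The category only takes effect once the session is active */
AudioSessionSetActive(true);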

Related

Convert Linear PCM audio to MP3 (using LAME) with low/medium/high audio quality settings

I am encoding Linear PCM to MP3 on iOS: I capture raw PCM from the microphone with the AudioToolbox framework and encode it with LAME. Recording works, the MP3 is produced, and the recorded MP3 file plays back fine. Now I want to convert the audio (using LAME) into low, medium, and high quality MP3 files, but I don't know the exact settings (sample rate, bit depth, bit rate, channels, quality) to use in the LAME conversion.
void AQRecorder::MyInputBufferHandler( void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription* inPacketDesc)
{
AQRecorder *aqr = (AQRecorder *)inUserData;
// NSLog(#"%f",inStartTime->mSampleTime);
try
{
if (inNumPackets > 0)
{
AudioFileWritePackets(aqr->mRecordFile, FALSE, inBuffer->mAudioDataByteSize, inPacketDesc, aqr->mRecordPacket, &inNumPackets, inBuffer->mAudioData);
aqr->mRecordPacket += inNumPackets;
int MP3_SIZE =inBuffer->mAudioDataByteSize * 4;
unsigned char mp3_buffer[MP3_SIZE];
lame_t lame = lame_init();
lame_set_in_samplerate(lame, 44100);
lame_set_VBR(lame, vbr_default);
lame_init_params(lame);
// int encodedBytes=lame_encode_buffer_interleaved(lame, (short int *)inBuffer->mAudioData , inNumPackets, mp3_buffer, MP3_SIZE);
int encodedBytes = lame_encode_buffer(lame, (short*)inBuffer->mAudioData, (short*)inBuffer->mAudioData, inNumPackets, mp3_buffer, MP3_SIZE);
[delegate.mp3AudioData appendBytes:mp3_buffer length:encodedBytes];
if (inBuffer->mAudioDataByteSize == 0)
{
int encode = lame_encode_flush(lame, mp3_buffer, MP3_SIZE);
[delegate.mp3AudioData appendBytes:mp3_buffer length:encode];
}
lame_close(lame);
}
if (aqr->IsRunning())
{
AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
}
} catch (CAXException e)
{
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
}
Try this.
Low quality:
AppDelegate *appDelegate = (AppDelegate *)[[UIApplication sharedApplication] delegate];
NSMutableDictionary *dictAudioQuality = [[NSMutableDictionary alloc] init];
[dictAudioQuality setValue:@"Low" forKey:@"audioquality"];
[dictAudioQuality setValue:@"11025" forKey:@"samplerate"];
[dictAudioQuality setValue:@"16" forKey:@"bitdepth"];
[dictAudioQuality setValue:@"120" forKey:@"bitrate"];
[dictAudioQuality setValue:@"1" forKey:@"channel"];
Medium quality:
AppDelegate *appDelegate = (AppDelegate *)[[UIApplication sharedApplication] delegate];
NSMutableDictionary *dictAudioQuality = [[NSMutableDictionary alloc] init];
[dictAudioQuality setValue:@"Medium" forKey:@"audioquality"];
[dictAudioQuality setValue:@"22050" forKey:@"samplerate"];
[dictAudioQuality setValue:@"16" forKey:@"bitdepth"];
[dictAudioQuality setValue:@"240" forKey:@"bitrate"];
[dictAudioQuality setValue:@"1" forKey:@"channel"];
High quality:
AppDelegate *appDelegate = (AppDelegate *)[[UIApplication sharedApplication] delegate];
NSMutableDictionary *dictAudioQuality = [[NSMutableDictionary alloc] init];
[dictAudioQuality setValue:@"High" forKey:@"audioquality"];
[dictAudioQuality setValue:@"44100" forKey:@"samplerate"];
[dictAudioQuality setValue:@"24" forKey:@"bitdepth"];
[dictAudioQuality setValue:@"320" forKey:@"bitrate"];
[dictAudioQuality setValue:@"2" forKey:@"channel"];
AQRecorder.m Start Record
void AQRecorder::StartRecord(CFStringRef inRecordFile)
{
int i, bufferByteSize;
UInt32 size;
delegate = [[UIApplication sharedApplication] delegate];
nSampleRate = [[delegate.dictMP3Quality valueForKey:@"samplerate"] intValue];
nBitRate = [[delegate.dictMP3Quality valueForKey:@"bitrate"] intValue];
nChannel = [[delegate.dictMP3Quality valueForKey:@"channel"] intValue];
try {
UInt32 category = kAudioSessionCategory_RecordAudio;
OSStatus error = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
if (error) printf("couldn't set audio category!");
// specify the recording format
SetupAudioFormat(kAudioFormatLinearPCM);
// create the queue
XThrowIfError(AudioQueueNewInput(
&mRecordFormat,
MyInputBufferHandler,
this /* userData */,
NULL /* run loop */, NULL /* run loop mode */,
0 /* flags */, &mQueue), "AudioQueueNewInput failed");
// get the record format back from the queue's audio converter --
// the file may require a more specific stream description than was necessary to create the encoder.
mRecordPacket = 0;
size = sizeof(mRecordFormat);
XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_StreamDescription,
&mRecordFormat, &size), "couldn't get queue's format");
// copy the cookie first to give the file object as much info as we can about the data going in
// not necessary for pcm, but required for some compressed audio
CopyEncoderCookieToFile();
// allocate and enqueue buffers
bufferByteSize = ComputeRecordBufferSize(&mRecordFormat, kBufferDurationSeconds); // enough bytes for half a second
for (i = 0; i < kNumberRecordBuffers; ++i) {
XThrowIfError(AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]),
"AudioQueueAllocateBuffer failed");
XThrowIfError(AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL),
"AudioQueueEnqueueBuffer failed");
}
// start the queue
mIsRunning = true;
XThrowIfError(AudioQueueStart(mQueue, NULL), "AudioQueueStart failed");
lame = lame_init();
lame_set_in_samplerate(lame, mRecordFormat.mSampleRate);
lame_set_out_samplerate(lame, nSampleRate);
lame_set_num_channels(lame, nChannel);
// lame_set_brate(lame, nBitRate);
lame_set_VBR(lame, vbr_default);
lame_init_params(lame);
}
catch (CAXException e) {
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
catch (...) {
fprintf(stderr, "An unknown error occurred\n");;
}
}
AQRecorder::MyInputBufferHandler
void AQRecorder::MyInputBufferHandler( void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription* inPacketDesc)
{
AQRecorder *aqr = (AQRecorder *)inUserData;
try
{
if (inNumPackets > 0)
{
AudioFileWritePackets(aqr->mRecordFile, FALSE, inBuffer->mAudioDataByteSize, inPacketDesc, aqr->mRecordPacket, &inNumPackets, inBuffer->mAudioData);
aqr->mRecordPacket += inNumPackets;
int MP3_SIZE =inNumPackets * 4;
unsigned char mp3_buffer[MP3_SIZE];
memset(mp3_buffer, 0, sizeof(mp3_buffer));
// int encodedBytes=lame_encode_buffer_interleaved(lame, (short int*)inBuffer->mAudioData , inNumPackets, mp3_buffer, MP3_SIZE);
int encodedBytes = lame_encode_buffer(aqr->lame, (short int*)inBuffer->mAudioData, (short int*)inBuffer->mAudioData, inNumPackets, mp3_buffer, MP3_SIZE);
[aqr->delegate.mp3AudioData appendBytes:mp3_buffer length:encodedBytes];
if (inBuffer->mAudioDataByteSize == 0)
{
int encode = lame_encode_flush(aqr->lame, mp3_buffer, MP3_SIZE);
[aqr->delegate.mp3AudioData appendBytes:mp3_buffer length:encode];
}
{
NSLog(#"------------");
NSLog(#"%d",encodedBytes);
NSLog(#"%lu",inNumPackets);
NSLog(#"%d",MP3_SIZE);
NSLog(#"------------");
}
}
if (aqr->IsRunning())
{
AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
}
} catch (CAXException e)
{
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
}

AudioUnit: How to get the mixed audio data

I used AudioUnits to mix two audio files, and an I/O unit to record from the microphone. I want the mixed audio and the recorded input synchronized together, but in the RenderNotify callback I only get the recorded data. How can I do this?
AUGraphAddRenderNotify(processingGraph, auRenderCallbac, self);
static OSStatus auRenderCallbac(void * inRefCon,
AudioUnitRenderActionFlags * ioActionFlags,
const AudioTimeStamp * inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData)
{
if(*ioActionFlags & kAudioUnitRenderAction_PostRender)
{
MixerHostAudio *THIS = (MixerHostAudio *)inRefCon;
AudioUnitSampleType samples[inNumberFrames]; // A large enough size to not have to worry about buffer overrun
memset (&samples, 0, sizeof (samples));
AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mData = samples;
bufferList.mBuffers[0].mNumberChannels = 1;
bufferList.mBuffers[0].mDataByteSize = inNumberFrames*sizeof(AudioUnitSampleType);
OSStatus err = AudioUnitRender(THIS.ioUnit, ioActionFlags,
inTimeStamp, 1, inNumberFrames, &bufferList);
printf("%lu \n",err);
unsigned char aacData[2048] = {0};
int inLen = inNumberFrames;
int outLen;
[THIS->aacEncoder encFuncWithInCode:(short *)bufferList.mBuffers[0].mData outCode:aacData inLen:&inLen outLen:&outLen];
fwrite(aacData, 1, outLen, THIS->outfile);
}
return noErr;
}
With this I only get the microphone's sound, not the audio files' sound.
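One way to capture the mix (a sketch, not tested against the poster's graph): attach the render notify to the mixer unit itself rather than to the graph, and read ioData in the post-render phase, where it already holds the mixed frames. mixerUnit below is assumed to be the multichannel mixer's AudioUnit obtained via AUGraphNodeInfo:

static OSStatus mixerRenderNotify(void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp,
                                  UInt32 inBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList *ioData)
{
    if (*ioActionFlags & kAudioUnitRenderAction_PostRender)
    {
        /* ioData already contains the mixer's rendered (mixed) output;
           copy or encode it here instead of calling AudioUnitRender
           on the I/O unit's input bus */
    }
    return noErr;
}

/* Register on the mixer unit, e.g. after AUGraphInitialize: */
AudioUnitAddRenderNotify(mixerUnit, mixerRenderNotify, self);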

How to encode AAC data from PCM data in the iPhone SDK? (iPhone dev/Audio)

I guess "AudioConverterFillComplexBuffer" is the solution.
But I don't know this way is right.
+1. AudioUnit
initialize AudioUnit : "recordingCallback" is callback method.
the output format is PCM.
record to file.( I played the recorded file).
+2. AudioConverter
add "AudioConverterFillComplexBuffer"
I don't know about it well. added,
+3. problem
"audioConverterComplexInputDataProc" method called only one time.
How can I use AudioConverter api?
Attached my code
#import "AACAudioRecorder.h"
#define kOutputBus 0
#define kInputBus 1
@implementation AACAudioRecorder
This is AudioConverterFillComplexBuffer's callback method.
static OSStatus audioConverterComplexInputDataProc( AudioConverterRef inAudioConverter,
UInt32* ioNumberDataPackets,
AudioBufferList* ioData,
AudioStreamPacketDescription** outDataPacketDescription,
void* inUserData){
AudioBufferList *input = (AudioBufferList *)inUserData;
/* Fill in the buffer list the converter passed us; merely reassigning
the ioData parameter (as the original code did) hands the converter nothing */
ioData->mBuffers[0] = input->mBuffers[0];
*ioNumberDataPackets = input->mBuffers[0].mDataByteSize / 2; /* 16-bit mono packets */
return 0;
}
This is AudioUnit's callback.
static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
@autoreleasepool {
AACAudioRecorder *THIS = (AACAudioRecorder *)inRefCon;
/* Give AudioUnitRender a real buffer to fill; the original code
passed an uninitialized AudioBufferList pointer */
SInt16 samples[inNumberFrames]; /* 16-bit mono, matching the stream format */
AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mNumberChannels = 1;
bufferList.mBuffers[0].mDataByteSize = inNumberFrames * sizeof(SInt16);
bufferList.mBuffers[0].mData = samples;
OSStatus err = AudioUnitRender(THIS->m_audioUnit,
ioActionFlags,
inTimeStamp, inBusNumber, inNumberFrames, &bufferList);
if (err) { NSLog(@"%s AudioUnitRender error %d\n", __FUNCTION__, (int)err); return err; }
NSString *recordFile =
[NSTemporaryDirectory() stringByAppendingPathComponent: @"auioBuffer.pcm"];
FILE *fp = fopen([recordFile UTF8String], "a+");
fwrite(bufferList.mBuffers[0].mData, sizeof(Byte),
bufferList.mBuffers[0].mDataByteSize, fp);
fclose(fp);
[THIS convert:&bufferList ioOutputDataPacketSize:&inNumberFrames];
}
return noErr;
}
status check method
static void checkStatus(OSStatus status, const char* str){
if (status != noErr) {
NSLog(@"%s %s error : %d", __FUNCTION__, str, (int)status);
}
}
convert method : PCM -> AAC
- (void)convert:(AudioBufferList*)input_bufferList ioOutputDataPacketSize:(UInt32*)packetSizeRef
{
UInt32 size = sizeof(UInt32);
UInt32 maxOutputSize;
AudioConverterGetProperty(m_audioConverterRef,
kAudioConverterPropertyMaximumOutputPacketSize,
&size,
&maxOutputSize);
AudioBufferList *output_bufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList));
output_bufferList->mNumberBuffers = 1;
output_bufferList->mBuffers[0].mNumberChannels = 1;
output_bufferList->mBuffers[0].mDataByteSize = *packetSizeRef * 2;
output_bufferList->mBuffers[0].mData = malloc(*packetSizeRef * 2);
OSStatus err = AudioConverterFillComplexBuffer(
m_audioConverterRef,
audioConverterComplexInputDataProc,
input_bufferList,
packetSizeRef,
output_bufferList,
NULL
);
if (err) { NSLog(@"%s : AudioFormat Convert error %d\n", __FUNCTION__, (int)err); }
/* NOTE: the converted packets in output_bufferList are discarded here;
write them somewhere before freeing if you need the AAC output */
free(output_bufferList->mBuffers[0].mData);
free(output_bufferList);
}
This is initialize method.
- (void)initialize
{
// ...
OSStatus status;
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &m_audioUnit);
checkStatus(status,"AudioComponentInstanceNew");
// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(m_audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
checkStatus(status,"Enable IO for recording");
// Enable IO for playback
status = AudioUnitSetProperty(m_audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
checkStatus(status,"Enable IO for playback");
// Describe format
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;
// Apply format
status = AudioUnitSetProperty(m_audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status,"Apply format1");
status = AudioUnitSetProperty(m_audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status,"Apply format2");
// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(m_audioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus,
&callbackStruct,
sizeof(callbackStruct));
checkStatus(status,"Set input callback");
// Initialise
status = AudioUnitInitialize(m_audioUnit);
checkStatus(status,"AudioUnitInitialize");
// Set ASBD For converting Output Stream
AudioStreamBasicDescription outputFormat;
memset(&outputFormat, 0, sizeof(outputFormat));
outputFormat.mSampleRate = 44100.00;
outputFormat.mFormatID = kAudioFormatMPEG4AAC;
outputFormat.mFormatFlags = kMPEG4Object_AAC_Main;
outputFormat.mFramesPerPacket = 1024;
outputFormat.mChannelsPerFrame = 1;
outputFormat.mBitsPerChannel = 0;
outputFormat.mBytesPerFrame = 0;
outputFormat.mBytesPerPacket = 0;
//Create An Audio Converter
status = AudioConverterNew( &audioFormat, &outputFormat, &m_audioConverterRef );
checkStatus(status,"Create An Audio Converter");
if (m_audioConverterRef) NSLog(@"m_audioConverterRef is created");
}
AudioOutputUnitStart
- (void)StartRecord
{
OSStatus status = AudioOutputUnitStart(m_audioUnit);
checkStatus(status,"AudioOutputUnitStart");
}
AudioOutputUnitStop
- (void)StopRecord
{
OSStatus status = AudioOutputUnitStop(m_audioUnit);
checkStatus(status,"AudioOutputUnitStop");
}
finish
- (void)finish
{
AudioUnitUninitialize(m_audioUnit);
}
@end
It took me a long time to understand AudioConverterFillComplexBuffer, and especially how to use it to convert audio in real-time. I've posted my approach here: How do I use CoreAudio's AudioConverter to encode AAC in real-time?
Reference: https://developer.apple.com/library/ios/samplecode/iPhoneACFileConvertTest/Introduction/Intro.html
It demonstrates using the Audio Converter APIs to convert from a PCM audio format to a compressed format, including AAC.
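As for why audioConverterComplexInputDataProc fires only once: the input proc must supply fresh input on every call and explicitly signal when it has run dry, otherwise AudioConverterFillComplexBuffer keeps asking. A common pattern, sketched under the assumption of 16-bit mono PCM (the "no more data" status is an arbitrary app-defined value, not a system constant):

typedef struct {
    AudioBufferList *pcm; /* PCM waiting to be handed to the converter */
    Boolean consumed;     /* set once the buffer has been delivered */
} ConverterFeed;

enum { kNoMoreDataErr = -50000 }; /* app-defined sentinel status */

static OSStatus feedConverter(AudioConverterRef inConverter,
                              UInt32 *ioNumberDataPackets,
                              AudioBufferList *ioData,
                              AudioStreamPacketDescription **outPacketDesc,
                              void *inUserData)
{
    ConverterFeed *feed = (ConverterFeed *)inUserData;
    if (feed->consumed) {
        /* Drained: report zero packets and a non-zero status so
           AudioConverterFillComplexBuffer returns to its caller
           instead of asking again */
        *ioNumberDataPackets = 0;
        return kNoMoreDataErr;
    }
    ioData->mBuffers[0] = feed->pcm->mBuffers[0];
    *ioNumberDataPackets = feed->pcm->mBuffers[0].mDataByteSize / 2; /* 16-bit mono */
    feed->consumed = true;
    return noErr;
}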

Will this code work on a non-jailbroken iPhone?

I get:
collect2: ld returned 1 exit status
/usr/bin/g++-4.2 failed with exit code 1
#import <stdio.h>
#import <string.h>
#import <mach/mach_host.h>
#import <sys/sysctl.h>
#include <CoreFoundation/CoreFoundation.h>
#include <IOKit/ps/IOPowerSources.h>
#include <IOKit/ps/IOPSKeys.h>
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
void printMemoryInfo()
{
size_t length;
int mib[6];
int result;
printf("Memory Info\n");
printf("-----------\n");
int pagesize;
mib[0] = CTL_HW;
mib[1] = HW_PAGESIZE;
length = sizeof(pagesize);
if (sysctl(mib, 2, &pagesize, &length, NULL, 0) < 0)
{
perror("getting page size");
}
printf("Page size = %d bytes\n", pagesize);
printf("\n");
mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
vm_statistics_data_t vmstat;
if (host_statistics(mach_host_self(), HOST_VM_INFO, (host_info_t)&vmstat, &count) != KERN_SUCCESS)
{
printf("Failed to get VM statistics.");
}
double total = vmstat.wire_count + vmstat.active_count + vmstat.inactive_count + vmstat.free_count;
double wired = vmstat.wire_count / total;
double active = vmstat.active_count / total;
double inactive = vmstat.inactive_count / total;
double free = vmstat.free_count / total;
printf("Total = %8d pages\n", vmstat.wire_count + vmstat.active_count + vmstat.inactive_count + vmstat.free_count);
printf("\n");
printf("Wired = %8d bytes\n", vmstat.wire_count * pagesize);
printf("Active = %8d bytes\n", vmstat.active_count * pagesize);
printf("Inactive = %8d bytes\n", vmstat.inactive_count * pagesize);
printf("Free = %8d bytes\n", vmstat.free_count * pagesize);
printf("\n");
printf("Total = %8d bytes\n", (vmstat.wire_count + vmstat.active_count + vmstat.inactive_count + vmstat.free_count) * pagesize);
printf("\n");
printf("Wired = %0.2f %%\n", wired * 100.0);
printf("Active = %0.2f %%\n", active * 100.0);
printf("Inactive = %0.2f %%\n", inactive * 100.0);
printf("Free = %0.2f %%\n", free * 100.0);
printf("\n");
mib[0] = CTL_HW;
mib[1] = HW_PHYSMEM;
length = sizeof(result);
if (sysctl(mib, 2, &result, &length, NULL, 0) < 0)
{
perror("getting physical memory");
}
printf("Physical memory = %8d bytes\n", result);
mib[0] = CTL_HW;
mib[1] = HW_USERMEM;
length = sizeof(result);
if (sysctl(mib, 2, &result, &length, NULL, 0) < 0)
{
perror("getting user memory");
}
printf("User memory = %8d bytes\n", result);
printf("\n");
}
void printProcessorInfo()
{
size_t length;
int mib[6];
int result;
printf("Processor Info\n");
printf("--------------\n");
mib[0] = CTL_HW;
mib[1] = HW_CPU_FREQ;
length = sizeof(result);
if (sysctl(mib, 2, &result, &length, NULL, 0) < 0)
{
perror("getting cpu frequency");
}
printf("CPU Frequency = %d hz\n", result);
mib[0] = CTL_HW;
mib[1] = HW_BUS_FREQ;
length = sizeof(result);
if (sysctl(mib, 2, &result, &length, NULL, 0) < 0)
{
perror("getting bus frequency");
}
printf("Bus Frequency = %d hz\n", result);
printf("\n");
}
int printBatteryInfo()
{
CFTypeRef blob = IOPSCopyPowerSourcesInfo();
CFArrayRef sources = IOPSCopyPowerSourcesList(blob);
CFDictionaryRef pSource = NULL;
const void *psValue;
int numOfSources = CFArrayGetCount(sources);
if (numOfSources == 0) {
perror("Error getting battery info");
return 1;
}
printf("Battery Info\n");
printf("------------\n");
for (int i = 0 ; i < numOfSources ; i++)
{
pSource = IOPSGetPowerSourceDescription(blob, CFArrayGetValueAtIndex(sources, i));
if (!pSource) {
perror("Error getting battery info");
return 2;
}
psValue = (CFStringRef)CFDictionaryGetValue(pSource, CFSTR(kIOPSNameKey));
int curCapacity = 0;
int maxCapacity = 0;
int percent;
psValue = CFDictionaryGetValue(pSource, CFSTR(kIOPSCurrentCapacityKey));
CFNumberGetValue((CFNumberRef)psValue, kCFNumberSInt32Type, &curCapacity);
psValue = CFDictionaryGetValue(pSource, CFSTR(kIOPSMaxCapacityKey));
CFNumberGetValue((CFNumberRef)psValue, kCFNumberSInt32Type, &maxCapacity);
percent = (int)((double)curCapacity/(double)maxCapacity * 100);
printf ("powerSource %d of %d: percent: %d/%d = %d%%\n", i+1, CFArrayGetCount(sources), curCapacity, maxCapacity, percent);
printf("\n");
}
/* Release the CF objects we copied */
CFRelease(sources);
CFRelease(blob);
return 0;
}
int printProcessInfo() {
int mib[5];
struct kinfo_proc *procs = NULL, *newprocs;
int i, st, nprocs;
size_t miblen, size;
/* Set up sysctl MIB */
mib[0] = CTL_KERN;
mib[1] = KERN_PROC;
mib[2] = KERN_PROC_ALL;
mib[3] = 0;
miblen = 4;
/* Get initial sizing */
st = sysctl(mib, miblen, NULL, &size, NULL, 0);
/* Repeat until we get them all ... */
do {
/* Room to grow */
size += size / 10;
newprocs = realloc(procs, size);
if (!newprocs) {
if (procs) {
free(procs);
}
perror("Error: realloc failed.");
return (0);
}
procs = newprocs;
st = sysctl(mib, miblen, procs, &size, NULL, 0);
} while (st == -1 && errno == ENOMEM);
if (st != 0) {
perror("Error: sysctl(KERN_PROC) failed.");
return (0);
}
/* Do we match the kernel? */
assert(size % sizeof(struct kinfo_proc) == 0);
nprocs = size / sizeof(struct kinfo_proc);
if (!nprocs) {
perror("Error: printProcessInfo.");
return(0);
}
printf(" PID\tName\n");
printf("-----\t--------------\n");
for (i = nprocs-1; i >=0; i--) {
printf("%5d\t%s\n",(int)procs[i].kp_proc.p_pid, procs[i].kp_proc.p_comm);
}
free(procs);
return (0);
}
int main(int argc, char **argv)
{
printf("iPhone Hardware Info\n");
printf("====================\n");
printf("\n");
printMemoryInfo();
printProcessorInfo();
printBatteryInfo();
printProcessInfo();
return (0);
}
No. The iPhone SDK does not include IOKit as part of the default header search path, so it's unlikely that an app that uses it would be approved for the App Store. (It works in the simulator if you find and link IOKit manually, but a 30 second attempt to compile it for a device failed)
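If all the stock-SDK app actually needs from that listing is the battery reading, the public UIKit API covers it without IOKit; a minimal sketch:

#import <UIKit/UIKit.h>

/* batteryLevel is 0.0 to 1.0, or -1.0 when monitoring is off/unknown */
UIDevice *device = [UIDevice currentDevice];
device.batteryMonitoringEnabled = YES;
float level = device.batteryLevel;
if (level >= 0.0f)
    printf("Battery: %.0f%%\n", level * 100.0f);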

How to get raw data in printf (buffer) in audio recording on iPhone

I need the raw sample data (values like 0/1):
static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
// AudioQueueBufferRef inBuffer,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
////AudioHelper *remoteIOplayer = (AudioHelper *)inRefCon;
signalTime = CFAbsoluteTimeGetCurrent();
AudioBufferList list;
// redundant
list.mNumberBuffers = 1;
list.mBuffers[0].mData = sampleBuffer;
list.mBuffers[0].mDataByteSize = 2 * inNumberFrames;
list.mBuffers[0].mNumberChannels = 1;
ioData = &list;
printf("No buffers: %d, buffer length: %d bus number: %d\n", ioData->mNumberBuffers, ioData->mBuffers[0].mData, inBusNumber);
SInt16* buf = (SInt16 *)ioData->mBuffers[0].mData;
for (int i = 0; i < ioData->mBuffers[0].mDataByteSize / 2; i += 2) {
printf("\n%d\n%d ", buf[i], buf[i+1]);
}
return noErr;
}
}