It only plays through the ear speaker (the receiver)!
I use Remote IO for playback.
OSStatus status;

// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;

// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);

// Get audio units
status = AudioComponentInstanceNew(inputComponent, &audioUnit);

// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(audioUnit,
                              kAudioOutputUnitProperty_EnableIO,
                              kAudioUnitScope_Input,
                              kInputBus,
                              &flag,
                              sizeof(flag));

// Enable IO for playback
status = AudioUnitSetProperty(audioUnit,
                              kAudioOutputUnitProperty_EnableIO,
                              kAudioUnitScope_Output,
                              kOutputBus,
                              &flag,
                              sizeof(flag));

// Describe format
audioFormat.mSampleRate = 44100;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;

// Apply format
status = AudioUnitSetProperty(audioUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Output,
                              kInputBus,
                              &audioFormat,
                              sizeof(audioFormat));
status = AudioUnitSetProperty(audioUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Input,
                              kOutputBus,
                              &audioFormat,
                              sizeof(audioFormat));

AURenderCallbackStruct callbackStruct;

// Set output callback
callbackStruct.inputProc = playbackCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(audioUnit,
                              kAudioUnitProperty_SetRenderCallback,
                              //kAudioUnitScope_Global,
                              kAudioUnitScope_Output,
                              kOutputBus,
                              &callbackStruct,
                              sizeof(callbackStruct));

// Set input callback
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(audioUnit,
                              kAudioOutputUnitProperty_SetInputCallback,
                              //kAudioUnitScope_Global,
                              kAudioUnitScope_Input,
                              kInputBus,
                              &callbackStruct,
                              sizeof(callbackStruct));

// Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
flag = 0;
status = AudioUnitSetProperty(audioUnit,
                              kAudioUnitProperty_ShouldAllocateBuffer,
                              kAudioUnitScope_Output,
                              kInputBus,
                              &flag,
                              sizeof(flag));

/* // TODO: Allocate our own buffers if we want */

// Initialise
status = AudioUnitInitialize(audioUnit);
AudioUnitSetParameter(audioUnit,
                      kHALOutputParam_Volume,
                      kAudioUnitScope_Input,
                      kInputBus,
                      1, 0);
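(Nothing in this snippet starts the unit; once AudioUnitInitialize has succeeded, I/O presumably gets started elsewhere with:)
status = AudioOutputUnitStart(audioUnit);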
Before playing the audio file, set the AVAudioSession category to AVAudioSessionCategoryPlayback:
NSError *error = nil;
AVAudioSession *audioSession = [AVAudioSession sharedInstance];
[audioSession setCategory:AVAudioSessionCategoryPlayback error:&error];
// Activate the session
[audioSession setActive:YES error:&error];
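If you need simultaneous recording and playback you would use AVAudioSessionCategoryPlayAndRecord instead, but note that on iPhone this category routes output to the receiver (the ear speaker) by default, which matches the symptom above. A sketch of forcing the route to the loudspeaker (overrideOutputAudioPort: is available from iOS 6):
NSError *error = nil;
AVAudioSession *session = [AVAudioSession sharedInstance];
[session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];
// Send output to the built-in loudspeaker instead of the receiver
[session overrideOutputAudioPort:AVAudioSessionPortOverrideSpeaker error:&error];
[session setActive:YES error:&error];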
I have an Objective-C example of an AURenderCallbackStruct, and I would like to do the same in Swift. I read that it's not possible; is that correct?
Thank you.
You can declare a callback function in Swift 4 like the following.
func recordingCallback(
inRefCon:UnsafeMutableRawPointer,
ioActionFlags:UnsafeMutablePointer<AudioUnitRenderActionFlags>,
inTimeStamp:UnsafePointer<AudioTimeStamp>,
inBusNumber:UInt32,
inNumberFrames:UInt32,
ioData:UnsafeMutablePointer<AudioBufferList>?) -> OSStatus {
return noErr
}
Note this must be a global function (or a non-capturing closure), not a method inside a class.
You can then set your callback function with the following code:
var callbackStruct = AURenderCallbackStruct()
callbackStruct.inputProc = recordingCallback
callbackStruct.inputProcRefCon = nil
status = AudioUnitSetProperty(remoteIOUnit!,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
bus1,
&callbackStruct,
UInt32(MemoryLayout<AURenderCallbackStruct>.size));
if (status != noErr) {
return status // Do something with the error
}
It's possible. Use the following code for your Audio Unit settings.
init() {
var status: OSStatus
do {
try AVAudioSession.sharedInstance().setPreferredIOBufferDuration(preferredIOBufferDuration)
} catch let error as NSError {
print(error)
}
var desc: AudioComponentDescription = AudioComponentDescription()
desc.componentType = kAudioUnitType_Output
desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO
desc.componentFlags = 0
desc.componentFlagsMask = 0
desc.componentManufacturer = kAudioUnitManufacturer_Apple
let inputComponent: AudioComponent = AudioComponentFindNext(nil, &desc)
status = AudioComponentInstanceNew(inputComponent, &audioUnit)
checkStatus(status)
var flag = UInt32(1)
status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, UInt32(sizeof(UInt32)))
checkStatus(status)
status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &flag, UInt32(sizeof(UInt32)))
checkStatus(status)
var audioFormat = AudioStreamBasicDescription()
audioFormat.mSampleRate = 8000
audioFormat.mFormatID = kAudioFormatLinearPCM
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked
audioFormat.mFramesPerPacket = 1
audioFormat.mChannelsPerFrame = 1
audioFormat.mBitsPerChannel = 16
audioFormat.mBytesPerPacket = 2
audioFormat.mBytesPerFrame = 2
status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &audioFormat, UInt32(sizeof(AudioStreamBasicDescription)))
checkStatus(status)
try! AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayAndRecord)
status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &audioFormat, UInt32(sizeof(AudioStreamBasicDescription)))
checkStatus(status)
// Set input/recording callback
var inputCallbackStruct = AURenderCallbackStruct(inputProc: recordingCallback, inputProcRefCon: UnsafeMutablePointer(unsafeAddressOf(self)))
AudioUnitSetProperty(audioUnit, AudioUnitPropertyID(kAudioOutputUnitProperty_SetInputCallback), AudioUnitScope(kAudioUnitScope_Global), 1, &inputCallbackStruct, UInt32(sizeof(AURenderCallbackStruct)))
// Set output/renderer/playback callback
var renderCallbackStruct = AURenderCallbackStruct(inputProc: playbackCallback, inputProcRefCon: UnsafeMutablePointer(unsafeAddressOf(self)))
AudioUnitSetProperty(audioUnit, AudioUnitPropertyID(kAudioUnitProperty_SetRenderCallback), AudioUnitScope(kAudioUnitScope_Global), 0, &renderCallbackStruct, UInt32(sizeof(AURenderCallbackStruct)))
flag = 0
status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_ShouldAllocateBuffer, kAudioUnitScope_Output, kInputBus, &flag, UInt32(sizeof(UInt32)))
}
I'm trying to create my own custom sound-effects Audio Unit based on the input from the mic. The application allows simultaneous input/output from the microphone to the speaker. I can apply effects and it works in the simulator, but when I test on the iPhone I can't hear anything. I'm pasting my code in case anyone can help:
- (id) init{
self = [super init];
OSStatus status;
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &audioUnit);
checkStatus(status);
// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
checkStatus(status);
// Enable IO for playback
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
checkStatus(status);
// Describe format
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;
// Apply format
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status);
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status);
// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus,
&callbackStruct,
sizeof(callbackStruct));
checkStatus(status);
// Set output callback
callbackStruct.inputProc = playbackCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
kOutputBus,
&callbackStruct,
sizeof(callbackStruct));
checkStatus(status);
// Allocate our own buffers (1 channel, 16 bits per sample, thus 16 bits per frame, thus 2 bytes per frame).
// In practice the buffers contain 512 frames; if this changes, it will be handled in processAudio.
tempBuffer.mNumberChannels = 1;
tempBuffer.mDataByteSize = 512 * 2;
tempBuffer.mData = malloc( 512 * 2 );
// Initialise
status = AudioUnitInitialize(audioUnit);
checkStatus(status);
return self;
}
This callback is called when new audio data from the microphone is available, but it is never entered when I test on the iPhone:
static OSStatus recordingCallback(void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp,
                                  UInt32 inBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList *ioData) {
AudioBuffer buffer;
buffer.mNumberChannels = 1;
buffer.mDataByteSize = inNumberFrames * 2;
buffer.mData = malloc( inNumberFrames * 2 );
// Put buffer in a AudioBufferList
AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0] = buffer;
// Then:
// Obtain recorded samples
OSStatus status;
status = AudioUnitRender([iosAudio audioUnit],
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&bufferList);
checkStatus(status);
// Now, we have the samples we just read sitting in buffers in bufferList
// Process the new data
[iosAudio processAudio:&bufferList];
// release the malloc'ed data in the buffer we created earlier
free(bufferList.mBuffers[0].mData);
return noErr;
}
I solved my problem. I simply needed to initialize the AudioSession before playing/recording. I did so with the following code:
OSStatus status;
AudioSessionInitialize(NULL, NULL, NULL, self);
UInt32 sessionCategory = kAudioSessionCategory_PlayAndRecord;
status = AudioSessionSetProperty (kAudioSessionProperty_AudioCategory,
sizeof (sessionCategory),
&sessionCategory);
if (status != kAudioSessionNoError)
{
if (status == kAudioServicesUnsupportedPropertyError) {
NSLog(@"AudioSessionInitialize failed: unsupportedPropertyError");
} else if (status == kAudioServicesBadPropertySizeError) {
NSLog(@"AudioSessionInitialize failed: badPropertySizeError");
} else if (status == kAudioServicesBadSpecifierSizeError) {
NSLog(@"AudioSessionInitialize failed: badSpecifierSizeError");
} else if (status == kAudioServicesSystemSoundUnspecifiedError) {
NSLog(@"AudioSessionInitialize failed: systemSoundUnspecifiedError");
} else if (status == kAudioServicesSystemSoundClientTimedOutError) {
NSLog(@"AudioSessionInitialize failed: systemSoundClientTimedOutError");
} else {
NSLog(@"AudioSessionInitialize failed! %ld", (long)status);
}
}
AudioSessionSetActive(TRUE);
...
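For reference, the C-based AudioSession API used above was deprecated in iOS 7; a roughly equivalent setup with AVAudioSession (a sketch, not part of the original answer) looks like this:
NSError *error = nil;
AVAudioSession *session = [AVAudioSession sharedInstance];
// PlayAndRecord corresponds to kAudioSessionCategory_PlayAndRecord
[session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];
[session setActive:YES error:&error];
if (error) {
    NSLog(@"Audio session setup failed: %@", [error localizedDescription]);
}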
I've been investigating the aurioTouch2 sample code, but I want to record everything to a file, and aurioTouch doesn't provide that possibility. I tried to record data using this code in FFTBufferManager.cpp, in void FFTBufferManager::GrabAudioData(AudioBufferList *inBL):
ExtAudioFileRef cafFile;
AudioStreamBasicDescription cafDesc;
cafDesc.mBitsPerChannel = 16;
cafDesc.mBytesPerFrame = 4;
cafDesc.mBytesPerPacket = 4;
cafDesc.mChannelsPerFrame = 2;
cafDesc.mFormatFlags = 0;
cafDesc.mFormatID = 'ima4';
cafDesc.mFramesPerPacket = 1;
cafDesc.mReserved = 0;
cafDesc.mSampleRate = 44100;
CFStringRef refH;
refH = CFStringCreateWithCString(kCFAllocatorDefault, "/var/mobile/Applications/BD596ECF-A6F2-41EB-B4CE-3A9644B1C26A/Documents/voice2.caff", kCFStringEncodingUTF8);
CFURLRef destinationURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault,
refH,
kCFURLPOSIXPathStyle,
false);
OSStatus status = ExtAudioFileCreateWithURL(
destinationURL, // inURL
'caff', // inFileType
&cafDesc, // inStreamDesc
NULL, // inChannelLayout
kAudioFileFlags_EraseFile, // inFlags
&cafFile // outExtAudioFile
); // returns 0xFFFFFFCE
ExtAudioFileWrite(cafFile, mNumberFrames, inBL);
And this works, but I am using AudioBufferList *inBL, which is only a small part of all the audio data (about one second); this function is called roughly every second to analyze new audio data from the microphone. So it would be great if I could append the data from one AudioBufferList to another AudioBufferList.
Or maybe somebody knows another approach.
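(As an aside to the question: appending the data itself is straightforward if you copy the bytes into a growable buffer. A sketch, assuming an Objective-C++ (.mm) context and a hypothetical long-lived member NSMutableData *accumulated:)
// Append this call's audio bytes to the running recording
for (UInt32 i = 0; i < inBL->mNumberBuffers; i++) {
    [accumulated appendBytes:inBL->mBuffers[i].mData
                      length:inBL->mBuffers[i].mDataByteSize];
}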
You should set up a new AudioUnit to record the audio (with its own callback function).
OSStatus status;
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &mAudioUnit);
// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(mAudioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
// Enable IO for playback
status = AudioUnitSetProperty(mAudioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
// Describe format
AudioStreamBasicDescription audioFormat={0};
audioFormat.mSampleRate = kSampleRate;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;
// Apply format
status = AudioUnitSetProperty(mAudioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));
status = AudioUnitSetProperty(mAudioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = (__bridge void *)self;
status = AudioUnitSetProperty(mAudioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus,
&callbackStruct,
sizeof(callbackStruct));
// Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
flag = 0;
status = AudioUnitSetProperty(mAudioUnit,
kAudioUnitProperty_ShouldAllocateBuffer,
kAudioUnitScope_Output,
kInputBus,
&flag,
sizeof(flag));
// Initialize the audio file
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *documentsDirectory = [paths objectAtIndex:0];
NSString *destinationFilePath = [[NSString alloc] initWithFormat:@"%@/output.caf", documentsDirectory];
NSLog(@">>> %@\n", destinationFilePath);
CFURLRef destinationURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, (__bridge CFStringRef)destinationFilePath, kCFURLPOSIXPathStyle, false);
OSStatus setupErr = ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &audioFormat, NULL, kAudioFileFlags_EraseFile, &mAudioFileRef);
CFRelease(destinationURL);
NSAssert(setupErr == noErr, @"Couldn't create file for writing");
setupErr = ExtAudioFileSetProperty(mAudioFileRef, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &audioFormat);
NSAssert(setupErr == noErr, @"Couldn't set the file's client data format");
setupErr = ExtAudioFileWriteAsync(mAudioFileRef, 0, NULL);
NSAssert(setupErr == noErr, @"Couldn't initialize write buffers for audio file");
CheckError(AudioUnitInitialize(mAudioUnit), "AudioUnitInitialize");
CheckError(AudioOutputUnitStart(mAudioUnit), "AudioOutputUnitStart");
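The answer wires recordingCallback into kInputBus but doesn't show the callback itself. Here is a minimal sketch of one that renders the mic input and appends it to the file; it assumes a hypothetical owning class whose mAudioUnit and mAudioFileRef ivars are the ones configured above, and the 16-bit mono format shown:
static OSStatus recordingCallback(void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp,
                                  UInt32 inBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList *ioData)
{
    Recorder *recorder = (__bridge Recorder *)inRefCon; // hypothetical owning class
    // ShouldAllocateBuffer was disabled above, so supply our own buffer
    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0].mNumberChannels = 1;
    bufferList.mBuffers[0].mDataByteSize = inNumberFrames * 2; // 16-bit mono
    bufferList.mBuffers[0].mData = malloc(inNumberFrames * 2);
    OSStatus status = AudioUnitRender(recorder->mAudioUnit,
                                      ioActionFlags,
                                      inTimeStamp,
                                      inBusNumber,
                                      inNumberFrames,
                                      &bufferList);
    if (status == noErr) {
        // ExtAudioFileWriteAsync copies the data; it is safe on the render
        // thread because of the priming ExtAudioFileWriteAsync(..., 0, NULL) above
        ExtAudioFileWriteAsync(recorder->mAudioFileRef, inNumberFrames, &bufferList);
    }
    free(bufferList.mBuffers[0].mData);
    return status;
}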
I am using the code below to initialize my audio components.
-(void) startListeningWithCoreAudio
{
NSError *error = nil;
[[AVAudioSession sharedInstance] setCategory: AVAudioSessionCategoryPlayAndRecord error:&error];
if (error)
NSLog(#"error setting up audio session: %#", [error localizedDescription]);
[[AVAudioSession sharedInstance] setDelegate:self];
OSStatus status = AudioSessionSetActive(YES);
checkStatus(status);
// Find the apple mic
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
AudioComponent inputComponent = AudioComponentFindNext( NULL, &desc );
status = AudioComponentInstanceNew( inputComponent, &kAudioUnit );
checkStatus( status );
// enable mic output as our input
UInt32 flag = 1;
status = AudioUnitSetProperty( kAudioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag) );
checkStatus(status);
// Define mic output audio format
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 16000.0;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;
status = AudioUnitSetProperty( kAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &audioFormat, sizeof(audioFormat) );
checkStatus(status);
// Define our callback methods
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty( kAudioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, kInputBus, &callbackStruct, sizeof(callbackStruct) );
checkStatus(status);
// Bypass voice processing
UInt32 audiobypassProcessing = [[NSUserDefaults standardUserDefaults] boolForKey:VOICE_BY_PASS_PROCESSING];
status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_BypassVoiceProcessing,
kAudioUnitScope_Global, kInputBus, &audiobypassProcessing, sizeof(audiobypassProcessing));
checkStatus(status);
// Automatic Gain Control
UInt32 audioAGC = [[NSUserDefaults standardUserDefaults]boolForKey:VOICE_AGC];
status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_VoiceProcessingEnableAGC,
kAudioUnitScope_Global, kInputBus, &audioAGC, sizeof(audioAGC));
checkStatus(status);
// Duck non-voice audio
UInt32 audioDucking = [[NSUserDefaults standardUserDefaults]boolForKey:VOICE_DUCKING];
status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_DuckNonVoiceAudio,
kAudioUnitScope_Global, kInputBus, &audioDucking, sizeof(audioDucking));
checkStatus(status);
//Audio Quality
UInt32 quality = [[NSUserDefaults standardUserDefaults]integerForKey:VOICE_QUALITY];
status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_VoiceProcessingQuality,
kAudioUnitScope_Global, kInputBus, &quality, sizeof(quality));
checkStatus(status);
status = AudioUnitInitialize(kAudioUnit);
checkStatus(status);
status = AudioOutputUnitStart( kAudioUnit );
checkStatus(status);
UInt32 audioRoute = (UInt32)kAudioSessionOverrideAudioRoute_Speaker;
status = AudioSessionSetProperty(kAudioSessionProperty_OverrideAudioRoute, sizeof (audioRoute), &audioRoute);
checkStatus(status);
}
-(void) stopListeningWithCoreAudio
{
OSStatus status = AudioUnitUninitialize( kAudioUnit );
checkStatus(status);
status = AudioOutputUnitStop( kAudioUnit );
checkStatus( status );
// if(kAudioUnit)
// {
// status = AudioComponentInstanceDispose(kAudioUnit);
// checkStatus(status);
// kAudioUnit = nil;
// }
status = AudioSessionSetActive(NO);
checkStatus(status);
NSError *error = nil;
[[AVAudioSession sharedInstance] setCategory: AVAudioSessionCategorySoloAmbient error:&error];
if (error)
NSLog(#"error setting up audio session: %#", [error localizedDescription]);
}
It works fine the first time: startListeningWithCoreAudio is called from a button-press event and records/processes audio correctly. On another event I call stopListeningWithCoreAudio to stop recording/processing.
The problem comes when I call startListeningWithCoreAudio again: it throws errors from two functions, AudioUnitInitialize and AudioOutputUnitStart, both called from startListeningWithCoreAudio.
Can anyone help me figure out what the problem is?
I found the solution.
If we call the functions below back to back, it creates the problem.
extern OSStatus AudioUnitUninitialize(AudioUnit inUnit)
extern OSStatus AudioComponentInstanceDispose(AudioComponentInstance inInstance)
So I called the dispose method on the main thread, as follows.
[self performSelectorOnMainThread:@selector(disposeCoreAudio) withObject:nil waitUntilDone:NO];
-(void) disposeCoreAudio
{
OSStatus status = AudioComponentInstanceDispose(kAudioUnit);
kAudioUnit = nil;
}
That solved the problem. So the correct sequence is: stop recording, uninitialize the recorder, and dispose of the recorder on the main thread.
One possible problem is that your code is trying to uninitialize a running audio unit before stopping it.
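Putting both answers together, a sketch of the corrected teardown order (using the names from the question's code):
-(void) stopListeningWithCoreAudio
{
    // Stop the running unit first...
    OSStatus status = AudioOutputUnitStop(kAudioUnit);
    checkStatus(status);
    // ...then uninitialize it...
    status = AudioUnitUninitialize(kAudioUnit);
    checkStatus(status);
    // ...and dispose of it on the main thread rather than back to back
    [self performSelectorOnMainThread:@selector(disposeCoreAudio)
                           withObject:nil
                        waitUntilDone:NO];
    status = AudioSessionSetActive(NO);
    checkStatus(status);
}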
When I receive data from the microphone via Core Audio, sometimes the buffers appear to hold only one sample and sometimes they hold 20. Sometimes the sample values are 0.00000, sometimes they are NaN, and sometimes they are what you would expect.
What is the problem?
Here is my code:
-(void)startListeningWithFrequency:(float)frequency;
{
OSStatus status;
//AudioComponentInstance audioUnit;
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
status = AudioComponentInstanceNew( inputComponent, &audioUnit);
checkStatus(status);
UInt32 flag = 1;
status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input,kInputBus, &flag, sizeof(flag));
checkStatus(status);
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status);
//status = AudioUnitSetProperty(audioUnit,
// kAudioUnitProperty_StreamFormat,
// kAudioUnitScope_Input,
// kOutputBus,
// &audioFormat,
// sizeof(audioFormat));
checkStatus(status);
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus, &callbackStruct, sizeof(callbackStruct));
checkStatus(status);
/* UInt32 shouldAllocateBuffer = 1;
AudioUnitSetProperty(audioUnit, kAudioUnitProperty_ShouldAllocateBuffer, kAudioUnitScope_Global, 1, &shouldAllocateBuffer, sizeof(shouldAllocateBuffer));
*/
//float bufferLength = 0.005;
//AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(bufferLength), &bufferLength);
status = AudioOutputUnitStart(audioUnit);
}
and the callback:
static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
AudioBuffer buffer;
buffer.mNumberChannels = 1;
buffer.mDataByteSize = inNumberFrames * 2;
NSLog(#"%d",inNumberFrames);
buffer.mData = malloc( inNumberFrames * 2 );
// Put buffer in a AudioBufferList
AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0] = buffer;
OSStatus status;
status = AudioUnitRender(audioUnit,
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&bufferList);
checkStatus(status);
double *q = (double *)(&bufferList)->mBuffers[0].mData;
for(int i=0; i < strlen((const char *)(&bufferList)->mBuffers[0].mData); i++)
{
//i sometimes doesn't get past 0, sometimes goes into 20s
NSLog(#"%f",q[i]);//returns NaN, 0.00, or some times actual data
}
}
Any help would be appreciated,
Thank you,
nonono
Since you are passing the kAudioFormatFlagIsSignedInteger flag for the stream format, your samples are just that: 16-bit signed integers (int16_t), not floats. You either need to treat the samples that way or use the kAudioFormatFlagIsFloat flag instead (and then you would need float rather than double as the datatype, AFAIK). Note also that strlen() is not a valid way to get the buffer length: it stops at the first zero byte, which is why your loop sometimes exits after zero or one iterations; use mDataByteSize instead.
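For illustration, here is a corrected read loop for the question's 16-bit integer format (a sketch; bufferList and inNumberFrames are the variables from the question's callback):
// Interpret the rendered bytes as 16-bit signed integers (one sample per frame in mono)
SInt16 *samples = (SInt16 *)bufferList.mBuffers[0].mData;
for (UInt32 i = 0; i < inNumberFrames; i++) {
    NSLog(@"%d", samples[i]); // or normalize to [-1, 1): samples[i] / 32768.0f
}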