I am trying to play back recorded content while it is still being recorded. Currently I am using AVAudioRecorder for recording and AVAudioPlayer for playing.
When I try to play the content while recording, nothing plays. Please find the pseudocode for what I am doing below.
If I do the same after stopping the recording, everything works fine.
AVAudioRecorder *recorder; // initialized properly elsewhere
[recorder record];
NSError *error = nil;
NSURL *recordingPathUrl; // contains the recording path
AVAudioPlayer *audioPlayer = [[AVAudioPlayer alloc] initWithContentsOfURL:recordingPathUrl
error:&error];
[audioPlayer prepareToPlay];
[audioPlayer play];
Could anybody please let me know your thoughts or ideas?
This is achievable. Use this link and download the sample:
https://code.google.com/p/ios-coreaudio-example/downloads/detail?name=Aruts.zip&can=2&q=
That sample will play sound through the speaker but will not record it; I have implemented the record functionality as well. The full code is described below.
In the .h file:
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
#ifndef max
#define max( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef min
#define min( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
@interface IosAudioController : NSObject {
AudioComponentInstance audioUnit;
AudioBuffer tempBuffer; // this will hold the latest data from the microphone
ExtAudioFileRef mAudioFileRef;
}
@property (readonly) ExtAudioFileRef mAudioFileRef;
@property (readonly) AudioComponentInstance audioUnit;
@property (readonly) AudioBuffer tempBuffer;
- (void) start;
- (void) stop;
- (void) processAudio: (AudioBufferList*) bufferList;
@end
// setup a global iosAudio variable, accessible everywhere
extern IosAudioController* iosAudio;
In the .m file:
#import "IosAudioController.h"
#import <AudioToolbox/AudioToolbox.h>
#import <AVFoundation/AVFoundation.h>
#define kOutputBus 0
#define kInputBus 1
IosAudioController* iosAudio;
void checkStatus(int status){
if (status) {
printf("Status not 0! %d\n", status);
// exit(1);
}
}
static void printAudioUnitRenderActionFlags(AudioUnitRenderActionFlags * ioActionFlags)
{
if (*ioActionFlags == 0) {
printf("AudioUnitRenderActionFlags(%lu) ", *ioActionFlags);
return;
}
printf("AudioUnitRenderActionFlags(%lu): ", *ioActionFlags);
if (*ioActionFlags & kAudioUnitRenderAction_PreRender) printf("kAudioUnitRenderAction_PreRender ");
if (*ioActionFlags & kAudioUnitRenderAction_PostRender) printf("kAudioUnitRenderAction_PostRender ");
if (*ioActionFlags & kAudioUnitRenderAction_OutputIsSilence) printf("kAudioUnitRenderAction_OutputIsSilence ");
if (*ioActionFlags & kAudioOfflineUnitRenderAction_Preflight) printf("kAudioOfflineUnitRenderAction_Prefli ght ");
if (*ioActionFlags & kAudioOfflineUnitRenderAction_Render) printf("kAudioOfflineUnitRenderAction_Render");
if (*ioActionFlags & kAudioOfflineUnitRenderAction_Complete) printf("kAudioOfflineUnitRenderAction_Complete ");
if (*ioActionFlags & kAudioUnitRenderAction_PostRenderError) printf("kAudioUnitRenderAction_PostRenderError ");
if (*ioActionFlags & kAudioUnitRenderAction_DoNotCheckRenderArgs) printf("kAudioUnitRenderAction_DoNotCheckRenderArgs ");
}
/**
This callback is called when new audio data from the microphone is
available.
*/
static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
double timeInSeconds = inTimeStamp->mSampleTime / 44100.00;
printf("\n%fs inBusNumber: %lu inNumberFrames: %lu ", timeInSeconds, inBusNumber, inNumberFrames);
printAudioUnitRenderActionFlags(ioActionFlags);
// Because of the way our audio format (setup below) is chosen:
// we only need 1 buffer, since it is mono
// Samples are 16 bits = 2 bytes.
// 1 frame includes only 1 sample
AudioBuffer buffer;
buffer.mNumberChannels = 1;
buffer.mDataByteSize = inNumberFrames * 2;
buffer.mData = malloc( inNumberFrames * 2 );
// Put the buffer in an AudioBufferList
AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0] = buffer;
// Then:
// Obtain recorded samples
OSStatus status;
status = AudioUnitRender([iosAudio audioUnit],
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&bufferList);
checkStatus(status);
// Now, we have the samples we just read sitting in buffers in bufferList
// Process the new data
[iosAudio processAudio:&bufferList];
// write the captured samples asynchronously to the audio file
ExtAudioFileWriteAsync([iosAudio mAudioFileRef], inNumberFrames, &bufferList);
// release the malloc'ed data in the buffer we created earlier
free(bufferList.mBuffers[0].mData);
return noErr;
}
/**
This callback is called when the audioUnit needs new data to play through the
speakers. If you don't have any, just don't write anything in the buffers
*/
static OSStatus playbackCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
// Notes: ioData contains buffers (may be more than one!)
// Fill them up as much as you can. Remember to set the size value in each buffer to match how
// much data is in the buffer.
for (int i=0; i < ioData->mNumberBuffers; i++) { // in practice we will only ever have 1 buffer, since audio format is mono
AudioBuffer buffer = ioData->mBuffers[i];
// NSLog(#" Buffer %d has %d channels and wants %d bytes of data.", i, buffer.mNumberChannels, buffer.mDataByteSize);
// copy temporary buffer data to output buffer
UInt32 size = min(buffer.mDataByteSize, [iosAudio tempBuffer].mDataByteSize); // don't copy more data than we have, or than fits
memcpy(buffer.mData, [iosAudio tempBuffer].mData, size);
buffer.mDataByteSize = size; // indicate how much data we wrote in the buffer
// uncomment to hear random noise
/*
UInt16 *frameBuffer = buffer.mData;
for (int j = 0; j < inNumberFrames; j++) {
frameBuffer[j] = rand();
}
*/
}
return noErr;
}
@implementation IosAudioController
@synthesize audioUnit, tempBuffer, mAudioFileRef;
/**
Initialize the audioUnit and allocate our own temporary buffer.
The temporary buffer will hold the latest data coming in from the microphone,
and will be copied to the output when this is requested.
*/
- (id) init {
self = [super init];
OSStatus status;
AVAudioSession *session = [AVAudioSession sharedInstance];
NSLog(#"%f",session.preferredIOBufferDuration);
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &audioUnit);
checkStatus(status);
// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
checkStatus(status);
// Enable IO for playback
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
checkStatus(status);
// Describe format
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;
// Apply format
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status);
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status);
// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus,
&callbackStruct,
sizeof(callbackStruct));
checkStatus(status);
// Set output callback
callbackStruct.inputProc = playbackCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
kOutputBus,
&callbackStruct,
sizeof(callbackStruct));
checkStatus(status);
// Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
flag = 0;
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_ShouldAllocateBuffer,
kAudioUnitScope_Output,
kInputBus,
&flag,
sizeof(flag));
// set preferred buffer size
Float32 audioBufferSize = (0.023220);
UInt32 size = sizeof(audioBufferSize);
status = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration,
size, &audioBufferSize);
// Allocate our own buffers (1 channel, 16 bits per sample, thus 16 bits per frame, thus 2 bytes per frame).
// In practice the buffers used contain 512 frames; if this changes, it is handled in processAudio.
tempBuffer.mNumberChannels = 1;
tempBuffer.mDataByteSize = 512 * 2;
tempBuffer.mData = malloc( 512 * 2 );
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *documentsDirectory = [paths objectAtIndex:0];
NSString *destinationFilePath = [[NSString alloc] initWithFormat: @"%@/output.caf", documentsDirectory];
NSLog(@">>> %@\n", destinationFilePath);
CFURLRef destinationURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, (CFStringRef)destinationFilePath, kCFURLPOSIXPathStyle, false);
OSStatus setupErr = ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &audioFormat, NULL, kAudioFileFlags_EraseFile, &mAudioFileRef);
CFRelease(destinationURL);
NSAssert(setupErr == noErr, #"Couldn't create file for writing");
setupErr = ExtAudioFileSetProperty(mAudioFileRef, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &audioFormat);
NSAssert(setupErr == noErr, #"Couldn't create file for format");
setupErr = ExtAudioFileWriteAsync(mAudioFileRef, 0, NULL);
NSAssert(setupErr == noErr, #"Couldn't initialize write buffers for audio file");
// Initialise
status = AudioUnitInitialize(audioUnit);
checkStatus(status);
// [NSTimer scheduledTimerWithTimeInterval:5 target:self selector:@selector(stopRecording:) userInfo:nil repeats:NO];
return self;
}
/**
Start the audioUnit. This means data will be provided from
the microphone, and requested for feeding to the speakers, by
use of the provided callbacks.
*/
- (void) start {
OSStatus status = AudioOutputUnitStart(audioUnit);
checkStatus(status);
}
/**
Stop the audioUnit
*/
- (void) stop {
OSStatus status = AudioOutputUnitStop(audioUnit);
checkStatus(status);
[self stopRecording:nil];
}
/**
Change this function to decide what is done with incoming
audio data from the microphone.
Right now we copy it to our own temporary buffer.
*/
- (void) processAudio: (AudioBufferList*) bufferList{
AudioBuffer sourceBuffer = bufferList->mBuffers[0];
// fix tempBuffer size if it's the wrong size
if (tempBuffer.mDataByteSize != sourceBuffer.mDataByteSize) {
free(tempBuffer.mData);
tempBuffer.mDataByteSize = sourceBuffer.mDataByteSize;
tempBuffer.mData = malloc(sourceBuffer.mDataByteSize);
}
// copy incoming audio data to temporary buffer
memcpy(tempBuffer.mData, bufferList->mBuffers[0].mData, bufferList->mBuffers[0].mDataByteSize);
}
- (void)stopRecording:(NSTimer*)theTimer
{
printf("\nstopRecording\n");
OSStatus status = ExtAudioFileDispose(mAudioFileRef);
printf("OSStatus(ExtAudioFileDispose): %ld\n", status);
}
/**
Clean up.
*/
- (void) dealloc {
AudioUnitUninitialize(audioUnit);
free(tempBuffer.mData);
[super dealloc]; // must come last, after we're done touching ivars
}
This will definitely help you.
Another good way of doing this is to download Audio Touch from https://github.com/tkzic/audiograph and look at the Echo function of that application: it repeats your voice as you speak, but it does not record the audio. Add a recording function to it, as described below:
In MixerHostAudio.h:
@property (readwrite) ExtAudioFileRef mRecordFile;
-(void)Record;
-(void)StopRecord;
In MixerHostAudio.m:
// Add these two functions to this class
-(void)Record{
NSString *completeFileNameAndPath = [[NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject] stringByAppendingString:@"/Record.wav"];
//create the url that the recording object needs to reference the file
CFURLRef audioFileURL = CFURLCreateFromFileSystemRepresentation (NULL, (const UInt8 *)[completeFileNameAndPath cStringUsingEncoding:[NSString defaultCStringEncoding]] , strlen([completeFileNameAndPath cStringUsingEncoding:[NSString defaultCStringEncoding]]), false);
AudioStreamBasicDescription dstFormat, clientFormat;
memset(&dstFormat, 0, sizeof(dstFormat));
memset(&clientFormat, 0, sizeof(clientFormat));
AudioFileTypeID fileTypeId = kAudioFileWAVEType;
UInt32 size = sizeof(dstFormat);
dstFormat.mFormatID = kAudioFormatLinearPCM;
// setup the output file format
dstFormat.mSampleRate = 44100.0; // set sample rate
// create a 16-bit, 44.1 kHz stereo format
dstFormat.mChannelsPerFrame = 2;
dstFormat.mBitsPerChannel = 16;
dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 4;
dstFormat.mFramesPerPacket = 1;
dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
// get the client format directly from the mixer unit
UInt32 asbdSize = sizeof (AudioStreamBasicDescription);
AudioUnitGetProperty(mixerUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0, // input bus
&clientFormat,
&asbdSize);
ExtAudioFileCreateWithURL(audioFileURL, fileTypeId, &dstFormat, NULL, kAudioFileFlags_EraseFile, &mRecordFile);
printf("recording\n");
ExtAudioFileSetProperty(mRecordFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
//call this once as this will alloc space on the first call
ExtAudioFileWriteAsync(mRecordFile, 0, NULL);
}
-(void)StopRecord{
ExtAudioFileDispose(mRecordFile);
}
In the micLineInCallback function, add this line at the end, just before return noErr;:
ExtAudioFileWriteAsync([THIS mRecordFile] , inNumberFrames, ioData);
And call these functions from MixerHostViewController.m, in the - (IBAction) playOrStop: (id) sender method.
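For context, here is a minimal sketch of how that playOrStop: action might drive the two new methods. The mixerHostAudio reference and the graph start/stop method names are recalled from the MixerHost sample and should be treated as assumptions:

// Hypothetical sketch: toggle recording together with the AUGraph.
- (IBAction) playOrStop: (id) sender {
    if (self.isPlaying) {
        [mixerHostAudio StopRecord];   // dispose the ExtAudioFileRef
        [mixerHostAudio stopAUGraph];  // assumed MixerHost method
    } else {
        [mixerHostAudio Record];       // create the file and prime async writes
        [mixerHostAudio startAUGraph]; // assumed MixerHost method
    }
    self.isPlaying = !self.isPlaying; // assumed state flag
}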
You'll need to use AudioUnits if you want real-time monitoring of your audio input.
Apple's Audio Unit Hosting Guide
Tutorial on configuring the Remote I/O Audio Unit
The RemoteIO Audio Unit can be used for simultaneous record and play. There are plenty of examples of recording using RemoteIO (aurioTouch) and playing using RemoteIO. Just enable both unit input and unit output, and handle both buffer callbacks. See an example here
Related
My task is to play an audio file that is saved locally in the documents directory, apply an audio effect to that file using an effect Audio Unit, and save a new audio file in the documents directory with that effect applied.
Here is the code I have written so far, but it's not working: the effects are not being applied to the audio. Please suggest what is wrong in this code. Thanks in advance.
- (void) setUpAudioUnits
{
OSStatus setupErr = noErr;
// describe unit
AudioComponentDescription audioCompDesc;
audioCompDesc.componentType = kAudioUnitType_Output;
audioCompDesc.componentSubType = kAudioUnitSubType_RemoteIO;
audioCompDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
audioCompDesc.componentFlags = 0;
audioCompDesc.componentFlagsMask = 0;
// get rio unit from audio component manager
AudioComponent rioComponent = AudioComponentFindNext(NULL, &audioCompDesc);
setupErr = AudioComponentInstanceNew(rioComponent, &remoteIOUnit);
NSAssert (setupErr == noErr, #"Couldn't get RIO unit instance");
// set up the rio unit for playback
UInt32 oneFlag = 1;
AudioUnitElement outputElement = 0;
setupErr =
AudioUnitSetProperty (remoteIOUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
outputElement,
&oneFlag,
sizeof(oneFlag));
NSAssert (setupErr == noErr, #"Couldn't enable RIO output");
// enable rio input
AudioUnitElement inputElement = 1;
// setup an asbd in the iphone canonical format
AudioStreamBasicDescription myASBD;
memset (&myASBD, 0, sizeof (myASBD));
// myASBD.mSampleRate = 44100;
myASBD.mSampleRate = hardwareSampleRate;
myASBD.mFormatID = kAudioFormatLinearPCM;
myASBD.mFormatFlags = kAudioFormatFlagsCanonical;
myASBD.mBytesPerPacket = 4;
myASBD.mFramesPerPacket = 1;
myASBD.mBytesPerFrame = 4;
myASBD.mChannelsPerFrame = 2;
myASBD.mBitsPerChannel = 16;
/*
// set format for output (bus 0) on rio's input scope
*/
setupErr =
AudioUnitSetProperty (remoteIOUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
outputElement,
&myASBD,
sizeof (myASBD));
NSAssert (setupErr == noErr, #"Couldn't set ASBD for RIO on input scope / bus 0");
// song must be an LPCM file, preferably in caf container
// to convert, use /usr/bin/afconvert, like this:
// /usr/bin/afconvert --data LEI16 Girlfriend.m4a song.caf
// read in the entire audio file (NOT recommended)
// better to use a ring buffer: thread or timer fills, render callback drains
NSURL *songURL = [NSURL fileURLWithPath:
[[NSBundle mainBundle] pathForResource: #"song"
ofType: #"caf"]];
AudioFileID songFile;
setupErr = AudioFileOpenURL((CFURLRef) songURL,
kAudioFileReadPermission,
0,
&songFile);
NSAssert (setupErr == noErr, #"Couldn't open audio file");
UInt64 audioDataByteCount;
UInt32 audioDataByteCountSize = sizeof (audioDataByteCount);
setupErr = AudioFileGetProperty(songFile,
kAudioFilePropertyAudioDataByteCount,
&audioDataByteCountSize,
&audioDataByteCount);
NSAssert (setupErr == noErr, #"Couldn't get size property");
musicPlaybackState.audioData = malloc (audioDataByteCount);
musicPlaybackState.audioDataByteCount = audioDataByteCount;
musicPlaybackState.samplePtr = musicPlaybackState.audioData;
NSLog (#"reading %qu bytes from file", audioDataByteCount);
UInt32 bytesRead = (UInt32)audioDataByteCount;
setupErr = AudioFileReadBytes(songFile,
false,
0,
&bytesRead,
musicPlaybackState.audioData);
NSAssert (setupErr == noErr, #"Couldn't read audio data");
NSLog (#"read %d bytes from file", bytesRead);
AudioStreamBasicDescription fileASBD;
UInt32 asbdSize = sizeof (fileASBD);
setupErr = AudioFileGetProperty(songFile,
kAudioFilePropertyDataFormat,
&asbdSize,
&fileASBD);
NSAssert (setupErr == noErr, #"Couldn't get file asbd");
ExtAudioFileCreateWithURL(outputFileURL,
kAudioFileCAFType,
&fileASBD,
NULL,
kAudioFileFlags_EraseFile,
&musicPlaybackState.extAudioFile);
// get the effect unit (a Delay; the variables keep the name "mixer")
AudioComponentDescription mixerDesc;
mixerDesc.componentType = kAudioUnitType_Effect;
mixerDesc.componentSubType = kAudioUnitSubType_Delay;
mixerDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
mixerDesc.componentFlags = 0;
mixerDesc.componentFlagsMask = 0;
// get the effect unit from the audio component manager
AudioComponent mixerComponent = AudioComponentFindNext(NULL, &mixerDesc);
setupErr = AudioComponentInstanceNew(mixerComponent, &mixerUnit);
NSAssert (setupErr == noErr, #"Couldn't get mixer unit instance");
// set up connections and callbacks
// connect mixer bus 0 input to robot voice render callback
effectState.rioUnit = remoteIOUnit;
effectState.sineFrequency = 23;
effectState.sinePhase = 0;
effectState.asbd = myASBD;
// connect mixer bus 1 input to music player callback
AURenderCallbackStruct musicPlayerCallbackStruct;
musicPlayerCallbackStruct.inputProc = MusicPlayerCallback; // callback function
musicPlayerCallbackStruct.inputProcRefCon = &musicPlaybackState;
setupErr =
AudioUnitSetProperty(mixerUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
outputElement,
&musicPlayerCallbackStruct,
sizeof (musicPlayerCallbackStruct));
NSAssert (setupErr == noErr, #"Couldn't set mixer render callback on bus 1");
// direct connect mixer to output
AudioUnitConnection connection;
connection.sourceAudioUnit = mixerUnit;
connection.sourceOutputNumber = outputElement;
connection.destInputNumber = outputElement;
setupErr =
AudioUnitSetProperty(remoteIOUnit,
kAudioUnitProperty_MakeConnection,
kAudioUnitScope_Input,
outputElement,
&connection,
sizeof (connection));
NSAssert (setupErr == noErr, #"Couldn't set mixer-to-RIO connection");
setupErr = AudioUnitInitialize(mixerUnit);
NSAssert (setupErr == noErr, #"Couldn't initialize mixer unit");
setupErr = AudioUnitInitialize(remoteIOUnit);
NSAssert (setupErr == noErr, #"Couldn't initialize RIO unit");
setupErr = AudioOutputUnitStart (remoteIOUnit);
}
When you have an instance of an initialized audio unit, you can apply an effect to sound by calling AudioUnitRender and providing an AudioBufferList to it.
First of all, make sure that the sound is in a format accepted by the Audio Unit. You can get this format by reading the kAudioUnitProperty_StreamFormat property.
If your audio file has a different format than the one you got from the audio unit, you can convert the audio "on the fly" by using ExtAudioFile. To achieve this, set the kExtAudioFileProperty_ClientDataFormat property of the ExtAudioFile to the format you got from kAudioUnitProperty_StreamFormat. Now, when you read the audio file, you will get audio in the needed format.
Also, make sure that the kAudioUnitProperty_ShouldAllocateBuffer property of the Audio Unit is set to 1.
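A minimal sketch of those three setup steps, assuming unit is your initialized effect unit and extFile is an ExtAudioFileRef you opened with ExtAudioFileOpenURL:

// Read the format the unit expects on its input scope (element 0).
AudioStreamBasicDescription unitFormat;
UInt32 asbdSize = sizeof(unitFormat);
AudioUnitGetProperty(unit, kAudioUnitProperty_StreamFormat,
                     kAudioUnitScope_Input, 0, &unitFormat, &asbdSize);
// Ask ExtAudioFile to convert the file's data to that format on every read.
ExtAudioFileSetProperty(extFile, kExtAudioFileProperty_ClientDataFormat,
                        sizeof(unitFormat), &unitFormat);
// Let the unit allocate its own render buffers.
UInt32 shouldAllocate = 1;
AudioUnitSetProperty(unit, kAudioUnitProperty_ShouldAllocateBuffer,
                     kAudioUnitScope_Output, 0,
                     &shouldAllocate, sizeof(shouldAllocate));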
To call AudioUnitRender you must prepare a valid AudioTimeStamp, AudioUnitRenderActionFlags (can be set to 0), and an AudioBufferList. You don't need to allocate memory for the buffers; you just need to provide the number of buffers and their sizes.
AudioBufferList *buffer = malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer));
buffer->mNumberBuffers = 2; // at least 2 buffers for non-interleaved stereo
buffer->mBuffers[0].mDataByteSize = ...; // size of one buffer
buffer->mBuffers[0].mData = NULL; // NULL lets the unit allocate (ShouldAllocateBuffer = 1)
buffer->mBuffers[1].mDataByteSize = ...;
buffer->mBuffers[1].mData = NULL;
AudioUnitRenderActionFlags flags = 0;
AudioTimeStamp timeStamp;
memset(&timeStamp, 0, sizeof(AudioTimeStamp));
timeStamp.mFlags = kAudioTimeStampSampleTimeValid;
UInt32 frames = ...; // number of frames in buffer
AudioUnit unit = ...; // your Delay unit
Now you can call AudioUnitRender:
AudioUnitRender(unit, &flags, &timeStamp, 0, frames, buffer);
The audio unit will ask the callback to fill the buffers and apply the effect to the sound; after that, you will have buffers with valid audio. For this to work, you need to set the kAudioUnitProperty_SetRenderCallback property to a valid callback.
I'm trying to create my own custom sound-effects Audio Unit based on the input from the mic. This application allows simultaneous input/output from the microphone to the speaker. I can apply effects and it works in the simulator, but when I test on the iPhone I can't hear anything. I paste my code in case anyone can help me:
- (id) init{
self = [super init];
OSStatus status;
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &audioUnit);
checkStatus(status);
// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
checkStatus(status);
// Enable IO for playback
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
checkStatus(status);
// Describe format
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;
// Apply format
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status);
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status);
// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus,
&callbackStruct,
sizeof(callbackStruct));
checkStatus(status);
// Set output callback
callbackStruct.inputProc = playbackCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
kOutputBus,
&callbackStruct,
sizeof(callbackStruct));
checkStatus(status);
// Allocate our own buffers (1 channel, 16 bits per sample, thus 16 bits per frame, thus 2 bytes per frame).
// In practice the buffers used contain 512 frames; if this changes, it is handled in processAudio.
tempBuffer.mNumberChannels = 1;
tempBuffer.mDataByteSize = 512 * 2;
tempBuffer.mData = malloc( 512 * 2 );
// Initialise
status = AudioUnitInitialize(audioUnit);
checkStatus(status);
return self;
}
This callback is called when new audio data from the microphone is available, but it is never entered when I test on the iPhone:
static OSStatus recordingCallback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData) {
AudioBuffer buffer;
buffer.mNumberChannels = 1;
buffer.mDataByteSize = inNumberFrames * 2;
buffer.mData = malloc( inNumberFrames * 2 );
// Put the buffer in an AudioBufferList
AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0] = buffer;
// Then:
// Obtain recorded samples
OSStatus status;
status = AudioUnitRender([iosAudio audioUnit],
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&bufferList);
checkStatus(status);
// Now, we have the samples we just read sitting in buffers in bufferList
// Process the new data
[iosAudio processAudio:&bufferList];
// release the malloc'ed data in the buffer we created earlier
free(bufferList.mBuffers[0].mData);
return noErr;
}
I solved my problem: I simply needed to initialize the audio session before playing/recording. I did so with the following code:
OSStatus status;
AudioSessionInitialize(NULL, NULL, NULL, self);
UInt32 sessionCategory = kAudioSessionCategory_PlayAndRecord;
status = AudioSessionSetProperty (kAudioSessionProperty_AudioCategory,
sizeof (sessionCategory),
&sessionCategory);
if (status != kAudioSessionNoError)
{
if (status == kAudioServicesUnsupportedPropertyError) {
NSLog(@"AudioSessionInitialize failed: unsupportedPropertyError");
} else if (status == kAudioServicesBadPropertySizeError) {
NSLog(@"AudioSessionInitialize failed: badPropertySizeError");
} else if (status == kAudioServicesBadSpecifierSizeError) {
NSLog(@"AudioSessionInitialize failed: badSpecifierSizeError");
} else if (status == kAudioServicesSystemSoundUnspecifiedError) {
NSLog(@"AudioSessionInitialize failed: systemSoundUnspecifiedError");
} else if (status == kAudioServicesSystemSoundClientTimedOutError) {
NSLog(@"AudioSessionInitialize failed: systemSoundClientTimedOutError");
} else {
NSLog(@"AudioSessionInitialize failed! %ld", (long)status);
}
}
AudioSessionSetActive(TRUE);
...
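As an aside, the C AudioSession API used above was later deprecated; a rough sketch of the equivalent setup with AVAudioSession (the higher-level API the RemoteIO example earlier already touches) would be:

NSError *sessionError = nil;
AVAudioSession *session = [AVAudioSession sharedInstance];
// PlayAndRecord is the category that allows simultaneous input and output.
if (![session setCategory:AVAudioSessionCategoryPlayAndRecord error:&sessionError]) {
    NSLog(@"setCategory failed: %@", sessionError);
}
if (![session setActive:YES error:&sessionError]) {
    NSLog(@"setActive failed: %@", sessionError);
}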
I investigated the aurioTouch2 sample code, but I want to record everything to a file, and aurioTouch doesn't provide this possibility. I tried to record data using this code in FFTBufferManager.cpp, in void FFTBufferManager::GrabAudioData(AudioBufferList *inBL):
ExtAudioFileRef cafFile;
AudioStreamBasicDescription cafDesc;
cafDesc.mBitsPerChannel = 16;
cafDesc.mBytesPerFrame = 4;
cafDesc.mBytesPerPacket = 4;
cafDesc.mChannelsPerFrame = 2;
cafDesc.mFormatFlags = 0;
cafDesc.mFormatID = 'ima4';
cafDesc.mFramesPerPacket = 1;
cafDesc.mReserved = 0;
cafDesc.mSampleRate = 44100;
CFStringRef refH;
refH = CFStringCreateWithCString(kCFAllocatorDefault, "/var/mobile/Applications/BD596ECF-A6F2-41EB-B4CE-3A9644B1C26A/Documents/voice2.caff", kCFStringEncodingUTF8);
CFURLRef destinationURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault,
refH,
kCFURLPOSIXPathStyle,
false);
OSStatus status = ExtAudioFileCreateWithURL(
destinationURL, // inURL
'caff', // inFileType
&cafDesc, // inStreamDesc
NULL, // inChannelLayout
kAudioFileFlags_EraseFile, // inFlags
&cafFile // outExtAudioFile
); // returns 0xFFFFFFCE (-50, paramErr)
ExtAudioFileWrite(cafFile, mNumberFrames, inBL);
And this works well, but I am using the AudioBufferList *inBL, which is only a small part of all the audio data (about 1 second). This function is called every second to analyze new audio data from the microphone. So it would be great if I could append the data from one AudioBufferList to another.
Or maybe anybody knows another approach.
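Note that ExtAudioFileWriteAsync appends on each call, so writing once per callback (as the RemoteIO example earlier on this page does) already accumulates everything into one file. If you really want to join buffer lists in memory first, a sketch along these lines should work; the accumulator is a hypothetical NSMutableData you own, and in production you would avoid Objective-C allocation like this on the real-time audio thread:

// Append every buffer's bytes from an AudioBufferList to a growing blob.
static void AppendBufferList(NSMutableData *accumulator, AudioBufferList *inBL)
{
    for (UInt32 i = 0; i < inBL->mNumberBuffers; i++) {
        [accumulator appendBytes:inBL->mBuffers[i].mData
                          length:inBL->mBuffers[i].mDataByteSize];
    }
}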
You should set up a new AudioUnit to record audio (with its own callback function).
OSStatus status;
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &mAudioUnit);
// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(mAudioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
// Enable IO for playback
status = AudioUnitSetProperty(mAudioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
// Describe format
AudioStreamBasicDescription audioFormat={0};
audioFormat.mSampleRate = kSampleRate;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;
// Apply format
status = AudioUnitSetProperty(mAudioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));
status = AudioUnitSetProperty(mAudioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = (__bridge void *)self;
status = AudioUnitSetProperty(mAudioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus,
&callbackStruct,
sizeof(callbackStruct));
// Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
flag = 0;
status = AudioUnitSetProperty(mAudioUnit,
kAudioUnitProperty_ShouldAllocateBuffer,
kAudioUnitScope_Output,
kInputBus,
&flag,
sizeof(flag));
// Initialize the audio file
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *documentsDirectory = [paths objectAtIndex:0];
NSString *destinationFilePath = [[NSString alloc] initWithFormat: #"%#/output.caf", documentsDirectory];
NSLog(#">>> %#\n", destinationFilePath);
CFURLRef destinationURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, (__bridge CFStringRef)destinationFilePath, kCFURLPOSIXPathStyle, false);
OSStatus setupErr = ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &audioFormat, NULL, kAudioFileFlags_EraseFile, &mAudioFileRef);
CFRelease(destinationURL);
NSAssert(setupErr == noErr, #"Couldn't create file for writing");
setupErr = ExtAudioFileSetProperty(mAudioFileRef, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &audioFormat);
NSAssert(setupErr == noErr, #"Couldn't create file for format");
setupErr = ExtAudioFileWriteAsync(mAudioFileRef, 0, NULL);
NSAssert(setupErr == noErr, #"Couldn't initialize write buffers for audio file");
CheckError(AudioUnitInitialize(mAudioUnit), "AudioUnitInitialize");
CheckError(AudioOutputUnitStart(mAudioUnit), "AudioOutputUnitStart");
So I am having some trouble here with my AudioUnit taking in data from the microphone/line-in on iOS. I am able to set everything up to what I think is okay, and it is calling my recordingCallback, but the data that I am getting out of the buffer is not correct. It always returns exactly the same thing, which is mostly zeros and random large numbers. Does anyone know what could be causing this? My code is as follows.
Setting up the Audio Unit
OSStatus status;
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
status = AudioComponentInstanceNew(inputComponent, &audioUnit);
// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBusNumber,
&flag,
sizeof(flag));
// Disable playback IO
flag = 0;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBusNumber,
&flag,
sizeof(flag));
// Describe format
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked |kAudioFormatFlagIsNonInterleaved;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 32;
audioFormat.mBytesPerPacket = 4;
audioFormat.mBytesPerFrame = 4;
// Apply format
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBusNumber,
&audioFormat,
sizeof(audioFormat));
// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = (__bridge void*)self;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBusNumber,
&callbackStruct,
sizeof(callbackStruct));
status = AudioUnitInitialize(audioUnit);
Input Callback
static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mDataByteSize = 4;
bufferList.mBuffers[0].mNumberChannels = 1;
bufferList.mBuffers[0].mData = malloc(sizeof(float)*inNumberFrames);
InputAudio *input = (__bridge InputAudio*)inRefCon;
OSStatus status;
status = AudioUnitRender([input audioUnit],
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&bufferList);
float* result = (float*)&bufferList.mBuffers[0].mData;
if (input->counter == 5) {
for (int i = 0;i<inNumberFrames;i++) {
printf("%f ",result[i]);
}
}
input->counter++;
return noErr;
}
Has anyone ever encountered a similar problem, or does anyone see something clearly wrong in my code? Thanks in advance for any help!
I am basing all of it on Michael Tyson's Core Audio RemoteIO code.
If I remember correctly, the samples you get from the audio buffer in the callback aren't floats; they're SInt16. Try casting the samples like this:
SInt16 *sn16AudioData= (SInt16 *)(bufferList.mBuffers[0].mData);
And these should be the max and min values:
#define sn16_MAX_SAMPLE_VALUE 32767
#define sn16_MIN_SAMPLE_VALUE -32768
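With that cast in place, the question's debug loop should print sensible values; dividing by the max sample value maps them into roughly [-1.0, 1.0]. A sketch using the callback's bufferList and inNumberFrames:

SInt16 *sn16AudioData = (SInt16 *)(bufferList.mBuffers[0].mData);
for (UInt32 i = 0; i < inNumberFrames; i++) {
    // Scale each signed 16-bit sample into the float range [-1, 1].
    printf("%f ", (float)sn16AudioData[i] / sn16_MAX_SAMPLE_VALUE);
}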
I was basically trying to do the same thing with very similar code, but using an AUGraph. I had the same problem (zeros in my output data from the mic) and could not get it working until I added the line
Status = AUGraphConnectNodeInput(graph, ioNode, 1, ioNode, 0);
As you are not using a graph, you will need to call AudioUnitSetProperty() with kAudioUnitProperty_MakeConnection and pass a complete AudioUnitConnection structure.
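A sketch of that non-graph equivalent, mirroring the AUGraph call above (on RemoteIO, element 1 is the input/mic side and element 0 the output/speaker side):

// Feed RemoteIO's input element (1) directly into its output element (0).
AudioUnitConnection connection;
connection.sourceAudioUnit    = audioUnit; // the same RemoteIO instance
connection.sourceOutputNumber = 1;         // mic side
connection.destInputNumber    = 0;         // speaker side
OSStatus connStatus = AudioUnitSetProperty(audioUnit,
                                           kAudioUnitProperty_MakeConnection,
                                           kAudioUnitScope_Input,
                                           0, // destination element
                                           &connection,
                                           sizeof(connection));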
I guess "AudioConverterFillComplexBuffer" is the solution.
But I don't know this way is right.
+1. AudioUnit
initialize AudioUnit : "recordingCallback" is callback method.
the output format is PCM.
record to file.( I played the recorded file).
+2. AudioConverter
add "AudioConverterFillComplexBuffer"
I don't know about it well. added,
+3. problem
"audioConverterComplexInputDataProc" method called only one time.
How can I use AudioConverter api?
Attached my code
#import "AACAudioRecorder.h"
#define kOutputBus 0
#define kInputBus 1
@implementation AACAudioRecorder
This is AudioConverterFillComplexBuffer's callback method.
static OSStatus audioConverterComplexInputDataProc( AudioConverterRef inAudioConverter,
UInt32* ioNumberDataPackets,
AudioBufferList* ioData,
AudioStreamPacketDescription** outDataPacketDescription,
void* inUserData){
ioData = (AudioBufferList*)inUserData;
return 0;
}
This is AudioUnit's callback.
static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
@autoreleasepool {
AudioBufferList *bufferList;
AACAudioRecorder *THIS = (AACAudioRecorder *)inRefCon;
OSStatus err = AudioUnitRender(THIS->m_audioUnit,
ioActionFlags,
inTimeStamp, 1, inNumberFrames, bufferList);
if (err) { NSLog(@"%s AudioUnitRender error %d\n",__FUNCTION__, (int)err); return err; }
NSString *recordFile =
[NSTemporaryDirectory() stringByAppendingPathComponent: @"auioBuffer.pcm"];
FILE *fp;
fp = fopen([recordFile UTF8String], "a+");
fwrite(bufferList->mBuffers[0].mData, sizeof(Byte),
bufferList->mBuffers[0].mDataByteSize, fp);
fclose(fp);
[THIS convert:bufferList ioOutputDataPacketSize:&inNumberFrames];
if (err) {NSLog(@"%s : AudioFormat Convert error %d\n",__FUNCTION__, (int)err); }
}
return noErr;
}
The status check method:
static void checkStatus(OSStatus status, const char* str){
if (status != noErr) {
NSLog(#"%s %s error : %ld ",__FUNCTION__, str, status);
}
}
The convert method (PCM -> AAC):
- (void)convert:(AudioBufferList*)input_bufferList ioOutputDataPacketSize:(UInt32*)packetSizeRef
{
UInt32 size = sizeof(UInt32);
UInt32 maxOutputSize;
AudioConverterGetProperty(m_audioConverterRef,
kAudioConverterPropertyMaximumOutputPacketSize,
&size,
&maxOutputSize);
AudioBufferList *output_bufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList));
output_bufferList->mNumberBuffers = 1;
output_bufferList->mBuffers[0].mNumberChannels = 1;
output_bufferList->mBuffers[0].mDataByteSize = *packetSizeRef * 2;
output_bufferList->mBuffers[0].mData = (AudioUnitSampleType *)malloc(*packetSizeRef * 2);
OSStatus err;
err = AudioConverterFillComplexBuffer(
m_audioConverterRef,
audioConverterComplexInputDataProc,
input_bufferList,
packetSizeRef,
output_bufferList,
NULL
);
if (err) {NSLog(#"%s : AudioFormat Convert error %d\n",__FUNCTION__, (int)err); }
}
This is the initialize method:
- (void)initialize
{
// ...
OSStatus status;
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &m_audioUnit);
checkStatus(status,"AudioComponentInstanceNew");
// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(m_audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
checkStatus(status,"Enable IO for recording");
// Enable IO for playback
status = AudioUnitSetProperty(m_audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
checkStatus(status,"Enable IO for playback");
// Describe format
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;
// Apply format
status = AudioUnitSetProperty(m_audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status,"Apply format1");
status = AudioUnitSetProperty(m_audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status,"Apply format2");
// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(m_audioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus,
&callbackStruct,
sizeof(callbackStruct));
checkStatus(status,"Set input callback");
// Initialise
status = AudioUnitInitialize(m_audioUnit);
checkStatus(status,"AudioUnitInitialize");
// Set ASBD For converting Output Stream
AudioStreamBasicDescription outputFormat;
memset(&outputFormat, 0, sizeof(outputFormat));
outputFormat.mSampleRate = 44100.00;
outputFormat.mFormatID = kAudioFormatMPEG4AAC;
outputFormat.mFormatFlags = kMPEG4Object_AAC_Main;
outputFormat.mFramesPerPacket = 1024;
outputFormat.mChannelsPerFrame = 1;
outputFormat.mBitsPerChannel = 0;
outputFormat.mBytesPerFrame = 0;
outputFormat.mBytesPerPacket = 0;
//Create An Audio Converter
status = AudioConverterNew( &audioFormat, &outputFormat, &m_audioConverterRef );
checkStatus(status,"Create An Audio Converter");
if(m_audioConverterRef) NSLog(@"m_audioConverterRef is created");
}
AudioOutputUnitStart
- (void)StartRecord
{
OSStatus status = AudioOutputUnitStart(m_audioUnit);
checkStatus(status,"AudioOutputUnitStart");
}
AudioOutputUnitStop
- (void)StopRecord
{
OSStatus status = AudioOutputUnitStop(m_audioUnit);
checkStatus(status,"AudioOutputUnitStop");
}
finish
- (void)finish
{
AudioUnitUninitialize(m_audioUnit);
}
@end
It took me a long time to understand AudioConverterFillComplexBuffer, and especially how to use it to convert audio in real-time. I've posted my approach here: How do I use CoreAudio's AudioConverter to encode AAC in real-time?
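For the code above, the crux is that the input data proc must fill the ioData it is handed and report how many packets it supplied; reassigning the local ioData pointer gives the converter nothing, which is why it is only called once. A rough sketch, assuming inUserData points at the source PCM AudioBufferList and that it is consumed in one shot:

static OSStatus audioConverterComplexInputDataProc(AudioConverterRef inAudioConverter,
                                                   UInt32 *ioNumberDataPackets,
                                                   AudioBufferList *ioData,
                                                   AudioStreamPacketDescription **outDataPacketDescription,
                                                   void *inUserData)
{
    AudioBufferList *source = (AudioBufferList *)inUserData;
    if (source->mBuffers[0].mDataByteSize == 0) {
        *ioNumberDataPackets = 0; // no more input; the converter stops asking
        return noErr;
    }
    // Hand over the PCM bytes; for 16-bit mono PCM, 1 packet = 1 frame = 2 bytes.
    ioData->mNumberBuffers = 1;
    ioData->mBuffers[0] = source->mBuffers[0];
    *ioNumberDataPackets = source->mBuffers[0].mDataByteSize / 2;
    source->mBuffers[0].mDataByteSize = 0; // mark consumed for the next call
    return noErr;
}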
Reference: https://developer.apple.com/library/ios/samplecode/iPhoneACFileConvertTest/Introduction/Intro.html
It demonstrates using the Audio Converter APIs to convert from a PCM audio format to a compressed format, including AAC.