Could not start Audio Queue / Error starting recording - iPhone

CFStringRef state;
UInt32 propertySize = sizeof(CFStringRef);
// AudioSessionInitialize(NULL, NULL, NULL, NULL);
AudioSessionGetProperty(kAudioSessionProperty_AudioRoute, &propertySize, &state);
if (CFStringGetLength(state) == 0)
// if (state == 0)
{ // SILENT
NSLog(@"Silent switch is on");
// create vibrate
// AudioServicesPlaySystemSound(kSystemSoundID_Vibrate);
UInt32 audioCategory = kAudioSessionCategory_MediaPlayback;
AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(UInt32), &audioCategory);
}
else { // NOT SILENT
NSLog(@"Silent switch is off");
}
Wherever I use the above code I am able to play a sound file in silent mode, but after playing the recorded sound file in silent mode, when I try to record voice again I get an error like:
2010-12-08 13:29:56.710 VoiceRecorder[382:307] -66681
Could not start Audio Queue
Error starting recording
Here is the code:
// file url
[self setupAudioFormat:&recordState.dataFormat];
CFURLRef fileURL = CFURLCreateFromFileSystemRepresentation(NULL, (const UInt8 *)[filePath UTF8String], [filePath lengthOfBytesUsingEncoding:NSUTF8StringEncoding], NO);
// recordState.currentPacket = 0;
// new input queue
OSStatus status;
status = AudioQueueNewInput(&recordState.dataFormat, HandleInputBuffer, &recordState, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &recordState.queue);
if (status) { CFRelease(fileURL); printf("Could not establish new queue\n"); return NO; }
// create new audio file
status = AudioFileCreateWithURL(fileURL, kAudioFileAIFFType, &recordState.dataFormat, kAudioFileFlags_EraseFile, &recordState.audioFile);
CFRelease(fileURL); // thanks august joki
if (status) { printf("Could not create file to record audio\n"); return NO; }
// figure out the buffer size
DeriveBufferSize(recordState.queue, recordState.dataFormat, 0.5, &recordState.bufferByteSize);
// allocate those buffers and enqueue them
for (int i = 0; i < NUM_BUFFERS; i++)
{
status = AudioQueueAllocateBuffer(recordState.queue, recordState.bufferByteSize, &recordState.buffers[i]);
if (status) { printf("Error allocating buffer %d\n", i); return NO; }
status = AudioQueueEnqueueBuffer(recordState.queue, recordState.buffers[i], 0, NULL);
if (status) { printf("Error enqueuing buffer %d\n", i); return NO; }
}
// enable metering
UInt32 enableMetering = YES;
status = AudioQueueSetProperty(recordState.queue, kAudioQueueProperty_EnableLevelMetering, &enableMetering, sizeof(enableMetering));
if (status) { printf("Could not enable metering\n"); return NO; }
// start recording
status = AudioQueueStart(recordState.queue, NULL); // status = 0; NSLog(@"%d", (int)status);
if (status) { printf("Could not start Audio Queue\n"); return NO; }
recordState.currentPacket = 0;
recordState.recording = YES;
return YES;
I get the error here.
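For reference, DeriveBufferSize in the code above is presumably the helper from Apple's Audio Queue Services Programming Guide, which looks roughly like this (a sketch, not the poster's exact code):
void DeriveBufferSize(AudioQueueRef audioQueue, AudioStreamBasicDescription ASBDescription, Float64 seconds, UInt32 *outBufferSize)
{
static const int maxBufferSize = 0x50000; // cap the buffer at 320 KB
int maxPacketSize = ASBDescription.mBytesPerPacket;
if (maxPacketSize == 0) { // VBR format: ask the queue for the largest packet
UInt32 maxVBRPacketSize = sizeof(maxPacketSize);
AudioQueueGetProperty(audioQueue, kAudioQueueProperty_MaximumOutputPacketSize, &maxPacketSize, &maxVBRPacketSize);
}
Float64 numBytesForTime = ASBDescription.mSampleRate * maxPacketSize * seconds;
*outBufferSize = (UInt32)(numBytesForTime < maxBufferSize ? numBytesForTime : maxBufferSize);
}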

I was facing a similar problem on iOS 7.1. Add the following in the AppDelegate's didFinishLaunchingWithOptions:
AVAudioSession * audioSession = [AVAudioSession sharedInstance];
[audioSession setCategory:AVAudioSessionCategoryPlayAndRecord error: nil];
[audioSession setActive:YES error: nil];
EDIT: The above code is working for me.
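For what it's worth, -66681 appears to be kAudioQueueErr_CannotStart. A slightly more defensive variant of the same fix checks the errors instead of passing nil, so a failed activation is at least visible (the logging is illustrative):
NSError *sessionError = nil;
AVAudioSession *audioSession = [AVAudioSession sharedInstance];
if (![audioSession setCategory:AVAudioSessionCategoryPlayAndRecord error:&sessionError])
NSLog(@"Could not set category: %@", sessionError);
if (![audioSession setActive:YES error:&sessionError])
NSLog(@"Could not activate audio session: %@", sessionError);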

Related

AudioUnitInitialize throws error while initializing AudioComponentInstance

I am using the code below to initialize my audio components.
-(void) startListeningWithCoreAudio
{
NSError *error = nil;
[[AVAudioSession sharedInstance] setCategory: AVAudioSessionCategoryPlayAndRecord error:&error];
if (error)
NSLog(@"error setting up audio session: %@", [error localizedDescription]);
[[AVAudioSession sharedInstance] setDelegate:self];
OSStatus status = AudioSessionSetActive(YES);
checkStatus(status);
// Find the apple mic
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
AudioComponent inputComponent = AudioComponentFindNext( NULL, &desc );
status = AudioComponentInstanceNew( inputComponent, &kAudioUnit );
checkStatus( status );
// enable mic output as our input
UInt32 flag = 1;
status = AudioUnitSetProperty( kAudioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag) );
checkStatus(status);
// Define mic output audio format
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 16000.0;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;
status = AudioUnitSetProperty( kAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &audioFormat, sizeof(audioFormat) );
checkStatus(status);
// Define our callback methods
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty( kAudioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, kInputBus, &callbackStruct, sizeof(callbackStruct) );
checkStatus(status);
// By pass voice processing
UInt32 audiobypassProcessing = [[NSUserDefaults standardUserDefaults] boolForKey:VOICE_BY_PASS_PROCESSING];
status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_BypassVoiceProcessing,
kAudioUnitScope_Global, kInputBus, &audiobypassProcessing, sizeof(audiobypassProcessing));
checkStatus(status);
// Automatic Gain Control
UInt32 audioAGC = [[NSUserDefaults standardUserDefaults] boolForKey:VOICE_AGC];
status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_VoiceProcessingEnableAGC,
kAudioUnitScope_Global, kInputBus, &audioAGC, sizeof(audioAGC));
checkStatus(status);
//Non Audio Voice Ducking
UInt32 audioDucking = [[NSUserDefaults standardUserDefaults] boolForKey:VOICE_DUCKING];
status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_DuckNonVoiceAudio,
kAudioUnitScope_Global, kInputBus, &audioDucking, sizeof(audioDucking));
checkStatus(status);
//Audio Quality
UInt32 quality = [[NSUserDefaults standardUserDefaults] integerForKey:VOICE_QUALITY];
status = AudioUnitSetProperty(kAudioUnit, kAUVoiceIOProperty_VoiceProcessingQuality,
kAudioUnitScope_Global, kInputBus, &quality, sizeof(quality));
checkStatus(status);
status = AudioUnitInitialize(kAudioUnit);
checkStatus(status);
status = AudioOutputUnitStart( kAudioUnit );
checkStatus(status);
UInt32 audioRoute = (UInt32)kAudioSessionOverrideAudioRoute_Speaker;
status = AudioSessionSetProperty(kAudioSessionProperty_OverrideAudioRoute, sizeof (audioRoute), &audioRoute);
checkStatus(status);
}
-(void) stopListeningWithCoreAudio
{
OSStatus status = AudioUnitUninitialize( kAudioUnit );
checkStatus(status);
status = AudioOutputUnitStop( kAudioUnit );
checkStatus( status );
// if(kAudioUnit)
// {
// status = AudioComponentInstanceDispose(kAudioUnit);
// checkStatus(status);
// kAudioUnit = nil;
// }
status = AudioSessionSetActive(NO);
checkStatus(status);
NSError *error = nil;
[[AVAudioSession sharedInstance] setCategory: AVAudioSessionCategorySoloAmbient error:&error];
if (error)
NSLog(@"error setting up audio session: %@", [error localizedDescription]);
}
It works fine the first time: startListeningWithCoreAudio is called by a button-press event and records/processes audio well. On another event I call stopListeningWithCoreAudio to stop recording/processing.
The problem comes when I try to call startListeningWithCoreAudio again. It throws errors from two functions, AudioUnitInitialize and AudioOutputUnitStart, which are called from startListeningWithCoreAudio.
Can anyone please help me figure out what the problem is?
I found the solution.
Calling the functions below back to back creates the problem:
extern OSStatus AudioUnitUninitialize(AudioUnit inUnit)
extern OSStatus AudioComponentInstanceDispose(AudioComponentInstance inInstance)
So I called the dispose method on the main thread as follows:
[self performSelectorOnMainThread:@selector(disposeCoreAudio) withObject:nil waitUntilDone:NO];
-(void) disposeCoreAudio
{
OSStatus status = AudioComponentInstanceDispose(kAudioUnit);
checkStatus(status);
kAudioUnit = nil;
}
It solved the problem. So the correct sequence is: stop recording, uninitialize the recorder, and dispose of the recorder on the main thread.
One possible problem is that your code is trying to uninitialize a running audio unit before stopping it.
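Put differently, a teardown that stops the unit first, then uninitializes, then disposes (a sketch using the names from the question) would look roughly like this:
-(void) stopListeningWithCoreAudio
{
// Stop the running unit before uninitializing it
OSStatus status = AudioOutputUnitStop(kAudioUnit);
checkStatus(status);
status = AudioUnitUninitialize(kAudioUnit);
checkStatus(status);
// Dispose on the main thread, as noted above
[self performSelectorOnMainThread:@selector(disposeCoreAudio) withObject:nil waitUntilDone:NO];
status = AudioSessionSetActive(NO);
checkStatus(status);
}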

Application turns off playing music when launched

Hi, I have set my app's audio session to ambient sound. When I launch the app for the first time, it kills the music that is already playing. I don't want this to happen. Is there any other way to set this?
Try this:
Activate the audio session:
OSStatus activationResult = AudioSessionSetActive(true);
Test whether other audio is playing:
UInt32 otherAudioIsPlaying; // 1
UInt32 propertySize = sizeof (otherAudioIsPlaying);
AudioSessionGetProperty ( // 2
kAudioSessionProperty_OtherAudioIsPlaying,
&propertySize,
&otherAudioIsPlaying
);
if (otherAudioIsPlaying) { // 3
[[AVAudioSession sharedInstance]
setCategory: AVAudioSessionCategoryAmbient
error: nil];
} else {
[[AVAudioSession sharedInstance]
setCategory: AVAudioSessionCategorySoloAmbient
error: nil];
}
If it is, allow mixing:
OSStatus propertySetError = 0;
UInt32 allowMixing = true;
propertySetError = AudioSessionSetProperty (
kAudioSessionProperty_OverrideCategoryMixWithOthers, // 1
sizeof (allowMixing), // 2
&allowMixing // 3
);
Source : http://developer.apple.com/library/ios/#documentation/Audio/Conceptual/AudioSessionProgrammingGuide/Cookbook/Cookbook.html
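Putting the fragments together, a minimal sketch might look like this (the ordering and category choice follow the steps above; the error handling is illustrative):
UInt32 otherAudioIsPlaying = 0;
UInt32 propertySize = sizeof(otherAudioIsPlaying);
AudioSessionGetProperty(kAudioSessionProperty_OtherAudioIsPlaying, &propertySize, &otherAudioIsPlaying);
NSString *category = otherAudioIsPlaying ? AVAudioSessionCategoryAmbient : AVAudioSessionCategorySoloAmbient;
NSError *error = nil;
if (![[AVAudioSession sharedInstance] setCategory:category error:&error])
NSLog(@"setCategory failed: %@", error);
if (otherAudioIsPlaying) {
UInt32 allowMixing = true;
AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryMixWithOthers, sizeof(allowMixing), &allowMixing);
}
AudioSessionSetActive(true);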

AudioUnits causing universal skipping after returning from Springboard

I have a problem in my applications where I am using AudioUnits. All of the application's audio (including audio not played through AudioUnits) will start skipping after exiting to Springboard and returning to the application.
I broke the problem out into a new separate test app. Here are the steps to repeat it:
Start an audio file playing using an AVAudioPlayer.
Create, delete, then again create an AudioUnit.
Exit to Springboard.
Return to the app.
The audio from the AVAudioPlayer will start skipping.
Here is some of the code I used:
- (IBAction)restartAudioUnit {
MySoundStream* audioUnitClass;
audioUnitClass = Load();
[audioUnitClass release];
audioUnitClass = Load();
}
Forgive the long code dump but AudioUnits are complex and I am fairly sure I am just setting them up or taking them down incorrectly.
The MySoundStream class:
OSStatus UnitRenderCB(void* pRefCon, AudioUnitRenderActionFlags* flags, const AudioTimeStamp* timeStamp, UInt32 busNum, UInt32 numFrames, AudioBufferList* pData){
OSStatus tErr = noErr;
//Do Nothing
return tErr;
}
@implementation MySoundStream
-(void) dealloc {
[self Unload];
[super dealloc];
}
-(void) Unload {
OSStatus tErr = noErr;
tErr = AudioUnitUninitialize(OutUnit);
}
@end
MySoundStream* Load()
{
OSStatus tErr = noErr;
AudioComponentInstance tRIO;
AudioComponentDescription tRIOCD;
AURenderCallbackStruct tRIOCB;
AudioStreamBasicDescription tAUF;
tRIOCD.componentType = kAudioUnitType_Output;
tRIOCD.componentSubType = kAudioUnitSubType_RemoteIO;
tRIOCD.componentManufacturer = kAudioUnitManufacturer_Apple;
tRIOCD.componentFlags = 0;
tRIOCD.componentFlagsMask = 0;
AudioComponent tRIOC = AudioComponentFindNext(NULL, &tRIOCD);
tErr = AudioComponentInstanceNew(tRIOC, &tRIO);
if (tErr != noErr) return NULL;
int tOutEnable = 1;
tErr = AudioUnitSetProperty(tRIO, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &tOutEnable, sizeof(tOutEnable));
if (tErr != noErr) return NULL;
tAUF.mSampleRate = 44100.00;
tAUF.mFormatID = kAudioFormatLinearPCM;
tAUF.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
tAUF.mFramesPerPacket = 1;
tAUF.mChannelsPerFrame = 2;
tAUF.mBitsPerChannel = 16;
tAUF.mBytesPerPacket = 4;
tAUF.mBytesPerFrame = 4;
tErr = AudioUnitSetProperty(tRIO, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &tAUF, sizeof(tAUF));
if (tErr != noErr) return NULL;
MySoundStream* pRet = [[MySoundStream alloc] init];
tRIOCB.inputProc = UnitRenderCB;
tRIOCB.inputProcRefCon = pRet;
tErr = AudioUnitSetProperty(tRIO, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, 0, &tRIOCB, sizeof(tRIOCB));
if (tErr != noErr) { [pRet release]; return NULL; }
tErr = AudioUnitInitialize(tRIO);
if (tErr != noErr) { [pRet release]; return NULL; }
pRet->OutUnit = tRIO;
return pRet;
}
If anyone can see anything I am doing wrong with this AudioUnit, that would be very helpful.
Edit:
I uploaded the complete source here.
Press Play Sound (you may need headphones).
Press RestartAudioUnit.
Return to Springboard.
Re-enter the TestAudioUnit app.
The audio will skip.
You are calling AudioUnitInitialize() when the app is re-initialized, which is not good. You need to call AudioUnitInitialize() only once when your app starts, and you should not have to build the entire AU graph every time your app enters the foreground.
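A minimal sketch of that idea: build and initialize the unit once, and only start/stop it afterwards (CreateRemoteIOUnit here is a hypothetical helper standing in for the setup in Load()):
static AudioUnit sOutUnit = NULL;
void StartOutput(void) {
if (sOutUnit == NULL) {
sOutUnit = CreateRemoteIOUnit(); // hypothetical one-time setup, as in Load()
AudioUnitInitialize(sOutUnit); // initialize exactly once
}
AudioOutputUnitStart(sOutUnit); // safe to call on each foreground
}
void StopOutput(void) {
if (sOutUnit) AudioOutputUnitStop(sOutUnit); // stop, but keep the unit alive
}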
I switched to using an AUGraph to set up my Audio Unit path and had better luck.
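For reference, a minimal AUGraph version of the RemoteIO setup might look like this sketch (the stream format and render callback are set on the unit exactly as before):
AUGraph graph;
AUNode outNode;
AudioComponentDescription desc = { 0 };
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
NewAUGraph(&graph);
AUGraphAddNode(graph, &desc, &outNode);
AUGraphOpen(graph);
AudioUnit outUnit;
AUGraphNodeInfo(graph, outNode, NULL, &outUnit);
// ... set kAudioUnitProperty_StreamFormat and the render callback on outUnit here ...
AUGraphInitialize(graph); // once, at startup
AUGraphStart(graph); // when entering the foreground
// AUGraphStop(graph); // when leaving the foreground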

Is there an event for when the headphones are unplugged?

During a test, a client noticed that video playback on the iPhone pauses when headphones are unplugged. He wanted similar functionality for audio playback, and maybe the ability to pop up a message.
Does anyone know if there's an event of some kind I could hook into to make this possible?
See Responding to Route Changes from the Audio Session Programming Guide.
This changed with iOS 7; you just need to listen for the notification named AVAudioSessionRouteChangeNotification:
[[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(audioRouteChanged:) name:AVAudioSessionRouteChangeNotification object:nil];
Swift 3.0 version of @snakeoil's solution:
NotificationCenter.default.addObserver(self, selector: #selector(YourViewController.yourMethodThatShouldBeCalledOnChange), name: NSNotification.Name.AVAudioSessionRouteChange, object: nil)
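In either case, the handler can inspect the route-change reason to tell unplugging apart from other changes. An Objective-C sketch (the selector name matches the registration above; what you do in response is up to you):
- (void)audioRouteChanged:(NSNotification *)notification
{
NSUInteger reason = [notification.userInfo[AVAudioSessionRouteChangeReasonKey] unsignedIntegerValue];
if (reason == AVAudioSessionRouteChangeReasonOldDeviceUnavailable) {
// The old route (e.g. headphones) went away: pause playback here.
}
}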
Here's the full implementation I eventually used for sending events when the headphones are plugged in (and unplugged).
There was a fair amount of complexity I needed to deal with to ensure things still worked after the app was returned from the background.
CVAudioSession.h file
#import <Foundation/Foundation.h>
#define kCVAudioInputChangedNotification @"kCVAudioInputChangedNotification"
#define kCVAudioInterruptionEnded @"kCVAudioInterruptionEnded"
@interface CVAudioSession : NSObject
+(void) setup;
+(void) destroy;
+(NSString*) currentAudioRoute;
+(BOOL) interrupted;
@end
CVAudioSession.m file
#import "CVAudioSession.h"
#import <AudioToolbox/AudioToolbox.h>
@implementation CVAudioSession
static BOOL _isInterrupted = NO;
+(void) setup {
NSLog(@"CVAudioSession setup");
// Set up the audio session for recording
OSStatus error = AudioSessionInitialize(NULL, NULL, interruptionListener, (__bridge void*)self);
if (error) NSLog(@"ERROR INITIALIZING AUDIO SESSION! %ld\n", error);
if (!error) {
UInt32 category = kAudioSessionCategory_RecordAudio; // NOTE: CAN'T PLAY BACK WITH THIS
error = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
if (error) NSLog(@"couldn't set audio category!");
error = AudioSessionAddPropertyListener(kAudioSessionProperty_AudioRouteChange, propListener, (__bridge void*) self);
if (error) NSLog(@"ERROR ADDING AUDIO SESSION PROP LISTENER! %ld\n", error);
UInt32 inputAvailable = 0;
UInt32 size = sizeof(inputAvailable);
// we do not want to allow recording if input is not available
error = AudioSessionGetProperty(kAudioSessionProperty_AudioInputAvailable, &size, &inputAvailable);
if (error) NSLog(@"ERROR GETTING INPUT AVAILABILITY! %ld\n", error);
// we also need to listen to see if input availability changes
error = AudioSessionAddPropertyListener(kAudioSessionProperty_AudioInputAvailable, propListener, (__bridge void*) self);
if (error) NSLog(@"ERROR ADDING AUDIO SESSION PROP LISTENER! %ld\n", error);
error = AudioSessionSetActive(true);
if (error) NSLog(@"CVAudioSession: AudioSessionSetActive (true) failed");
}
}
+ (NSString*) currentAudioRoute {
UInt32 routeSize = sizeof (CFStringRef);
CFStringRef route;
AudioSessionGetProperty (kAudioSessionProperty_AudioRoute,
&routeSize,
&route);
NSString* routeStr = (__bridge NSString*)route;
return routeStr;
}
+(void) destroy {
NSLog(@"CVAudioSession destroy");
// Very important - remove the listeners, or we'll crash when audio routes etc change when we're no longer on screen
OSStatus stat = AudioSessionRemovePropertyListenerWithUserData(kAudioSessionProperty_AudioRouteChange, propListener, (__bridge void*)self);
NSLog(@".. AudioSessionRemovePropertyListener kAudioSessionProperty_AudioRouteChange returned %ld", stat);
stat = AudioSessionRemovePropertyListenerWithUserData(kAudioSessionProperty_AudioInputAvailable, propListener, (__bridge void*)self);
NSLog(@".. AudioSessionRemovePropertyListener kAudioSessionProperty_AudioInputAvailable returned %ld", stat);
AudioSessionSetActive(false); // disable audio session.
NSLog(@"AudioSession is now inactive");
}
+(BOOL) interrupted {
return _isInterrupted;
}
// Called when audio is interrupted for whatever reason. NOTE: doesn't always call the END one..
void interruptionListener( void * inClientData,
UInt32 inInterruptionState) {
if (inInterruptionState == kAudioSessionBeginInterruption)
{
_isInterrupted = YES;
NSLog(@"CVAudioSession: interruptionListener kAudioSessionBeginInterruption. Disable audio session..");
// Try just deactivating the audiosession..
OSStatus rc = AudioSessionSetActive(false);
if (rc) {
NSLog(@"CVAudioSession: interruptionListener kAudioSessionBeginInterruption - AudioSessionSetActive(false) returned %ld", rc);
} else {
NSLog(@"CVAudioSession: interruptionListener kAudioSessionBeginInterruption - AudioSessionSetActive(false) ok.");
}
} else if (inInterruptionState == kAudioSessionEndInterruption) {
_isInterrupted = NO;
// Reactivate the audiosession
OSStatus rc = AudioSessionSetActive(true);
if (rc) {
NSLog(@"CVAudioSession: interruptionListener kAudioSessionEndInterruption - AudioSessionSetActive(true) returned %ld", rc);
} else {
NSLog(@"CVAudioSession: interruptionListener kAudioSessionEndInterruption - AudioSessionSetActive(true) ok.");
}
[[NSNotificationCenter defaultCenter] postNotificationName:kCVAudioInterruptionEnded object:(__bridge NSObject*)inClientData userInfo:nil];
}
}
// This is called when microphone or other audio devices are plugged in and out. Is on the main thread
void propListener( void * inClientData,
AudioSessionPropertyID inID,
UInt32 inDataSize,
const void * inData)
{
if (inID == kAudioSessionProperty_AudioRouteChange)
{
CFDictionaryRef routeDictionary = (CFDictionaryRef)inData;
CFNumberRef reason = (CFNumberRef)CFDictionaryGetValue(routeDictionary, CFSTR(kAudioSession_AudioRouteChangeKey_Reason));
SInt32 reasonVal;
CFNumberGetValue(reason, kCFNumberSInt32Type, &reasonVal);
if (reasonVal != kAudioSessionRouteChangeReason_CategoryChange)
{
NSLog(@"CVAudioSession: input changed");
[[NSNotificationCenter defaultCenter] postNotificationName:kCVAudioInputChangedNotification object:(__bridge NSObject*)inClientData userInfo:nil];
}
}
else if (inID == kAudioSessionProperty_AudioInputAvailable)
{
if (inDataSize == sizeof(UInt32)) {
UInt32 isAvailable = *(UInt32*)inData;
if (isAvailable == 0) {
NSLog(@"AUDIO RECORDING IS NOT AVAILABLE");
}
}
}
}
@end
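A hypothetical usage sketch: set the session up once at launch, observe the notification, and tear everything down when you are done (audioInputChanged: is an illustrative selector, not part of the class):
[CVAudioSession setup];
[[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(audioInputChanged:) name:kCVAudioInputChangedNotification object:nil];
NSLog(@"Current route: %@", [CVAudioSession currentAudioRoute]);
// ... later, when audio is no longer needed:
[CVAudioSession destroy];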

How to play looping sound with OpenAL on iPhone

I'm following a tutorial about playing sound with OpenAL. Now everything works fine except I can't make the sound loop. I believe I've set AL_LOOPING on the source. Now it plays only once, and when it finishes playing the app blocks (it doesn't respond to my tap on the play button). Any ideas about what's wrong with the code?
// start up openAL
// init device and context
-(void)initOpenAL
{
// Initialization
mDevice = alcOpenDevice(NULL); // select the "preferred device"
if (mDevice) {
// use the device to make a context
mContext = alcCreateContext(mDevice, NULL);
// set my context to the currently active one
alcMakeContextCurrent(mContext);
}
}
// open the audio file
// returns a big audio ID struct
-(AudioFileID)openAudioFile:(NSString*)filePath
{
AudioFileID outAFID;
// use an NSURL instead of a CFURLRef because it is easier
NSURL * afUrl = [NSURL fileURLWithPath:filePath];
// do some platform specific stuff..
#if TARGET_OS_IPHONE
OSStatus result = AudioFileOpenURL((CFURLRef)afUrl, kAudioFileReadPermission, 0, &outAFID);
#else
OSStatus result = AudioFileOpenURL((CFURLRef)afUrl, fsRdPerm, 0, &outAFID);
#endif
if (result != 0) NSLog(@"cannot open file: %@", filePath);
return outAFID;
}
// find the audio portion of the file
// return the size in bytes
-(UInt32)audioFileSize:(AudioFileID)fileDescriptor
{
UInt64 outDataSize = 0;
UInt32 thePropSize = sizeof(UInt64);
OSStatus result = AudioFileGetProperty(fileDescriptor, kAudioFilePropertyAudioDataByteCount, &thePropSize, &outDataSize);
if(result != 0) NSLog(#"cannot find file size");
return (UInt32)outDataSize;
}
- (void)stopSound
{
alSourceStop(sourceID);
}
-(void)cleanUpOpenAL:(id)sender
{
// delete the sources
alDeleteSources(1, &sourceID);
// delete the buffers
alDeleteBuffers(1, &bufferID);
// destroy the context
alcDestroyContext(mContext);
// close the device
alcCloseDevice(mDevice);
}
-(IBAction)play:(id)sender
{
alSourcePlay(sourceID);
}
#pragma mark -
// Implement viewDidLoad to do additional setup after loading the view, typically from a nib.
- (void)viewDidLoad {
[super viewDidLoad];
[self initOpenAL];
// get the full path of the file
NSString* fileName = [[NSBundle mainBundle] pathForResource:@"sound" ofType:@"caf"];
// first, open the file
AudioFileID fileID = [self openAudioFile:fileName];
// find out how big the actual audio data is
UInt32 fileSize = [self audioFileSize:fileID];
// this is where the audio data will live for the moment
unsigned char * outData = malloc(fileSize);
// this where we actually get the bytes from the file and put them
// into the data buffer
OSStatus result = noErr;
result = AudioFileReadBytes(fileID, false, 0, &fileSize, outData);
AudioFileClose(fileID); //close the file
if (result != 0) NSLog(@"cannot load effect: %@", fileName);
//NSUInteger bufferID; // buffer is defined in head file
// grab a buffer ID from openAL
alGenBuffers(1, &bufferID);
// jam the audio data into the new buffer
alBufferData(bufferID, AL_FORMAT_STEREO16, outData, fileSize, 8000);
//NSUInteger sourceID; // source is defined in head file
// grab a source ID from openAL
alGenSources(1, &sourceID);
// attach the buffer to the source
alSourcei(sourceID, AL_BUFFER, bufferID);
// set some basic source prefs
alSourcef(sourceID, AL_PITCH, 1.0f);
alSourcef(sourceID, AL_GAIN, 1.0f);
alSourcei(sourceID, AL_LOOPING, AL_TRUE);
// clean up the buffer
if (outData)
{
free(outData);
outData = NULL;
}
}
You should be able to free outData right after your alBufferData() call (alBufferData copies the data). To exclude it as the culprit, you can try the static buffer extension and manage the memory yourself. It's something like:
alBufferDataStaticProcPtr alBufferDataStaticProc = (alBufferDataStaticProcPtr)alcGetProcAddress(0, (const ALCchar *)"alBufferDataStatic");
alBufferDataStaticProc(bufferID, bitChanFormat, audioData, audioDataSize, dataFormat.mSampleRate);
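For context, alBufferDataStaticProcPtr is typically declared by hand (the typedef below follows Apple's oalTouch sample code). Note that with the static variant OpenAL uses your memory in place, so the data must stay valid until the buffer is deleted:
typedef ALvoid AL_APIENTRY (*alBufferDataStaticProcPtr)(const ALint bid, ALenum format, ALvoid* data, ALsizei size, ALsizei freq);
// With alBufferDataStatic, do NOT free(audioData) right after the call;
// free it only after alDeleteBuffers() has released the buffer.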