OpenAL iPhone: unable to play any sound

I'm trying to play a simple sound file through OpenAL, following this tutorial:
I've condensed its code into a single block for an initial test, but no matter what I try I can't get the sound to play. Any help is much appreciated.
Thanks.
Here is my code:
ALCdevice* device;
device = alcOpenDevice(NULL);
ALCcontext* context;
alcCreateContext(device, NULL);
alcMakeContextCurrent(context);
NSString* path = [[NSBundle mainBundle] pathForResource:@"mg" ofType:@"caf"];
NSURL* pathURL = [NSURL fileURLWithPath:path];
AudioFileID audioID;
OSStatus audioStatus = AudioFileOpenURL((CFURLRef)pathURL, kAudioFileReadPermission, 0, &audioID);
UInt32 fileSize = 0;
UInt64 outDataSize;
UInt32 propSize = sizeof(UInt64);
OSStatus res = AudioFileGetProperty(audioID, kAudioFilePropertyAudioDataByteCount, &propSize, &outDataSize);
fileSize = (UInt32)outDataSize;
unsigned char* outData = malloc(fileSize);
OSStatus res2 = AudioFileReadBytes(audioID, false, 0, &fileSize, outData);
AudioFileClose(audioID);
ALuint bufferID;
alGenBuffers(1, &bufferID);
alBufferData(bufferID, AL_FORMAT_STEREO16, outData, fileSize, 44100);
ALuint sourceID = 2;
alGenSources(1, &sourceID);
alSourcei(sourceID, AL_BUFFER, bufferID);
alSourcef(sourceID, AL_PITCH, 1.0f);
alSourcef(sourceID, AL_GAIN, 1.0f);
alSourcei(sourceID, AL_LOOPING, AL_FALSE);
free(outData);
outData = NULL;
alSourcePlay(sourceID);

Just found out! I had been writing:
alcCreateContext(device, NULL);
instead of:
context = alcCreateContext(device, NULL);
so the context was never assigned. Checking alGetError() is what pointed this out.
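For reference, a minimal corrected initialization with an error check (using the same variable names as the code above):
ALCdevice *device = alcOpenDevice(NULL);
ALCcontext *context = alcCreateContext(device, NULL); // assign the result this time
alcMakeContextCurrent(context);
if (alcGetError(device) != ALC_NO_ERROR) {
    NSLog(@"OpenAL context setup failed");
}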


How to set up an Effect Audio Unit in iOS

My task is to play an audio file that is saved locally in the documents directory, apply an audio effect to it using an Effect Audio Unit, and save a new audio file in the documents directory with that effect applied.
Here is the code I have written so far, but it's not working: the effect is not being applied to the audio. Please suggest what is wrong in this code. Thanks in advance.
- (void) setUpAudioUnits
{
OSStatus setupErr = noErr;
// describe unit
AudioComponentDescription audioCompDesc;
audioCompDesc.componentType = kAudioUnitType_Output;
audioCompDesc.componentSubType = kAudioUnitSubType_RemoteIO;
audioCompDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
audioCompDesc.componentFlags = 0;
audioCompDesc.componentFlagsMask = 0;
// get rio unit from audio component manager
AudioComponent rioComponent = AudioComponentFindNext(NULL, &audioCompDesc);
setupErr = AudioComponentInstanceNew(rioComponent, &remoteIOUnit);
NSAssert (setupErr == noErr, #"Couldn't get RIO unit instance");
// set up the rio unit for playback
UInt32 oneFlag = 1;
AudioUnitElement outputElement = 0;
setupErr =
AudioUnitSetProperty (remoteIOUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
outputElement,
&oneFlag,
sizeof(oneFlag));
NSAssert (setupErr == noErr, #"Couldn't enable RIO output");
// enable rio input
AudioUnitElement inputElement = 1;
// setup an asbd in the iphone canonical format
AudioStreamBasicDescription myASBD;
memset (&myASBD, 0, sizeof (myASBD));
// myASBD.mSampleRate = 44100;
myASBD.mSampleRate = hardwareSampleRate;
myASBD.mFormatID = kAudioFormatLinearPCM;
myASBD.mFormatFlags = kAudioFormatFlagsCanonical;
myASBD.mBytesPerPacket = 4;
myASBD.mFramesPerPacket = 1;
myASBD.mBytesPerFrame = 4;
myASBD.mChannelsPerFrame = 2;
myASBD.mBitsPerChannel = 16;
/*
// set format for output (bus 0) on rio's input scope
*/
setupErr =
AudioUnitSetProperty (remoteIOUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
outputElement,
&myASBD,
sizeof (myASBD));
NSAssert (setupErr == noErr, #"Couldn't set ASBD for RIO on input scope / bus 0");
// song must be an LPCM file, preferably in caf container
// to convert, use /usr/bin/afconvert, like this:
// /usr/bin/afconvert --data LEI16 Girlfriend.m4a song.caf
// read in the entire audio file (NOT recommended)
// better to use a ring buffer: thread or timer fills, render callback drains
NSURL *songURL = [NSURL fileURLWithPath:
[[NSBundle mainBundle] pathForResource: #"song"
ofType: #"caf"]];
AudioFileID songFile;
setupErr = AudioFileOpenURL((CFURLRef) songURL,
kAudioFileReadPermission,
0,
&songFile);
NSAssert (setupErr == noErr, #"Couldn't open audio file");
UInt64 audioDataByteCount;
UInt32 audioDataByteCountSize = sizeof (audioDataByteCount);
setupErr = AudioFileGetProperty(songFile,
kAudioFilePropertyAudioDataByteCount,
&audioDataByteCountSize,
&audioDataByteCount);
NSAssert (setupErr == noErr, #"Couldn't get size property");
musicPlaybackState.audioData = malloc (audioDataByteCount);
musicPlaybackState.audioDataByteCount = audioDataByteCount;
musicPlaybackState.samplePtr = musicPlaybackState.audioData;
NSLog (#"reading %qu bytes from file", audioDataByteCount);
UInt32 bytesRead = audioDataByteCount;
setupErr = AudioFileReadBytes(songFile,
false,
0,
&bytesRead,
musicPlaybackState.audioData);
NSAssert (setupErr == noErr, #"Couldn't read audio data");
NSLog (#"read %d bytes from file", bytesRead);
AudioStreamBasicDescription fileASBD;
UInt32 asbdSize = sizeof (fileASBD);
setupErr = AudioFileGetProperty(songFile,
kAudioFilePropertyDataFormat,
&asbdSize,
&fileASBD);
NSAssert (setupErr == noErr, #"Couldn't get file asbd");
ExtAudioFileCreateWithURL(outputFileURL,
kAudioFileCAFType,
&fileASBD,
nil,
kAudioFileFlags_EraseFile,
&musicPlaybackState.extAudioFile);
// get the mixer unit
AudioComponentDescription mixerDesc;
mixerDesc.componentType = kAudioUnitType_Effect;
mixerDesc.componentSubType = kAudioUnitSubType_Delay;
mixerDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
mixerDesc.componentFlags = 0;
mixerDesc.componentFlagsMask = 0;
// get mixer unit from audio component manager
AudioComponent mixerComponent = AudioComponentFindNext(NULL, &mixerDesc);
setupErr = AudioComponentInstanceNew(mixerComponent, &mixerUnit);
NSAssert (setupErr == noErr, #"Couldn't get mixer unit instance");
// set up connections and callbacks
// connect mixer bus 0 input to robot voice render callback
effectState.rioUnit = remoteIOUnit;
effectState.sineFrequency = 23;
effectState.sinePhase = 0;
effectState.asbd = myASBD;
// connect mixer bus 1 input to music player callback
AURenderCallbackStruct musicPlayerCallbackStruct;
musicPlayerCallbackStruct.inputProc = MusicPlayerCallback; // callback function
musicPlayerCallbackStruct.inputProcRefCon = &musicPlaybackState;
setupErr =
AudioUnitSetProperty(mixerUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
outputElement,
&musicPlayerCallbackStruct,
sizeof (musicPlayerCallbackStruct));
NSAssert (setupErr == noErr, #"Couldn't set mixer render callback on bus 1");
// direct connect mixer to output
AudioUnitConnection connection;
connection.sourceAudioUnit = mixerUnit;
connection.sourceOutputNumber = outputElement;
connection.destInputNumber = outputElement;
setupErr =
AudioUnitSetProperty(remoteIOUnit,
kAudioUnitProperty_MakeConnection,
kAudioUnitScope_Input,
outputElement,
&connection,
sizeof (connection));
NSAssert (setupErr == noErr, #"Couldn't set mixer-to-RIO connection");
setupErr = AudioUnitInitialize(mixerUnit);
NSAssert (setupErr == noErr, #"Couldn't initialize mixer unit");
setupErr = AudioUnitInitialize(remoteIOUnit);
NSAssert (setupErr == noErr, #"Couldn't initialize RIO unit");
setupErr = AudioOutputUnitStart (remoteIOUnit);
}
Once you have an initialized audio unit instance, you can apply an effect to sound by calling AudioUnitRender and providing it an AudioBufferList.
First of all, make sure your sound is in a format the audio unit accepts; you can query this format via the kAudioUnitProperty_StreamFormat property.
If your audio file is in a different format than the one you got from the audio unit, you can convert the audio "on the fly" using ExtAudioFile. To achieve this, set the kExtAudioFileProperty_ClientDataFormat property of the ExtAudioFile to the format you got from kAudioUnitProperty_StreamFormat; every subsequent read from the audio file will then return audio in the needed format.
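A rough sketch of that conversion setup (here unit and fileURL are placeholders for your initialized audio unit and source file):
// Ask the unit for its stream format, then tell ExtAudioFile to
// deliver samples in that same format.
AudioStreamBasicDescription unitFormat;
UInt32 propSize = sizeof(unitFormat);
AudioUnitGetProperty(unit, kAudioUnitProperty_StreamFormat,
                     kAudioUnitScope_Output, 0, &unitFormat, &propSize);
ExtAudioFileRef extFile;
ExtAudioFileOpenURL((CFURLRef)fileURL, &extFile);
ExtAudioFileSetProperty(extFile, kExtAudioFileProperty_ClientDataFormat,
                        sizeof(unitFormat), &unitFormat);
// Every ExtAudioFileRead call now returns audio converted to unitFormat.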
Also, make sure the audio unit's kAudioUnitProperty_ShouldAllocateBuffer property is set to 1.
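Setting that property looks roughly like this (the scope and element are assumptions you may need to adjust for your graph):
UInt32 shouldAllocate = 1;
AudioUnitSetProperty(unit, kAudioUnitProperty_ShouldAllocateBuffer,
                     kAudioUnitScope_Output, 0,
                     &shouldAllocate, sizeof(shouldAllocate));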
To call AudioUnitRender you must prepare a valid AudioTimeStamp, AudioUnitRenderActionFlags (which can be set to 0), and an AudioBufferList. You don't need to allocate memory for the buffers themselves; you only need to provide the number of buffers and their sizes.
AudioBufferList *buffer = malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer));
buffer->mNumberBuffers = 2; // at least 2 buffers
buffer->mBuffers[0].mDataByteSize = ...; // size of one buffer
buffer->mBuffers[1].mDataByteSize = ...;
AudioUnitRenderActionFlags flags = 0;
AudioTimeStamp timeStamp;
memset(&timeStamp, 0, sizeof(AudioTimeStamp));
timeStamp.mFlags = kAudioTimeStampSampleTimeValid;
UInt32 frames = ...; // number of frames in buffer
AudioUnit unit = ...; // your Delay unit
Now you can call AudioUnitRender:
AudioUnitRender(unit, &flags, &timeStamp, 0, frames, buffer);
The audio unit will invoke your render callback to fill the buffers and will apply the effect to the sound; after that, the buffers contain valid audio. For this to work, you need to set the kAudioUnitProperty_SetRenderCallback property to a valid callback.
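The render callback itself is an ordinary AURenderCallback; a minimal skeleton (the body is a placeholder for your own source data):
static OSStatus MyRenderCallback(void *inRefCon,
                                 AudioUnitRenderActionFlags *ioActionFlags,
                                 const AudioTimeStamp *inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList *ioData)
{
    // Fill ioData->mBuffers[] with inNumberFrames frames of source audio here.
    return noErr;
}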

Length of the audio file

How do I find the total duration of the audio file being played, so that I can display content in view controllers according to the playback time?
if ([audioPlayer currentTime] == 11) {
[self performSelector:@selector(viewController) withObject:nil];
} else if ([audioPlayer currentTime] == 23) {
[self performSelector:@selector(secondViewController) withObject:nil];
}
If you're using AVAudioPlayer, you can use its duration property to retrieve the length of the loaded audio file in seconds.
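For example (assuming audioPlayer has already been initialized with your file):
NSTimeInterval totalSeconds = audioPlayer.duration; // length in seconds
NSLog(@"file is %.1f seconds long", totalSeconds);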
Use the AudioFileServices functions...
NSURL *afUrl = [NSURL fileURLWithPath:soundPath];
AudioFileID fileID;
OSStatus result = AudioFileOpenURL((CFURLRef)afUrl, kAudioFileReadPermission, 0, &fileID);
Float64 estimatedDuration = 0; // kAudioFilePropertyEstimatedDuration returns a Float64, in seconds
UInt32 thePropSize = sizeof(estimatedDuration);
result = AudioFileGetProperty(fileID, kAudioFilePropertyEstimatedDuration, &thePropSize, &estimatedDuration);
AudioFileClose(fileID);
For more info, refer to these links: http://developer.apple.com/library/ios/#codinghowtos/AudioAndVideo/_index.html
https://developer.apple.com/library/mac/#documentation/musicaudio/reference/AudioFileConvertRef/Reference/reference.html
EDIT: This gets the length of a file even when it is not associated with an AVAudioPlayer, so you can populate a list without loading each file into a player. Hope this helps somebody. :D

Using AVMutableAudioMix to adjust volumes for tracks within asset

I'm applying an AVMutableAudioMix to an asset I've created; the asset generally consists of 3-5 audio tracks (no video). The goal is to add several volume commands throughout the play time, i.e. I'd like to set the volume to 0.1 at 1 second, 0.5 at 2 seconds, then 0.1 or whatever at 3 seconds. I'm currently trying to do this with an AVPlayer, but will also use it later when exporting to a file with an AVAssetExportSession. The problem is that it only seems to honor the first volume command and appears to ignore all later ones. If the first command sets the volume to 0.1, that will be the permanent volume for this track for the rest of the asset. It certainly looks like you should be able to add any number of these commands, given that the inputParameters member of AVMutableAudioMix is an NSArray of AVMutableAudioMixInputParameters. Has anyone figured this out?
Edit: I partly figured this out. I'm able to add several volume changes throughout a certain track, but the timings appear way off, and I'm not sure how to fix that. For example, setting the volume to 0.0 at 5 seconds, then 1.0 at 10 seconds, and back to 0.0 at 15 seconds should make the volume cut in and out promptly at those timings, but the results are very unpredictable: sounds ramp up and down, and it only sometimes works (with the sudden volume changes you'd expect from setVolume). If anyone has gotten the AudioMix to work, please provide an example.
The code I use to change the track volume is:
AVURLAsset *soundTrackAsset = [[AVURLAsset alloc]initWithURL:trackUrl options:nil];
AVMutableAudioMixInputParameters *audioInputParams = [AVMutableAudioMixInputParameters audioMixInputParameters];
[audioInputParams setVolume:0.5 atTime:kCMTimeZero];
[audioInputParams setTrackID:[[[soundTrackAsset tracksWithMediaType:AVMediaTypeAudio] objectAtIndex:0] trackID]];
audioMix = [AVMutableAudioMix audioMix];
audioMix.inputParameters = [NSArray arrayWithObject:audioInputParams];
Don't forget to add the audiomix to your AVAssetExportSession
exportSession.audioMix = audioMix;
However, I noticed it does not work with all formats, so if you keep having issues with AVFoundation you can use the function below to change the volume level of a stored file. Note that this function can be quite slow.
- (void)scaleAudioFileAmplitude:(NSURL *)theURL ampScale:(float)ampScale {
OSStatus err = noErr;
ExtAudioFileRef audiofile;
ExtAudioFileOpenURL((CFURLRef)theURL, &audiofile);
assert(audiofile);
// get some info about the file's format.
AudioStreamBasicDescription fileFormat;
UInt32 size = sizeof(fileFormat);
err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_FileDataFormat, &size, &fileFormat);
// we'll need to know what type of file it is later when we write
AudioFileID aFile;
size = sizeof(aFile);
err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_AudioFile, &size, &aFile);
AudioFileTypeID fileType;
size = sizeof(fileType);
err = AudioFileGetProperty(aFile, kAudioFilePropertyFileFormat, &size, &fileType);
// tell the ExtAudioFile API what format we want samples back in
AudioStreamBasicDescription clientFormat;
bzero(&clientFormat, sizeof(clientFormat));
clientFormat.mChannelsPerFrame = fileFormat.mChannelsPerFrame;
clientFormat.mBytesPerFrame = 4;
clientFormat.mBytesPerPacket = clientFormat.mBytesPerFrame;
clientFormat.mFramesPerPacket = 1;
clientFormat.mBitsPerChannel = 32;
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mSampleRate = fileFormat.mSampleRate;
clientFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved;
err = ExtAudioFileSetProperty(audiofile, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);
// find out how many frames we need to read
SInt64 numFrames = 0;
size = sizeof(numFrames);
err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_FileLengthFrames, &size, &numFrames);
// create the buffers for reading in data
AudioBufferList *bufferList = malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer) * (clientFormat.mChannelsPerFrame - 1));
bufferList->mNumberBuffers = clientFormat.mChannelsPerFrame;
for (int ii=0; ii < bufferList->mNumberBuffers; ++ii) {
bufferList->mBuffers[ii].mDataByteSize = sizeof(float) * numFrames;
bufferList->mBuffers[ii].mNumberChannels = 1;
bufferList->mBuffers[ii].mData = malloc(bufferList->mBuffers[ii].mDataByteSize);
}
// read in the data
UInt32 rFrames = (UInt32)numFrames;
err = ExtAudioFileRead(audiofile, &rFrames, bufferList);
// close the file
err = ExtAudioFileDispose(audiofile);
// process the audio
for (int ii=0; ii < bufferList->mNumberBuffers; ++ii) {
float *fBuf = (float *)bufferList->mBuffers[ii].mData;
for (int jj=0; jj < rFrames; ++jj) {
*fBuf = *fBuf * ampScale;
fBuf++;
}
}
// open the file for writing
err = ExtAudioFileCreateWithURL((CFURLRef)theURL, fileType, &fileFormat, NULL, kAudioFileFlags_EraseFile, &audiofile);
// tell the ExtAudioFile API what format we'll be sending samples in
err = ExtAudioFileSetProperty(audiofile, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);
// write the data
err = ExtAudioFileWrite(audiofile, rFrames, bufferList);
// close the file
ExtAudioFileDispose(audiofile);
// destroy the buffers
for (int ii=0; ii < bufferList->mNumberBuffers; ++ii) {
free(bufferList->mBuffers[ii].mData);
}
free(bufferList);
bufferList = NULL;
}
Please also note that you may need to fine-tune the ampScale depending on where your volume value is coming from. The system volume goes from 0 to 1 and can be obtained by calling AudioSessionGetProperty:
Float32 volume;
UInt32 dataSize = sizeof(Float32);
AudioSessionGetProperty (
kAudioSessionProperty_CurrentHardwareOutputVolume,
&dataSize,
&volume
);
The Audio Extension Toolbox function above doesn't quite work as-is anymore due to API changes; it now requires you to set up an audio session category. When setting the export properties I was getting an error code of '?cat' (which the NSError will print out in decimal).
Here is the code that works now in iOS 5.1. It is also incredibly slow; at a glance I'd say several times slower than before. And it is memory intensive, since it appears to load the whole file into memory, which generates memory warnings for 10 MB mp3 files.
-(void) scaleAudioFileAmplitude:(NSURL *)theURL withAmpScale:(float) ampScale
{
OSStatus err = noErr;
ExtAudioFileRef audiofile;
ExtAudioFileOpenURL((CFURLRef)theURL, &audiofile);
assert(audiofile);
// get some info about the file's format.
AudioStreamBasicDescription fileFormat;
UInt32 size = sizeof(fileFormat);
err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_FileDataFormat, &size, &fileFormat);
// we'll need to know what type of file it is later when we write
AudioFileID aFile;
size = sizeof(aFile);
err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_AudioFile, &size, &aFile);
AudioFileTypeID fileType;
size = sizeof(fileType);
err = AudioFileGetProperty(aFile, kAudioFilePropertyFileFormat, &size, &fileType);
// tell the ExtAudioFile API what format we want samples back in
AudioStreamBasicDescription clientFormat;
bzero(&clientFormat, sizeof(clientFormat));
clientFormat.mChannelsPerFrame = fileFormat.mChannelsPerFrame;
clientFormat.mBytesPerFrame = 4;
clientFormat.mBytesPerPacket = clientFormat.mBytesPerFrame;
clientFormat.mFramesPerPacket = 1;
clientFormat.mBitsPerChannel = 32;
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mSampleRate = fileFormat.mSampleRate;
clientFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved;
err = ExtAudioFileSetProperty(audiofile, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);
// find out how many frames we need to read
SInt64 numFrames = 0;
size = sizeof(numFrames);
err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_FileLengthFrames, &size, &numFrames);
// create the buffers for reading in data
AudioBufferList *bufferList = malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer) * (clientFormat.mChannelsPerFrame - 1));
bufferList->mNumberBuffers = clientFormat.mChannelsPerFrame;
//printf("bufferList->mNumberBuffers = %lu \n\n", bufferList->mNumberBuffers);
for (int ii=0; ii < bufferList->mNumberBuffers; ++ii) {
bufferList->mBuffers[ii].mDataByteSize = sizeof(float) * numFrames;
bufferList->mBuffers[ii].mNumberChannels = 1;
bufferList->mBuffers[ii].mData = malloc(bufferList->mBuffers[ii].mDataByteSize);
}
// read in the data
UInt32 rFrames = (UInt32)numFrames;
err = ExtAudioFileRead(audiofile, &rFrames, bufferList);
// close the file
err = ExtAudioFileDispose(audiofile);
// process the audio
for (int ii=0; ii < bufferList->mNumberBuffers; ++ii) {
float *fBuf = (float *)bufferList->mBuffers[ii].mData;
for (int jj=0; jj < rFrames; ++jj) {
*fBuf = *fBuf * ampScale;
fBuf++;
}
}
// open the file for writing
err = ExtAudioFileCreateWithURL((CFURLRef)theURL, fileType, &fileFormat, NULL, kAudioFileFlags_EraseFile, &audiofile);
NSError *error = NULL;
/*************************** You Need This Now ****************************/
AVAudioSession *session = [AVAudioSession sharedInstance];
[session setCategory:AVAudioSessionCategoryAudioProcessing error:&error];
/************************* End You Need This Now **************************/
// tell the ExtAudioFile API what format we'll be sending samples in
err = ExtAudioFileSetProperty(audiofile, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);
error = [NSError errorWithDomain:NSOSStatusErrorDomain
code:err
userInfo:nil];
NSLog(#"Error: %#", [error description]);
// write the data
err = ExtAudioFileWrite(audiofile, rFrames, bufferList);
// close the file
ExtAudioFileDispose(audiofile);
// destroy the buffers
for (int ii=0; ii < bufferList->mNumberBuffers; ++ii) {
free(bufferList->mBuffers[ii].mData);
}
free(bufferList);
bufferList = NULL;
}
Thanks for the help provided in this post.
I would just like to add one thing: you should restore the AVAudioSession back to what it was, or you'll end up not playing anything.
AVAudioSession *session = [AVAudioSession sharedInstance];
NSString *originalSessionCategory = [session category];
NSError *error = nil;
[session setCategory:AVAudioSessionCategoryAudioProcessing error:&error];
...
...
// restore the original category
[session setCategory:originalSessionCategory error:&error];
if (error)
NSLog(@"%@", [error localizedDescription]);
Cheers
To set different volumes for the mutable tracks, you can use the code below:
self.audioMix = [AVMutableAudioMix audioMix];
AVMutableAudioMixInputParameters *audioInputParams = [AVMutableAudioMixInputParameters audioMixInputParameters];
[audioInputParams setVolume:0.1 atTime:kCMTimeZero];
audioInputParams.trackID = compositionAudioTrack2.trackID;
AVMutableAudioMixInputParameters *audioInputParams1 = [AVMutableAudioMixInputParameters audioMixInputParameters];
[audioInputParams1 setVolume:0.9 atTime:kCMTimeZero];
audioInputParams1.trackID = compositionAudioTrack1.trackID;
AVMutableAudioMixInputParameters *audioInputParams2 = [AVMutableAudioMixInputParameters audioMixInputParameters];
[audioInputParams2 setVolume:0.3 atTime:kCMTimeZero];
audioInputParams2.trackID = compositionAudioTrack.trackID;
self.audioMix.inputParameters =[NSArray arrayWithObjects:audioInputParams,audioInputParams1,audioInputParams2, nil];
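If stepwise setVolume:atTime: changes still behave unpredictably (as described in the question's edit), an explicit ramp may be more reliable; a sketch using setVolumeRampFromStartVolume:toEndVolume:timeRange: with hypothetical timings:
// Fade from full volume down to silence between 5s and 10s.
CMTimeRange fadeRange = CMTimeRangeMake(CMTimeMakeWithSeconds(5, 600),
                                        CMTimeMakeWithSeconds(5, 600));
[audioInputParams setVolumeRampFromStartVolume:1.0
                                   toEndVolume:0.0
                                     timeRange:fadeRange];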

Reading audio samples via AVAssetReader

How do you read audio samples via AVAssetReader? I've found examples of duplicating or mixing using AVAssetReader, but those loops are always controlled by the AVAssetWriter loop. Is it possible just to create an AVAssetReader and read through it, getting each sample and throwing the int32 of each audio sample into an array?
Thanks.
To expand on @amrox's answer, you can get an AudioBufferList from the CMSampleBufferRef, e.g.
CMItemCount numSamplesInBuffer = CMSampleBufferGetNumSamples(buffer);
AudioBufferList audioBufferList;
CMBlockBufferRef blockBuffer = NULL;
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
buffer,
NULL,
&audioBufferList,
sizeof(audioBufferList),
NULL,
NULL,
kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment,
&blockBuffer
);
for (int bufferCount=0; bufferCount < audioBufferList.mNumberBuffers; bufferCount++) {
SInt16* samples = (SInt16 *)audioBufferList.mBuffers[bufferCount].mData;
for (int i=0; i < numSamplesInBuffer; i++) {
// amplitude for the sample is samples[i], assuming you have linear pcm to start with
}
}
// Release the block buffer when done with the samples
// (it is retained by CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer)
CFRelease(blockBuffer);
NSError *error = nil;
AVAssetReader *reader = [[AVAssetReader alloc] initWithAsset:asset error:&error];
AVAssetTrack *track = [[asset tracksWithMediaType:AVMediaTypeAudio] objectAtIndex:0];
NSDictionary *settings = @{ AVFormatIDKey : [NSNumber numberWithInt:kAudioFormatLinearPCM] };
AVAssetReaderTrackOutput *readerOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:track
outputSettings:settings];
[reader addOutput:readerOutput];
[reader startReading];
CMSampleBufferRef sample;
while ( (sample = [readerOutput copyNextSampleBuffer]) )
{
CMBlockBufferRef buffer = CMSampleBufferGetDataBuffer(sample);
size_t lengthAtOffset;
size_t totalLength;
char *data;
if ( CMBlockBufferGetDataPointer( buffer, 0, &lengthAtOffset, &totalLength, &data ) != noErr )
{
NSLog(@"error!");
CFRelease(sample);
break;
}
// do something with data...
CFRelease(sample);
}
The answers here are not generic: the call to CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer can fail when the AudioBufferList needs to be sized differently, for example with non-interleaved samples.
The correct way is to call CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer twice: the first call queries the size needed for the AudioBufferList, and the second actually fills it.
size_t bufferSize = 0;
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
sampleBuffer,
&bufferSize,
NULL,
0,
NULL,
NULL,
0,
NULL
);
AudioBufferList *bufferList = malloc(bufferSize);
CMBlockBufferRef blockBuffer = NULL;
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
sampleBuffer,
NULL,
bufferList,
bufferSize,
NULL,
NULL,
kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment,
&blockBuffer
);
// handle audio here
free(bufferList);
CFRelease(blockBuffer);
In a real-world implementation you must also perform error handling, and you should not malloc on every frame; cache the AudioBufferList instead, as sketched below.
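A sketch of what that might look like, assuming _cachedList and _cachedSize are instance variables used as the cache:
size_t sizeNeeded = 0;
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
    sampleBuffer, &sizeNeeded, NULL, 0, NULL, NULL, 0, NULL);
if (sizeNeeded > _cachedSize) { // grow the cached list only when needed
    _cachedList = (AudioBufferList *)realloc(_cachedList, sizeNeeded);
    _cachedSize = sizeNeeded;
}
CMBlockBufferRef blockBuffer = NULL;
OSStatus status = CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
    sampleBuffer, NULL, _cachedList, _cachedSize, NULL, NULL,
    kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment, &blockBuffer);
if (status != noErr) return; // bail instead of assuming success
// ... use _cachedList here ...
CFRelease(blockBuffer);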

How to play looping sound with OpenAL on iPhone

I'm following a tutorial about playing sound with OpenAL. Now everything works fine except that I can't make the sound loop. I believe I've set AL_LOOPING on the source, yet the sound plays only once, and when it finishes the app blocks (it doesn't respond to my tap on the play button). Any ideas about what's wrong with the code?
// start up openAL
// init device and context
-(void)initOpenAL
{
// Initialization
mDevice = alcOpenDevice(NULL); // select the "preferred device"
if (mDevice) {
// use the device to make a context
mContext = alcCreateContext(mDevice, NULL);
// set my context to the currently active one
alcMakeContextCurrent(mContext);
}
}
// open the audio file
// returns a big audio ID struct
-(AudioFileID)openAudioFile:(NSString*)filePath
{
AudioFileID outAFID;
// use an NSURL instead of a CFURLRef because it is easier
NSURL * afUrl = [NSURL fileURLWithPath:filePath];
// do some platform specific stuff..
#if TARGET_OS_IPHONE
OSStatus result = AudioFileOpenURL((CFURLRef)afUrl, kAudioFileReadPermission, 0, &outAFID);
#else
OSStatus result = AudioFileOpenURL((CFURLRef)afUrl, fsRdPerm, 0, &outAFID);
#endif
if (result != 0) NSLog(#"cannot openf file: %#",filePath);
return outAFID;
}
// find the audio portion of the file
// return the size in bytes
-(UInt32)audioFileSize:(AudioFileID)fileDescriptor
{
UInt64 outDataSize = 0;
UInt32 thePropSize = sizeof(UInt64);
OSStatus result = AudioFileGetProperty(fileDescriptor, kAudioFilePropertyAudioDataByteCount, &thePropSize, &outDataSize);
if(result != 0) NSLog(#"cannot find file size");
return (UInt32)outDataSize;
}
- (void)stopSound
{
alSourceStop(sourceID);
}
-(void)cleanUpOpenAL:(id)sender
{
// delete the sources
alDeleteSources(1, &sourceID);
// delete the buffers
alDeleteBuffers(1, &bufferID);
// destroy the context
alcDestroyContext(mContext);
// close the device
alcCloseDevice(mDevice);
}
-(IBAction)play:(id)sender
{
alSourcePlay(sourceID);
}
#pragma mark -
// Implement viewDidLoad to do additional setup after loading the view, typically from a nib.
- (void)viewDidLoad {
[super viewDidLoad];
[self initOpenAL];
// get the full path of the file
NSString* fileName = [[NSBundle mainBundle] pathForResource:@"sound" ofType:@"caf"];
// first, open the file
AudioFileID fileID = [self openAudioFile:fileName];
// find out how big the actual audio data is
UInt32 fileSize = [self audioFileSize:fileID];
// this is where the audio data will live for the moment
unsigned char * outData = malloc(fileSize);
// this where we actually get the bytes from the file and put them
// into the data buffer
OSStatus result = noErr;
result = AudioFileReadBytes(fileID, false, 0, &fileSize, outData);
AudioFileClose(fileID); //close the file
if (result != 0) NSLog(#"cannot load effect: %#", fileName);
//NSUInteger bufferID; // buffer is defined in the header file
// grab a buffer ID from openAL
alGenBuffers(1, &bufferID);
// jam the audio data into the new buffer
alBufferData(bufferID, AL_FORMAT_STEREO16, outData, fileSize, 8000);
//NSUInteger sourceID; // source is defined in the header file
// grab a source ID from openAL
alGenSources(1, &sourceID);
// attach the buffer to the source
alSourcei(sourceID, AL_BUFFER, bufferID);
// set some basic source prefs
alSourcef(sourceID, AL_PITCH, 1.0f);
alSourcef(sourceID, AL_GAIN, 1.0f);
alSourcei(sourceID, AL_LOOPING, AL_TRUE);
// clean up the buffer
if (outData)
{
free(outData);
outData = NULL;
}
}
You should be able to release outData right after your alBufferData() call. To exclude it as the culprit, you can try the static extension and manage the memory yourself. It's something like:
alBufferDataStaticProcPtr alBufferDataStaticProc = (alBufferDataStaticProcPtr)alcGetProcAddress(0, (const ALCchar *)"alBufferDataStatic");
alBufferDataStaticProc(bufferID, bitChanFormat, audioData, audioDataSize, dataFormat.mSampleRate);
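Note that alBufferDataStatic does not copy your data the way alBufferData does: OpenAL reads from your memory directly, so audioData must stay valid for the buffer's entire lifetime, and the cleanup order inverts:
// Free the memory only after the buffer that uses it is gone.
alDeleteBuffers(1, &bufferID);
free(audioData);
audioData = NULL;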