AudioQueue: how to find out the playback length of queued data - iPhone

I am using AudioQueue to stream a song. My question is: how can I tell the playback length of the buffers I have already queued? I want to stream two seconds of data at a time; the problem I am having is knowing how many bytes actually correspond to two seconds of music (so I can always stay two seconds ahead).
Thanks
Daniel

Here is a class that uses Audio File Services to get at the bitrate / packet / frame data and grab the number of bytes from a music file that correspond to x seconds of audio. The example has been tested with mp3 and m4a files.
Header
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
@interface MusicChunker : NSObject
{
AudioFileID audioFile;
int _sampleRate;
int _totalFrames;
UInt64 _framesPerPacket;
UInt64 _totalPackets;
UInt64 fileDataSize;
AudioFilePacketTableInfo _packetInfo;
int _fileLength;
AudioStreamBasicDescription _fileDataFormat;
NSFileHandle * _fileHandle;
int _packetOffset;
int _totalReadBytes;
int _maxPacketSize;
BOOL firstTime;
BOOL _ism4a;
}
-(id)initWithURL:(NSURL*)url andFileType:(NSString*)ext;
//gets next chunk that corresponds to seconds of audio
-(NSData*)getNextDataChunk:(int)seconds;
@end
Implementation
#import "MusicChunker.h"
void ReportAudioError(OSStatus statusCode);
@implementation MusicChunker
- (id)init
{
self = [super init];
if (self) {
// Initialization code here.
}
return self;
}
void ReportAudioError(OSStatus statusCode) {
switch (statusCode) {
case noErr:
break;
case kAudioFileUnspecifiedError:
[NSException raise:@"AudioFileUnspecifiedError" format:@"An unspecified error occurred."];
break;
case kAudioFileUnsupportedDataFormatError:
[NSException raise:@"AudioFileUnsupportedDataFormatError" format:@"The data format is not supported by the output file type."];
break;
case kAudioFileUnsupportedFileTypeError:
[NSException raise:@"AudioFileUnsupportedFileTypeError" format:@"The file type is not supported."];
break;
case kAudioFileUnsupportedPropertyError:
[NSException raise:@"AudioFileUnsupportedPropertyError" format:@"A file property is not supported."];
break;
case kAudioFilePermissionsError:
[NSException raise:@"AudioFilePermissionsError" format:@"The operation violated the file permissions. For example, an attempt was made to write to a file opened with the kAudioFileReadPermission constant."];
break;
case kAudioFileNotOptimizedError:
[NSException raise:@"AudioFileNotOptimizedError" format:@"The chunks following the audio data chunk are preventing the extension of the audio data chunk. To write more data, you must optimize the file."];
break;
case kAudioFileInvalidChunkError:
[NSException raise:@"AudioFileInvalidChunkError" format:@"Either the chunk does not exist in the file or it is not supported by the file."];
break;
case kAudioFileDoesNotAllow64BitDataSizeError:
[NSException raise:@"AudioFileDoesNotAllow64BitDataSizeError" format:@"The file offset was too large for the file type. The AIFF and WAVE file format types have 32-bit file size limits."];
break;
case kAudioFileInvalidPacketOffsetError:
[NSException raise:@"AudioFileInvalidPacketOffsetError" format:@"A packet offset was past the end of the file, or not at the end of the file when a VBR format was written, or a corrupt packet size was read when the packet table was built."];
break;
case kAudioFileInvalidFileError:
[NSException raise:@"AudioFileInvalidFileError" format:@"The file is malformed, or otherwise not a valid instance of an audio file of its type."];
break;
case kAudioFileOperationNotSupportedError:
[NSException raise:@"AudioFileOperationNotSupportedError" format:@"The operation cannot be performed. For example, setting the kAudioFilePropertyAudioDataByteCount constant to increase the size of the audio data in a file is not a supported operation. Write the data instead."];
break;
case -50:
[NSException raise:@"AudioFileBadParameter" format:@"An invalid parameter was passed, possibly the current packet and/or the inNumberOfPackets."];
break;
default:
[NSException raise:@"AudioFileUnknownError" format:@"An unknown error type %@ occurred. [%s]", [NSNumber numberWithInteger:statusCode], (char*)&statusCode];
break;
}
}
+ (AudioFileTypeID)hintForFileExtension:(NSString *)fileExtension
{
AudioFileTypeID fileTypeHint = kAudioFileAAC_ADTSType;
if ([fileExtension isEqual:@"mp3"])
{
fileTypeHint = kAudioFileMP3Type;
}
else if ([fileExtension isEqual:@"wav"])
{
fileTypeHint = kAudioFileWAVEType;
}
else if ([fileExtension isEqual:@"aifc"])
{
fileTypeHint = kAudioFileAIFCType;
}
else if ([fileExtension isEqual:@"aiff"])
{
fileTypeHint = kAudioFileAIFFType;
}
else if ([fileExtension isEqual:@"m4a"])
{
fileTypeHint = kAudioFileM4AType;
}
else if ([fileExtension isEqual:@"mp4"])
{
fileTypeHint = kAudioFileMPEG4Type;
}
else if ([fileExtension isEqual:@"caf"])
{
fileTypeHint = kAudioFileCAFType;
}
else if ([fileExtension isEqual:@"aac"])
{
fileTypeHint = kAudioFileAAC_ADTSType;
}
return fileTypeHint;
}
-(id)initWithURL:(NSURL*)url andFileType:(NSString*)ext
{
self = [super init];
if (self) {
// Initialization code here.
//OSStatus theErr = noErr;
if([ext isEqualToString:@"mp3"])
{
_ism4a=FALSE;
}
else
_ism4a=TRUE;
firstTime=TRUE;
_packetOffset=0;
AudioFileTypeID hint=[MusicChunker hintForFileExtension:ext];
OSStatus theErr = AudioFileOpenURL((CFURLRef)url, kAudioFileReadPermission, hint, &audioFile);
if(theErr)
{
ReportAudioError(theErr);
}
UInt32 thePropertySize;// = sizeof(theFileFormat);
thePropertySize = sizeof(fileDataSize);
theErr = AudioFileGetProperty(audioFile, kAudioFilePropertyAudioDataByteCount, &thePropertySize, &fileDataSize);
if(theErr)
{
ReportAudioError(theErr);
}
theErr = AudioFileGetProperty(audioFile, kAudioFilePropertyAudioDataPacketCount, &thePropertySize, &_totalPackets);
if(theErr)
{
ReportAudioError(theErr);
}
/*
UInt32 size;
size = sizeof(_packetInfo);
theErr = AudioFileGetProperty(audioFile, kAudioFilePropertyPacketTableInfo, &size, &_packetInfo);
if(theErr)
{
ReportAudioError(theErr);
}
*/
UInt32 size;
size=sizeof(_maxPacketSize);
theErr=AudioFileGetProperty(audioFile, kAudioFilePropertyMaximumPacketSize , &size, &_maxPacketSize);
size = sizeof( _fileDataFormat );
theErr=AudioFileGetProperty( audioFile, kAudioFilePropertyDataFormat, &size, &_fileDataFormat );
_framesPerPacket=_fileDataFormat.mFramesPerPacket;
_totalFrames=_fileDataFormat.mFramesPerPacket*_totalPackets;
_fileHandle=[[NSFileHandle fileHandleForReadingFromURL:url error:nil] retain];
_fileLength=[_fileHandle seekToEndOfFile];
_sampleRate=_fileDataFormat.mSampleRate;
_totalReadBytes=0;
/*
AudioFramePacketTranslation tran;//= .mFrame = 0, .mPacket = packetCount - 1, .mFrameOffsetInPacket = 0 };
tran.mFrame=0;
tran.mFrameOffsetInPacket=0;
tran.mPacket=1;
UInt32 size=sizeof(tran);
theErr=AudioFileGetProperty(audioFile, kAudioFilePropertyPacketToFrame, &size, &tran);
*/
/*
AudioBytePacketTranslation bt;
bt.mPacket=4;
bt.mByteOffsetInPacket=0;
size=sizeof(bt);
theErr=AudioFileGetProperty(audioFile, kAudioFilePropertyPacketToByte, &size, &bt);
*/
}
return self;
}
//gets next chunk that corresponds to seconds of audio
-(NSData*)getNextDataChunk:(int)seconds
{
//NSLog(@"%d, total packets",_totalPackets);
if(_packetOffset>=_totalPackets)
return nil;
//sampleRate * seconds = number of wanted frames
int framesWanted= _sampleRate*seconds;
NSData *header=nil;
int wantedPackets= framesWanted/_framesPerPacket;
if(firstTime && _ism4a)
{
firstTime=false;
//when we have a header that was stripped off, we grab it from the original file
int totallen= [_fileHandle seekToEndOfFile];
int dif=totallen-fileDataSize;
[_fileHandle seekToFileOffset:0];
header= [_fileHandle readDataOfLength:dif];
}
int packetOffset=_packetOffset+wantedPackets;
//bound condition
if(packetOffset>_totalPackets)
{
packetOffset=_totalPackets;
}
UInt32 outBytes;
UInt32 packetCount = wantedPackets;
int x=packetCount * _maxPacketSize;
void *data = (void *)malloc(x);
OSStatus theErr=AudioFileReadPackets(audioFile, false, &outBytes, NULL, _packetOffset, &packetCount, data);
if(theErr)
{
ReportAudioError(theErr);
}
//calculate bytes to read
int bytesRead=outBytes;
//update read bytes
_totalReadBytes+=bytesRead;
// NSLog(@"total bytes read %d", _totalReadBytes);
_packetOffset=packetOffset;
NSData *subdata=[[NSData dataWithBytes:data length:outBytes] retain];
free(data);
if(header)
{
//use a new name so we don't shadow the malloc'd buffer above
NSMutableData *combined=[[NSMutableData alloc]init];
[combined appendData:header];
[combined appendData:subdata];
[subdata release];
return [combined autorelease];
}
return [subdata autorelease];
}
@end
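For reference, a minimal usage sketch (the file name is just an example; getNextDataChunk returns autoreleased data, so this assumes manual reference counting):
NSURL *url = [NSURL fileURLWithPath:[[NSBundle mainBundle] pathForResource:@"song" ofType:@"mp3"]];
MusicChunker *chunker = [[MusicChunker alloc] initWithURL:url andFileType:@"mp3"];
NSData *chunk;
while ((chunk = [chunker getNextDataChunk:2]) != nil)
{
//hand roughly two seconds of audio to the streaming code,
//e.g. parse it with AudioFileStream and enqueue it on the AudioQueue
NSLog(@"read %lu bytes (~2 seconds of audio)", (unsigned long)[chunk length]);
}
[chunker release];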

If the songs are in arbitrary compressed formats and you want exactly 2-second snips, you may have to convert the songs into raw PCM samples or WAV data first (AVAssetReader, et al.). Then you can count samples at a known sample rate; e.g., 88200 frames at a 44.1 kHz sample rate would be 2 seconds' worth.
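For example, the byte count for a chunk of raw PCM is a straightforward product (assuming 16-bit interleaved stereo at 44.1 kHz):
int sampleRate     = 44100; // frames per second
int channels       = 2;
int bytesPerSample = 2;     // 16-bit samples
int seconds        = 2;
int frames = sampleRate * seconds;               // 88200 frames
int bytes  = frames * channels * bytesPerSample; // 352800 bytes = 2 seconds of audio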

Related

Convert audio Linear PCM to MP3 (using LAME) with low, medium, and high audio quality settings

I am encoding Linear PCM to MP3 on iOS. I'm trying to encode the raw PCM data from the microphone to MP3 using the AudioToolbox framework and LAME. Everything seems to run fine: if I record audio, it is converted to MP3 with LAME, and playing the recorded MP3 file works. Now I want to offer low, medium, and high quality MP3 output, but I don't know the exact settings (sample rate, bit depth, bit rate, channels, quality) to use for the LAME conversion.
void AQRecorder::MyInputBufferHandler( void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription* inPacketDesc)
{
AQRecorder *aqr = (AQRecorder *)inUserData;
// NSLog(@"%f",inStartTime->mSampleTime);
try
{
if (inNumPackets > 0)
{
AudioFileWritePackets(aqr->mRecordFile, FALSE, inBuffer->mAudioDataByteSize, inPacketDesc, aqr->mRecordPacket, &inNumPackets, inBuffer->mAudioData);
aqr->mRecordPacket += inNumPackets;
int MP3_SIZE =inBuffer->mAudioDataByteSize * 4;
unsigned char mp3_buffer[MP3_SIZE];
lame_t lame = lame_init();
lame_set_in_samplerate(lame, 44100);
lame_set_VBR(lame, vbr_default);
lame_init_params(lame);
// int encodedBytes=lame_encode_buffer_interleaved(lame, (short int *)inBuffer->mAudioData , inNumPackets, mp3_buffer, MP3_SIZE);
int encodedBytes = lame_encode_buffer(lame, (short*)inBuffer->mAudioData, (short*)inBuffer->mAudioData, inNumPackets, mp3_buffer, MP3_SIZE);
[delegate.mp3AudioData appendBytes:mp3_buffer length:encodedBytes];
if (inBuffer->mAudioDataByteSize != 0) {
}
else
{
int encode=lame_encode_flush(lame, mp3_buffer, MP3_SIZE);
[delegate.mp3AudioData appendBytes:mp3_buffer length:encode];
}
lame_close(lame);
}
if (aqr->IsRunning())
{
AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
}
} catch (CAXException e)
{
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
}
Try this,
Low quality:
AppDelegate *appDelegate = (AppDelegate *)[[UIApplication sharedApplication]delegate];
NSMutableDictionary *dictAudioQuality =[[NSMutableDictionary alloc]init];
[dictAudioQuality setValue:#"Low" forKey:#"audioquality"];
[dictAudioQuality setValue:#"11025" forKey:#"samplerate"];
[dictAudioQuality setValue:#"16" forKey:#"bitdepth"];
[dictAudioQuality setValue:#"120" forKey:#"bitrate"];
[dictAudioQuality setValue:#"1" forKey:#"channel"];
Medium Quality:
AppDelegate *appDelegate = (AppDelegate *)[[UIApplication sharedApplication]delegate];
NSMutableDictionary *dictAudioQuality =[[NSMutableDictionary alloc]init];
[dictAudioQuality setValue:#"Medium" forKey:#"audioquality"];
[dictAudioQuality setValue:#"22050" forKey:#"samplerate"];
[dictAudioQuality setValue:#"16" forKey:#"bitdepth"];
[dictAudioQuality setValue:#"240" forKey:#"bitrate"];
[dictAudioQuality setValue:#"1" forKey:#"channel"];
High Quality:
AppDelegate *appDelegate = (AppDelegate *)[[UIApplication sharedApplication]delegate];
NSMutableDictionary *dictAudioQuality =[[NSMutableDictionary alloc]init];
[dictAudioQuality setValue:#"High" forKey:#"audioquality"];
[dictAudioQuality setValue:#"44100" forKey:#"samplerate"];
[dictAudioQuality setValue:#"24" forKey:#"bitdepth"];
[dictAudioQuality setValue:#"320" forKey:#"bitrate"];
[dictAudioQuality setValue:#"2" forKey:#"channel"];
AQRecorder.m Start Record
void AQRecorder::StartRecord(CFStringRef inRecordFile)
{
int i, bufferByteSize;
UInt32 size;
delegate =[[UIApplication sharedApplication]delegate];
nSampleRate =[[delegate.dictMP3Quality valueForKey:@"samplerate"] intValue];
nBitRate =[[delegate.dictMP3Quality valueForKey:@"bitrate"] intValue];
nChannel =[[delegate.dictMP3Quality valueForKey:@"channel"] intValue];
try {
UInt32 category = kAudioSessionCategory_RecordAudio;
OSStatus error = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
if (error) printf("couldn't set audio category!");
// specify the recording format
SetupAudioFormat(kAudioFormatLinearPCM);
// create the queue
XThrowIfError(AudioQueueNewInput(
&mRecordFormat,
MyInputBufferHandler,
this /* userData */,
NULL /* run loop */, NULL /* run loop mode */,
0 /* flags */, &mQueue), "AudioQueueNewInput failed");
// get the record format back from the queue's audio converter --
// the file may require a more specific stream description than was necessary to create the encoder.
mRecordPacket = 0;
size = sizeof(mRecordFormat);
XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_StreamDescription,
&mRecordFormat, &size), "couldn't get queue's format");
// copy the cookie first to give the file object as much info as we can about the data going in
// not necessary for pcm, but required for some compressed audio
CopyEncoderCookieToFile();
// allocate and enqueue buffers
bufferByteSize = ComputeRecordBufferSize(&mRecordFormat, kBufferDurationSeconds); // enough bytes for half a second
for (i = 0; i < kNumberRecordBuffers; ++i) {
XThrowIfError(AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]),
"AudioQueueAllocateBuffer failed");
XThrowIfError(AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL),
"AudioQueueEnqueueBuffer failed");
}
// start the queue
mIsRunning = true;
XThrowIfError(AudioQueueStart(mQueue, NULL), "AudioQueueStart failed");
lame = lame_init();
lame_set_in_samplerate(lame, mRecordFormat.mSampleRate);
lame_set_out_samplerate(lame, nSampleRate);
lame_set_num_channels(lame, nChannel);
// lame_set_brate(lame, nBitRate);
lame_set_VBR(lame, vbr_default);
lame_init_params(lame);
}
catch (CAXException e) {
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
catch (...) {
fprintf(stderr, "An unknown error occurred\n");;
}
}
AQRecorder::MyInputBufferHandler
void AQRecorder::MyInputBufferHandler( void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription* inPacketDesc)
{
AQRecorder *aqr = (AQRecorder *)inUserData;
try
{
if (inNumPackets > 0)
{
AudioFileWritePackets(aqr->mRecordFile, FALSE, inBuffer->mAudioDataByteSize, inPacketDesc, aqr->mRecordPacket, &inNumPackets, inBuffer->mAudioData);
aqr->mRecordPacket += inNumPackets;
int MP3_SIZE =inNumPackets * 4;
unsigned char mp3_buffer[MP3_SIZE];
memset(mp3_buffer, 0, sizeof(mp3_buffer));
// int encodedBytes=lame_encode_buffer_interleaved(lame, (short int*)inBuffer->mAudioData , inNumPackets, mp3_buffer, MP3_SIZE);
int encodedBytes = lame_encode_buffer(aqr->lame, (short int*)inBuffer->mAudioData, (short int*)inBuffer->mAudioData, inNumPackets, mp3_buffer, MP3_SIZE);
[aqr->delegate.mp3AudioData appendBytes:mp3_buffer length:encodedBytes];
if (inBuffer->mAudioDataByteSize != 0) {
}
else
{
int encode=lame_encode_flush(aqr->lame, mp3_buffer, MP3_SIZE);
[aqr->delegate.mp3AudioData appendBytes:mp3_buffer length:encode];
}
{
NSLog(#"------------");
NSLog(#"%d",encodedBytes);
NSLog(#"%lu",inNumPackets);
NSLog(#"%d",MP3_SIZE);
NSLog(#"------------");
}
}
if (aqr->IsRunning())
{
AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
}
} catch (CAXException e)
{
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
}
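One detail worth noting: the lame_encode_flush in the callback above only runs when an empty buffer arrives, which is not guaranteed. A safer pattern is to flush once when recording stops; here is a sketch of what a StopRecord could do (the member names mirror the code above, and AudioQueueStop/AudioQueueDispose are the standard teardown calls):
void AQRecorder::StopRecord()
{
mIsRunning = false;
AudioQueueStop(mQueue, true); // stop synchronously; no more callbacks after this
unsigned char mp3_buffer[7200]; // LAME documents 7200 bytes as enough for a flush
int n = lame_encode_flush(lame, mp3_buffer, sizeof(mp3_buffer));
if (n > 0)
[delegate.mp3AudioData appendBytes:mp3_buffer length:n];
lame_close(lame);
AudioQueueDispose(mQueue, true);
}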

Can't receive JPEG image from server

I am trying to receive a JPEG image from my C# server. The weird thing is, when I run it with the debugger and set a breakpoint anywhere in the method, it works perfectly fine. Without a breakpoint I get this error:
Corrupt JPEG data: premature end of data segment
Here is my code
- (void)stream:(NSStream *)stream handleEvent:(NSStreamEvent)eventCode {
NSMutableData *data;
data = [NSMutableData new];
switch(eventCode) {
case NSStreamEventHasBytesAvailable:
{
uint8_t buffer[1024];
int len;
while([inputStream hasBytesAvailable]) {
len = [inputStream read:buffer maxLength:sizeof(buffer)];
if (len > 0)
{
[data appendBytes:(const void*)buffer length:sizeof(buffer)];
}
}
UIImage *images = [[UIImage alloc]initWithData:data];
[dvdCover setImage:images];
} break;
case NSStreamEventEndEncountered:
{
//UIImage *images = [[UIImage alloc]initWithData:data];
//[dvdCover setImage:images];
} break;
}
}
Hi, you can check this code; hope it will help you...
case NSStreamEventHasBytesAvailable:
{
uint32_t max_size = 1000000; // Max size of the received imaged you can modify it as your reqirement.
NSMutableData* buffer = [[NSMutableData alloc] initWithLength: max_size];
NSInteger totalBytesRead = 0;
NSInteger bytesRead = [(NSInputStream *)stream read: [buffer mutableBytes] maxLength: max_size];
if (bytesRead != 0) {
while (bytesRead > 0 && totalBytesRead + bytesRead < max_size) {
totalBytesRead+= bytesRead;
bytesRead = [(NSInputStream *)stream read: [buffer mutableBytes] + totalBytesRead maxLength: max_size - totalBytesRead];
}
if (bytesRead >= 0) {
totalBytesRead += bytesRead;
}
else {
// read failure, report error and bail (not forgetting to release buffer)
}
[buffer setLength: totalBytesRead];
yourImageName.image = [UIImage imageWithData: buffer];
}
[buffer release];
} break;
It seems you are assuming the whole JPEG image will be transferred in one chunk, so that you can read it with a single occurrence of the 'HasBytesAvailable' event. However, you should also consider the case where the JPEG image is transferred to you in multiple chunks.
It might work when you set a breakpoint because code execution is halted there, and the network buffer has plenty of time to receive all bytes of the image. Without breakpoints it might not have time to do so.
Try refactoring your code to accumulate the byte chunks, and only assume the image is complete when all bytes have been transferred. (Normally you have to know beforehand how many bytes the image is going to be, or you can just capture the end-of-stream event.)
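A sketch of that approach (dvdCover and inputStream come from the question; keeping the received data in an ivar such as receivedData is the assumption here):
// ivar assumed in the class: NSMutableData *receivedData;
- (void)stream:(NSStream *)stream handleEvent:(NSStreamEvent)eventCode {
switch(eventCode) {
case NSStreamEventHasBytesAvailable:
{
if (receivedData == nil)
receivedData = [[NSMutableData alloc] init];
uint8_t buffer[1024];
while ([inputStream hasBytesAvailable]) {
NSInteger len = [inputStream read:buffer maxLength:sizeof(buffer)];
if (len > 0)
[receivedData appendBytes:buffer length:len]; // append only the bytes actually read
}
} break;
case NSStreamEventEndEncountered:
{
// The whole image has now arrived; only decode it here.
UIImage *image = [[UIImage alloc] initWithData:receivedData];
[dvdCover setImage:image];
[image release];
[receivedData release];
receivedData = nil;
} break;
default:
break;
}
}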

OpenAL alSourceQueueBuffers & alSourceUnqueueBuffers

Everyone, I have a problem with the alSourceUnqueueBuffers API when using the OpenAL library.
My problem is as follows:
1. I play PCM music through a streaming mechanism.
2. The application queues up one or more buffer names using alSourceQueueBuffers.
When a buffer has been processed, I want to fill it with new audio data in my getSourceState method. But when I call the OpenAL API alSourceUnqueueBuffers, it returns the error AL_INVALID_OPERATION, even though I am following the OpenAL documentation.
I tried to work around this by calling alSourceStop(source) before alSourceUnqueueBuffers, and alSourcePlay(source) after filling in new data via alBufferData & alSourceQueueBuffers, but that is no good, because it interrupts the music.
Who can help me find the problem? And where can I find more information and techniques for OpenAL?
I am waiting for your help. Thanks, everyone.
My code is as follows:
.h:
@interface myPlayback : NSObject
{
ALuint source;
ALuint * buffers;
ALCcontext* context;
ALCdevice* device;
unsigned long long offset;
ALenum m_format;
ALsizei m_freq;
void* data;
}
@end
.m
- (void)initOpenAL
{
ALenum error;
// Create a new OpenAL Device
// Pass NULL to specify the system’s default output device
device = alcOpenDevice(NULL);
if (device != NULL)
{
// Create a new OpenAL Context
// The new context will render to the OpenAL Device just created
context = alcCreateContext(device, 0);
if (context != NULL)
{
// Make the new context the Current OpenAL Context
alcMakeContextCurrent(context);
// Create some OpenAL Buffer Objects
buffers = (ALuint*)malloc(sizeof(ALuint) * 5);
alGenBuffers(5, buffers);
if((error = alGetError()) != AL_NO_ERROR) {
NSLog(#"Error Generating Buffers: %x", error);
exit(1);
}
// Create some OpenAL Source Objects
alGenSources(1, &source);
if((error = alGetError()) != AL_NO_ERROR)
{
NSLog(@"Error generating sources! %x\n", error);
exit(1);
}
}
}
// clear any errors
alGetError();
[self initBuffer];
[self initSource];
}
- (void) initBuffer
{
ALenum error = AL_NO_ERROR;
ALenum format;
ALsizei size;
ALsizei freq;
NSBundle* bundle = [NSBundle mainBundle];
// get some audio data from a wave file
CFURLRef fileURL = (CFURLRef)[[NSURL fileURLWithPath:[bundle pathForResource:@"4" ofType:@"caf"]] retain];
if (fileURL)
{
data = MyGetOpenALAudioData(fileURL, &size, &format, &freq);
CFRelease(fileURL);
m_freq = freq;
m_format = format;
if((error = alGetError()) != AL_NO_ERROR) {
NSLog(#"error loading sound: %x\n", error);
exit(1);
}
alBufferData(buffers[0], format, data, READ_SIZE , freq);
offset += READ_SIZE;
alBufferData(buffers[1], format, data + offset, READ_SIZE, freq);
offset += READ_SIZE;
alBufferData(buffers[2], format, data + offset, READ_SIZE, freq);
offset += READ_SIZE;
alBufferData(buffers[3], format, data + offset, READ_SIZE, freq);
offset += READ_SIZE;
alBufferData(buffers[4], format, data + offset, READ_SIZE, freq);
offset += READ_SIZE;
if((error = alGetError()) != AL_NO_ERROR) {
NSLog(#"error attaching audio to buffer: %x\n", error);
}
}
else
NSLog(#"Could not find file!\n");
}
- (void) initSource
{
ALenum error = AL_NO_ERROR;
alGetError(); // Clear the error
// Turn Looping ON
alSourcei(source, AL_LOOPING, AL_TRUE);
// Set Source Position
float sourcePosAL[] = {sourcePos.x, kDefaultDistance, sourcePos.y};
alSourcefv(source, AL_POSITION, sourcePosAL);
// Set Source Reference Distance
alSourcef(source, AL_REFERENCE_DISTANCE, 50.0f);
alSourceQueueBuffers(source, 5, buffers);
if((error = alGetError()) != AL_NO_ERROR) {
NSLog(#"Error attaching buffer to source: %x\n", error);
exit(1);
}
}
- (void)startSound
{
ALenum error;
NSLog(#"Start!\n");
// Begin playing our source file
alSourcePlay(source);
if((error = alGetError()) != AL_NO_ERROR) {
NSLog(#"error starting source: %x\n", error);
} else {
// Mark our state as playing (the view looks at this)
self.isPlaying = YES;
}
while (1) {
[self getSourceState];
}
}
-(void)getSourceState
{
int queued;
int processed;
int state;
alGetSourcei(source, AL_BUFFERS_QUEUED, &queued);
alGetSourcei(source, AL_BUFFERS_PROCESSED, &processed);
alGetSourcei(source, AL_SOURCE_STATE, &state);
NSLog(#"%d", queued);
NSLog(#"%d", processed);
NSLog(#"===================================");
while (processed > 0) {
for (int i = 0; i < processed; ++i) {
ALuint buf;
alGetError();
// alSourceStop(source);
ALenum y = alGetError();
NSLog(@"%d", y);
alSourceUnqueueBuffers(source, 1, &buf);
ALenum unqueueErr = alGetError(); //renamed so it doesn't shadow the loop counter i
NSLog(@"%d", unqueueErr);
processed --;
alBufferData(buf, m_format, data + offset, READ_SIZE, m_freq);
ALenum j = alGetError();
NSLog(@"%d", j);
alSourceQueueBuffers(source, 1, &buf);
ALenum k = alGetError();
NSLog(@"%d", k);
offset += READ_SIZE;
// alSourcePlay(source);
}
}
// [self getSourceState];
}
I found the cause of the problem.
The reason is that I turned looping on: alSourcei(source, AL_LOOPING, AL_TRUE);
If you set this, the source never marks its buffers as processed, so when you try to unqueue a buffer to fill it with new data (or to remove it from the source), you get the error.
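So for streaming, leave looping off and restart the source yourself if the queue runs dry. A sketch of the refill loop without AL_LOOPING (same member names as the code above):
alSourcei(source, AL_LOOPING, AL_FALSE); // streaming sources must not loop
int processed;
alGetSourcei(source, AL_BUFFERS_PROCESSED, &processed);
while (processed-- > 0) {
ALuint buf;
alSourceUnqueueBuffers(source, 1, &buf); // legal now that the buffer is processed
alBufferData(buf, m_format, (char *)data + offset, READ_SIZE, m_freq);
alSourceQueueBuffers(source, 1, &buf);
offset += READ_SIZE;
}
// If playback stopped before we could refill, restart it manually.
int state;
alGetSourcei(source, AL_SOURCE_STATE, &state);
if (state != AL_PLAYING)
alSourcePlay(source);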

How can I get AAC encoding with ExtAudioFile on iOS to work?

I need to convert a WAVE file into an AAC encoded M4A file on iOS. I'm aware that AAC encoding is not supported on older devices or in the simulator. I'm testing that before I run the code. But I still can't get it to work.
I looked into Apple's very own iPhoneExtAudioFileConvertTest example and I thought I followed it exactly, but still no luck!
Currently, I get a -50 (= error in user parameter list) while trying to set the client format on the destination file. On the source file, it works.
Below is my code. Any help is very much appreciated, thanks!
UInt32 size;
OSStatus result = noErr;
// Open a source audio file.
ExtAudioFileRef sourceAudioFile;
ExtAudioFileOpenURL( (CFURLRef)sourceURL, &sourceAudioFile );
// Get the source data format
AudioStreamBasicDescription sourceFormat;
size = sizeof( sourceFormat );
result = ExtAudioFileGetProperty( sourceAudioFile, kExtAudioFileProperty_FileDataFormat, &size, &sourceFormat );
// Define the output format (AAC).
AudioStreamBasicDescription outputFormat;
outputFormat.mFormatID = kAudioFormatMPEG4AAC;
outputFormat.mSampleRate = 44100;
outputFormat.mChannelsPerFrame = 2;
// Use AudioFormat API to fill out the rest of the description.
size = sizeof( outputFormat );
AudioFormatGetProperty( kAudioFormatProperty_FormatInfo, 0, NULL, &size, &outputFormat);
// Make a destination audio file with this output format.
ExtAudioFileRef destAudioFile;
ExtAudioFileCreateWithURL( (CFURLRef)destURL, kAudioFileM4AType, &outputFormat, NULL, kAudioFileFlags_EraseFile, &destAudioFile );
// Create canonical PCM client format.
AudioStreamBasicDescription clientFormat;
clientFormat.mSampleRate = sourceFormat.mSampleRate;
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mFormatFlags = kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
clientFormat.mChannelsPerFrame = 2;
clientFormat.mBitsPerChannel = 16;
clientFormat.mBytesPerFrame = 4;
clientFormat.mBytesPerPacket = 4;
clientFormat.mFramesPerPacket = 1;
// Set the client format in source and destination file.
size = sizeof( clientFormat );
ExtAudioFileSetProperty( sourceAudioFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat );
size = sizeof( clientFormat );
ExtAudioFileSetProperty( destAudioFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat );
// Make a buffer
int bufferSizeInFrames = 8000;
int bufferSize = ( bufferSizeInFrames * sourceFormat.mBytesPerFrame );
UInt8 * buffer = (UInt8 *)malloc( bufferSize );
AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mNumberChannels = clientFormat.mChannelsPerFrame;
bufferList.mBuffers[0].mData = buffer;
bufferList.mBuffers[0].mDataByteSize = ( bufferSize );
while( TRUE )
{
// Try to fill the buffer to capacity.
UInt32 framesRead = bufferSizeInFrames;
ExtAudioFileRead( sourceAudioFile, &framesRead, &bufferList );
// 0 frames read means EOF.
if( framesRead == 0 )
break;
// Write.
ExtAudioFileWrite( destAudioFile, framesRead, &bufferList );
}
free( buffer );
// Close the files.
ExtAudioFileDispose( sourceAudioFile );
ExtAudioFileDispose( destAudioFile );
Answered my own question: I had to pass this problem on to a colleague and he got it to work! I never had the chance to analyze my original problem, but I'm posting his solution here for the sake of completeness. The method below is called from within an NSThread; parameters are set via the threadDictionary, and he created a custom delegate to transmit progress feedback. (Apologies for the formatting; the following is meant to be one block of method implementation.)
- (void)encodeToAAC
{
RXAudioEncoderStatusType encoderStatus;
OSStatus result = noErr;
BOOL success = NO;
BOOL cancelled = NO;
UInt32 size;
ExtAudioFileRef sourceAudioFile,destAudioFile;
AudioStreamBasicDescription sourceFormat,outputFormat, clientFormat;
SInt64 totalFrames;
unsigned long long encodedBytes, totalBytes;
int bufferSizeInFrames, bufferSize;
UInt8 * buffer;
AudioBufferList bufferList;
NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];
NSFileManager * fileManager = [[[NSFileManager alloc] init] autorelease];
NSMutableDictionary * threadDict = [[NSThread currentThread] threadDictionary];
NSObject<RXAudioEncodingDelegate> * delegate = (NSObject<RXAudioEncodingDelegate> *)[threadDict objectForKey:#"Delegate"];
NSString *sourcePath = (NSString *)[threadDict objectForKey:#"SourcePath"];
NSString *destPath = (NSString *)[threadDict objectForKey:#"DestinationPath"];
NSURL * sourceURL = [NSURL fileURLWithPath:sourcePath];
NSURL * destURL = [NSURL fileURLWithPath:destPath];
// Open a source audio file.
result = ExtAudioFileOpenURL( (CFURLRef)sourceURL, &sourceAudioFile );
if( result != noErr )
{
DLog( #"Error in ExtAudioFileOpenURL: %ld", result );
goto bailout;
}
// Get the source data format
size = sizeof( sourceFormat );
result = ExtAudioFileGetProperty( sourceAudioFile, kExtAudioFileProperty_FileDataFormat, &size, &sourceFormat );
if( result != noErr )
{
DLog( #"Error in ExtAudioFileGetProperty: %ld", result );
goto bailout;
}
// Define the output format (AAC).
memset(&outputFormat, 0, sizeof(outputFormat));
outputFormat.mFormatID = kAudioFormatMPEG4AAC;
outputFormat.mSampleRate = 44100;
outputFormat.mFormatFlags = kMPEG4Object_AAC_Main;
outputFormat.mChannelsPerFrame = 2;
outputFormat.mBitsPerChannel = 0;
outputFormat.mBytesPerFrame = 0;
outputFormat.mBytesPerPacket = 0;
outputFormat.mFramesPerPacket = 1024;
// Use AudioFormat API to fill out the rest of the description.
//size = sizeof( outputFormat );
//AudioFormatGetProperty( kAudioFormatProperty_FormatInfo, 0, NULL, &size, &outputFormat);
// Make a destination audio file with this output format.
result = ExtAudioFileCreateWithURL( (CFURLRef)destURL, kAudioFileM4AType, &outputFormat, NULL, kAudioFileFlags_EraseFile, &destAudioFile );
if( result != noErr )
{
DLog( #"Error creating destination file: %ld", result );
goto bailout;
}
// Create the canonical PCM client format.
memset(&clientFormat, 0, sizeof(clientFormat));
clientFormat.mSampleRate = sourceFormat.mSampleRate;
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; //kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
clientFormat.mChannelsPerFrame = 2;
clientFormat.mBitsPerChannel = 16;
clientFormat.mBytesPerFrame = 4;
clientFormat.mBytesPerPacket = 4;
clientFormat.mFramesPerPacket = 1;
// Set the client format in source and destination file.
size = sizeof( clientFormat );
result = ExtAudioFileSetProperty( sourceAudioFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat );
if( result != noErr )
{
DLog( #"Error while setting client format in source file: %ld", result );
goto bailout;
}
size = sizeof( clientFormat );
result = ExtAudioFileSetProperty( destAudioFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat );
if( result != noErr )
{
DLog( #"Error while setting client format in destination file: %ld", result );
goto bailout;
}
// Make a buffer
bufferSizeInFrames = 8000;
bufferSize = ( bufferSizeInFrames * sourceFormat.mBytesPerFrame );
buffer = (UInt8 *)malloc( bufferSize );
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mNumberChannels = clientFormat.mChannelsPerFrame;
bufferList.mBuffers[0].mData = buffer;
bufferList.mBuffers[0].mDataByteSize = ( bufferSize );
// Obtain total number of audio frames to encode
size = sizeof( totalFrames );
result = ExtAudioFileGetProperty( sourceAudioFile, kExtAudioFileProperty_FileLengthFrames, &size, &totalFrames );
if( result != noErr )
{
DLog( #"Error in ExtAudioFileGetProperty, could not get kExtAudioFileProperty_FileLengthFrames from sourceFile: %ld", result );
goto bailout;
}
encodedBytes = 0;
totalBytes = totalFrames * sourceFormat.mBytesPerFrame;
[threadDict setValue:[NSValue value:&totalBytes withObjCType:#encode(unsigned long long)] forKey:#"TotalBytes"];
if (delegate != nil)
[self performSelectorOnMainThread:@selector(didStartEncoding) withObject:nil waitUntilDone:NO];
while( TRUE )
{
// Try to fill the buffer to capacity.
UInt32 framesRead = bufferSizeInFrames;
result = ExtAudioFileRead( sourceAudioFile, &framesRead, &bufferList );
if( result != noErr )
{
DLog( #"Error in ExtAudioFileRead: %ld", result );
success = NO;
break;
}
// 0 frames read means EOF.
if( framesRead == 0 ) {
success = YES;
break;
}
// Write.
result = ExtAudioFileWrite( destAudioFile, framesRead, &bufferList );
if( result != noErr )
{
DLog( #"Error in ExtAudioFileWrite: %ld", result );
success = NO;
break;
}
encodedBytes += framesRead * sourceFormat.mBytesPerFrame;
if (delegate != nil)
[self performSelectorOnMainThread:@selector(didEncodeBytes:) withObject:[NSValue value:&encodedBytes withObjCType:@encode(unsigned long long)] waitUntilDone:NO];
if ([[NSThread currentThread] isCancelled]) {
cancelled = YES;
DLog( #"Encoding was cancelled." );
success = NO;
break;
}
}
free( buffer );
// Close the files.
ExtAudioFileDispose( sourceAudioFile );
ExtAudioFileDispose( destAudioFile );
bailout:
encoderStatus.result = result;
[threadDict setValue:[NSValue value:&encoderStatus withObjCType:#encode(RXAudioEncoderStatusType)] forKey:#"EncodingError"];
// Report to the delegate if one exists
if (delegate != nil)
if (success)
[self performSelectorOnMainThread:@selector(didEncodeFile) withObject:nil waitUntilDone:YES];
else if (cancelled)
[self performSelectorOnMainThread:@selector(encodingCancelled) withObject:nil waitUntilDone:YES];
else
[self performSelectorOnMainThread:@selector(failedToEncodeFile) withObject:nil waitUntilDone:YES];
// Clear the partially encoded file if encoding failed or is cancelled midway
if ((cancelled || !success) && [fileManager fileExistsAtPath:destPath])
[fileManager removeItemAtURL:destURL error:NULL];
[threadDict setValue:[NSNumber numberWithBool:NO] forKey:@"isEncoding"];
[pool release];
}
Are you sure the sample rates match? Can you print the values for clientFormat and outputFormat at the point you’re getting the error? Otherwise I think you might need an AudioConverter.
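In case it helps with that, here is a small sketch of a helper to print the relevant ASBD fields (PrintASBD is just an assumed name):
static void PrintASBD(const char *label, const AudioStreamBasicDescription *asbd)
{
UInt32 fourcc = CFSwapInt32HostToBig(asbd->mFormatID); // make the FourCC readable on little-endian
printf("%s: rate=%.0f id='%4.4s' flags=0x%x bytes/pkt=%u frames/pkt=%u bytes/frame=%u ch=%u bits=%u\n",
label, asbd->mSampleRate, (const char *)&fourcc,
(unsigned)asbd->mFormatFlags, (unsigned)asbd->mBytesPerPacket,
(unsigned)asbd->mFramesPerPacket, (unsigned)asbd->mBytesPerFrame,
(unsigned)asbd->mChannelsPerFrame, (unsigned)asbd->mBitsPerChannel);
}
// e.g. PrintASBD("client", &clientFormat); PrintASBD("output", &outputFormat);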
I tried out the code in Sebastian's answer and while it worked for uncompressed files (aif, wav, caf), it didn't for a lossy compressed file (mp3). I also had an error code of -50, but in ExtAudioFileRead rather than ExtAudioFileSetProperty. From this question I learned that this error signifies a problem with the function parameters. Turns out the buffer for reading the audio file had a size of 0 bytes, a result of this line:
int bufferSize = ( bufferSizeInFrames * sourceFormat.mBytesPerFrame );
Switching it to use the bytes per frame from clientFormat instead (sourceFormat's value was 0) worked for me:
int bufferSize = ( bufferSizeInFrames * clientFormat.mBytesPerFrame );
This line was also in the question code, but I don't think that was the problem (but I had too much text for a comment).

iPhone Extended Audio File Services, mp3 -> PCM -> mp3

I would like to use the Core Audio Extended Audio File Services framework to read an mp3 file, process it as PCM, then write the modified file back as an mp3 file. I am able to convert the mp3 file to PCM, but am NOT able to write the PCM back as an mp3.
I have followed and analyzed the Apple ExtAudioFileConvertTest sample and also cannot get that to work. The failure point is when I set the client format for the output file (set to a canonical PCM type). This fails with error "fmt?" if the output target type is set to mp3.
Is it possible to do mp3 -> PCM -> mp3 on the iPhone? If I remove the failing line, setting the kExtAudioFileProperty_ClientDataFormat for the output file, the code fails with "pkd?" when I try to write to the output file later. So basically I have 2 errors:
1) "fmt?" when trying to set kExtAudioFileProperty_ClientDataFormat for the output file
2) "pkd?" when trying to write to the output file
Here is the code to set up the files:
NSURL *fileUrl = [NSURL fileURLWithPath:sourceFilePath];
OSStatus error = noErr;
//
// Open the file
//
error = ExtAudioFileOpenURL((CFURLRef)fileUrl, &sourceFile);
if(error){
NSLog(#"AudioClip: Error opening file at %#. Error code %d", sourceFilePath, error);
return NO;
}
//
// Store the number of frames in the file
//
SInt64 numberOfFrames = 0;
UInt32 propSize = sizeof(SInt64);
error = ExtAudioFileGetProperty(sourceFile, kExtAudioFileProperty_FileLengthFrames, &propSize, &numberOfFrames);
if(error){
NSLog(#"AudioClip: Error retreiving number of frames: %d", error);
[self closeAudioFile];
return NO;
}
frameCount = numberOfFrames;
//
// Get the source file format info
//
propSize = sizeof(sourceFileFormat);
memset(&sourceFileFormat, 0, sizeof(AudioStreamBasicDescription));
error = ExtAudioFileGetProperty(sourceFile, kExtAudioFileProperty_FileDataFormat, &propSize, &sourceFileFormat);
if(error){
NSLog(#"AudioClip: Error getting source audio file properties: %d", error);
[self closeAudioFile];
return NO;
}
//
// Set the format for our read. We read in PCM, clip, then write out mp3
//
memset(&readFileFormat, 0, sizeof(AudioStreamBasicDescription));
readFileFormat.mFormatID = kAudioFormatLinearPCM;
readFileFormat.mSampleRate = 44100;
readFileFormat.mFormatFlags = kAudioFormatFlagsCanonical | kAudioFormatFlagIsNonInterleaved;
readFileFormat.mChannelsPerFrame = 1;
readFileFormat.mBitsPerChannel = 8 * sizeof(AudioSampleType);
readFileFormat.mFramesPerPacket = 1;
readFileFormat.mBytesPerFrame = sizeof(AudioSampleType);
readFileFormat.mBytesPerPacket = sizeof(AudioSampleType);
readFileFormat.mReserved = 0;
propSize = sizeof(readFileFormat);
error = ExtAudioFileSetProperty(sourceFile, kExtAudioFileProperty_ClientDataFormat, propSize, &readFileFormat);
if(error){
NSLog(#"AudioClip: Error setting read format: %d", error);
[self closeAudioFile];
return NO;
}
//
// Set the format for the output file that we will write
//
propSize = sizeof(targetFileFormat);
memset(&targetFileFormat, 0, sizeof(AudioStreamBasicDescription));
targetFileFormat.mFormatID = kAudioFormatMPEGLayer3;
targetFileFormat.mChannelsPerFrame = 1;
//
// Let the API fill in the rest
//
error = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &propSize, &targetFileFormat);
if(error){
NSLog(#"AudioClip: Error getting target file format info: %d", error);
[self closeAudioFile];
return NO;
}
//
// Create our target file
//
NSURL *writeURL = [NSURL fileURLWithPath:targetFilePath];
error = ExtAudioFileCreateWithURL( (CFURLRef)writeURL, kAudioFileMP3Type,
&targetFileFormat, NULL,
kAudioFileFlags_EraseFile,
&targetFile);
if(error){
NSLog(#"AudioClip: Error opening target file for writing: %d", error);
[self closeAudioFile];
return NO;
}
//
// Set the client format for the output file the same as our client format for the input file
//
propSize = sizeof(readFileFormat);
error = ExtAudioFileSetProperty(targetFile, kExtAudioFileProperty_ClientDataFormat, propSize, &readFileFormat);
if(error){
NSLog(#"AudioClip: Error, cannot set client format for output file: %d", error);
[self closeAudioFile];
return NO;
}
And the code to read/write:
NSInteger framesToRead = finalFrameNumber - startFrameNumber;
while(framesToRead > 0){
//
// Read frames into our data
//
short *data = (short *)malloc(framesToRead * sizeof(short));
if(!data){
NSLog(#"AudioPlayer: Cannot init memory for read buffer");
[self notifyDelegateFailure];
[self closeAudioFile];
return;
}
AudioBufferList bufferList;
OSStatus error = noErr;
UInt32 loadedPackets = framesToRead;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mNumberChannels = 1;
bufferList.mBuffers[0].mData = data;
bufferList.mBuffers[0].mDataByteSize = (framesToRead * sizeof(short));
NSLog(#"AudioClip: Before read nNumberBuffers = %d, mNumberChannels = %d, mData = %p, mDataByteSize = %d",
bufferList.mNumberBuffers, bufferList.mBuffers[0].mNumberChannels, bufferList.mBuffers[0].mData,
bufferList.mBuffers[0].mDataByteSize);
error = ExtAudioFileRead(sourceFile, &loadedPackets, &bufferList);
if(error){
NSLog(#"AudioClip: Error %d from ExtAudioFileRead", error);
[self notifyDelegateFailure];
[self closeAudioFile];
return;
}
//
// Now write the data to our file which will convert it into a mp3 file
//
NSLog(#"AudioClip: After read nNumberBuffers = %d, mNumberChannels = %d, mData = %p, mDataByteSize = %d",
bufferList.mNumberBuffers, bufferList.mBuffers[0].mNumberChannels, bufferList.mBuffers[0].mData,
bufferList.mBuffers[0].mDataByteSize);
error = ExtAudioFileWrite(targetFile, loadedPackets, &bufferList);
if(error){
NSLog(#"AudioClip: Error %d from ExtAudioFileWrite", error);
[self notifyDelegateFailure];
[self closeAudioFile];
return;
}
framesToRead -= loadedPackets;
}
Apple doesn't supply an MP3 encoder, only a decoder. The source document is a bit outdated, but AFAIK it is still current: http://developer.apple.com/library/ios/#documentation/MusicAudio/Conceptual/CoreAudioOverview/SupportedAudioFormatsMacOSX/SupportedAudioFormatsMacOSX.html%23//apple_ref/doc/uid/TP40003577-CH7-SW1
I think your best bet might be to use AAC.
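That is, keep the same ExtAudioFile pipeline but point the output at AAC in an .m4a container. A sketch of just the lines that would change from the question's setup:
memset(&targetFileFormat, 0, sizeof(AudioStreamBasicDescription));
targetFileFormat.mFormatID = kAudioFormatMPEG4AAC; // AAC instead of kAudioFormatMPEGLayer3
targetFileFormat.mSampleRate = 44100;
targetFileFormat.mChannelsPerFrame = 1;
propSize = sizeof(targetFileFormat);
AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &propSize, &targetFileFormat);
error = ExtAudioFileCreateWithURL((CFURLRef)writeURL, kAudioFileM4AType, // .m4a container, not kAudioFileMP3Type
&targetFileFormat, NULL, kAudioFileFlags_EraseFile, &targetFile);
// The PCM client format (readFileFormat) can then be set on targetFile exactly as before.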