How do I get a waveform from an OpenAL buffer?

I am using OpenAL and ALUT on a project I am working on. I need to get the waveform from the buffer. I am using alutLoadWAVFile to load the data into the buffer. When I output each item in the buffer, I get something like this:
This is not the waveform, because the waveform looks like this in Audacity:
My code (the relevant parts anyway):
unsigned char* alBuffer;
...
alutLoadWAVFile((ALbyte*)("test2.wav"), &alFormatBuffer,
                (void**)&alBuffer, (ALsizei*)&alBufferLen, &alFreqBuffer, &alLoop);
...
for (int i = 0; i < (alBufferLen > 5000 ? 5000 : alBufferLen); i++) {
    log << (int)data[i] << "\n";
}

I think what you're expecting is for the buffer data to be exactly what you see in Audacity. That won't really be the case if the WAV is anything other than 8-bit mono (I think, anyway; it's been a while).
Also, you seem to be casting data[i] from an unsigned char to an int, which may be another problem.
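If the file is 16-bit (which is what Audacity is most likely showing), each sample spans two bytes, so the buffer has to be reinterpreted as signed 16-bit values before it will look like the waveform. A minimal sketch against the variables in your snippet, assuming a 16-bit little-endian WAV (check that the loaded format is AL_FORMAT_MONO16 or AL_FORMAT_STEREO16):
// Sketch: log the buffer as signed 16-bit samples instead of raw bytes.
// Assumes alBuffer/alBufferLen as loaded above and 16-bit PCM data.
const short* samples = reinterpret_cast<const short*>(alBuffer);
int sampleCount = alBufferLen / 2;                   // two bytes per sample
int limit = sampleCount > 5000 ? 5000 : sampleCount;
for (int i = 0; i < limit; i++) {
    log << samples[i] << "\n";                       // values range from -32768 to 32767
}
For stereo files the samples alternate left/right, so logging every other value gives a single channel.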
Also, you might want to attempt manually reading the wav file instead of using ALUT. ALUT is a crutch that makes you weak. :)
Here's a program that reads and plays a wav without ALUT. With it you'll be able to figure out the mono/stereoness and freq of the wav file. Hopefully from there you can start messing with the buffer directly and output exactly what you want.
#include <stdio.h>
#include <stdint.h>
#include <AL/al.h>
#include <AL/alc.h>

// Fixed-width types keep the on-disk chunk layout correct on both 32- and 64-bit builds.
struct RIFF_Header {
    char chunkID[4];
    int32_t chunkSize;
    char format[4];
};

struct WAVE_Format {
    char subChunkID[4];
    int32_t subChunkSize;
    int16_t audioFormat;
    int16_t numChannels;
    int32_t sampleRate;
    int32_t byteRate;
    int16_t blockAlign;
    int16_t bitsPerSample;
};

struct WAVE_Data {
    char subChunkID[4];
    int32_t subChunk2Size;
};

bool loadWavFile(const char* filename, ALuint* buffer,
                 ALsizei* size, ALsizei* frequency,
                 ALenum* format) {
    FILE* soundFile = NULL;
    WAVE_Format wave_format;
    RIFF_Header riff_header;
    WAVE_Data wave_data;
    unsigned char* data = NULL;
    try {
        soundFile = fopen(filename, "rb");
        if (!soundFile)
            throw (filename);

        // Read and check the RIFF/WAVE header.
        fread(&riff_header, sizeof(RIFF_Header), 1, soundFile);
        if ((riff_header.chunkID[0] != 'R' ||
             riff_header.chunkID[1] != 'I' ||
             riff_header.chunkID[2] != 'F' ||
             riff_header.chunkID[3] != 'F') ||
            (riff_header.format[0] != 'W' ||
             riff_header.format[1] != 'A' ||
             riff_header.format[2] != 'V' ||
             riff_header.format[3] != 'E'))
            throw ("Invalid RIFF or WAVE Header");

        // Read the fmt chunk.
        fread(&wave_format, sizeof(WAVE_Format), 1, soundFile);
        if (wave_format.subChunkID[0] != 'f' ||
            wave_format.subChunkID[1] != 'm' ||
            wave_format.subChunkID[2] != 't' ||
            wave_format.subChunkID[3] != ' ')
            throw ("Invalid Wave Format");

        // Skip any extra fmt bytes (e.g. the 2-byte cbSize field).
        if (wave_format.subChunkSize > 16)
            fseek(soundFile, wave_format.subChunkSize - 16, SEEK_CUR);

        // Read the data chunk header, then the samples themselves.
        fread(&wave_data, sizeof(WAVE_Data), 1, soundFile);
        if (wave_data.subChunkID[0] != 'd' ||
            wave_data.subChunkID[1] != 'a' ||
            wave_data.subChunkID[2] != 't' ||
            wave_data.subChunkID[3] != 'a')
            throw ("Invalid data header");

        data = new unsigned char[wave_data.subChunk2Size];
        if (!fread(data, wave_data.subChunk2Size, 1, soundFile))
            throw ("error loading WAVE data into struct!");

        *size = wave_data.subChunk2Size;
        *frequency = wave_format.sampleRate;
        if (wave_format.numChannels == 1) {
            if (wave_format.bitsPerSample == 8)
                *format = AL_FORMAT_MONO8;
            else if (wave_format.bitsPerSample == 16)
                *format = AL_FORMAT_MONO16;
        } else if (wave_format.numChannels == 2) {
            if (wave_format.bitsPerSample == 8)
                *format = AL_FORMAT_STEREO8;
            else if (wave_format.bitsPerSample == 16)
                *format = AL_FORMAT_STEREO16;
        }

        // OpenAL copies the data into its own buffer, so ours can be freed.
        alGenBuffers(1, buffer);
        alBufferData(*buffer, *format, (void*)data, *size, *frequency);
        delete[] data;

        fclose(soundFile);
        return true;
    } catch (const char* error) {
        fprintf(stderr, "loadWavFile: %s\n", error);
        delete[] data;
        if (soundFile != NULL)
            fclose(soundFile);
        return false;
    }
}
int main() {
    // Sound play data
    ALint state;      // The state of the sound source
    ALuint bufferID;  // The OpenAL sound buffer ID
    ALuint sourceID;  // The OpenAL sound source
    ALenum format;    // The sound data format
    ALsizei freq;     // The frequency of the sound data
    ALsizei size;     // Data size

    ALCdevice* device = alcOpenDevice(NULL);
    ALCcontext* context = alcCreateContext(device, NULL);
    alcMakeContextCurrent(context);

    // Create the sound source (loadWavFile generates the buffer itself)
    alGenSources(1, &sourceID);

    // Set the source and listener to the same location
    alListener3f(AL_POSITION, 0.0f, 0.0f, 0.0f);
    alSource3f(sourceID, AL_POSITION, 0.0f, 0.0f, 0.0f);

    loadWavFile("..\\wavdata\\YOURWAVHERE.wav", &bufferID, &size, &freq, &format);

    alSourcei(sourceID, AL_BUFFER, bufferID);
    alSourcePlay(sourceID);

    // Busy-wait until playback finishes.
    do {
        alGetSourcei(sourceID, AL_SOURCE_STATE, &state);
    } while (state != AL_STOPPED);

    // Delete the source before the buffer so the buffer is no longer in use.
    alDeleteSources(1, &sourceID);
    alDeleteBuffers(1, &bufferID);
    alcDestroyContext(context);
    alcCloseDevice(device);
    return 0;
}

Related

GPS Data Parsing

I got my hands on a USB DeLorme Earthmate GPS LT-20, which I want to use as part of a mobile, Raspberry Pi-based GPS unit. I have been able to access the raw serial data, but I am at odds over an effective means of parsing the data into a usable format. The current plan is just to have it printed on screen in a meaningful way; I'm just looking for ideas. Below is a sample of the data; I have altered the GPS location data to remove the particular location of testing. I prefer to code in C.
I have read the following reference sites:
http://www.gpsinformation.org/dale/nmea.htm
https://en.wikipedia.org/wiki/List_of_GPS_satellites
$GPRMC,050229.000,A,3XX8.647,N,11XX1.282,W,0.1,0.0,140518,11.7,E*4B
$GPGGA,050229.000,3XX8.64662,N,11XX1.28205,W,1,06,1.5,725.48,M,-28.4,M,,*5D
$GPVTG,0.0,T,11.7,M,0.1,N,0.1,K*79
$GPGSV,3,1,09,10,34,240,34,13,24,054,00,15,47,086,26,16,25,292,30*77
$GPGSV,3,2,09,20,79,310,31,21,65,345,37,26,25,260,00,27,11,320,00*78
$GPGSV,3,3,09,29,46,147,34,,,,,,,,,,,,*4C
$PSTMECH,21,7,20,7,15,7,29,7,10,7,00,0,16,7,00,0,00,0,00,0,00,0,00,0*5C
Looking at this information:
`"$GPRMC,050229.000,A,3008.647,N,11001.282,W,0.1,0.0,140518,11.7,E*4B"`
Use strtok for parsing:
#include <stdio.h>
#include <string.h>

int main(void)
{
    FILE *fp = fopen("test.txt", "r");
    char buf[256];
    char *array[20];

    if (fp == NULL)
        return 1;

    while (fgets(buf, sizeof(buf), fp))
    {
        if (strstr(buf, "$GPRMC"))
        {
            int count = 0;
            char *token = strtok(buf, ",");
            while (token != NULL)
            {
                array[count++] = token;
                token = strtok(NULL, ",");
                if (count == 20)
                    break;
            }
            printf("Latitude : %s %s\n", array[3], array[4]);
            printf("Longitude : %s %s\n", array[5], array[6]);
        }
    }
    fclose(fp);
    return 0;
}
Result:
Latitude : 3008.647 N
Longitude : 11001.282 W
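If a numeric position is needed rather than the raw strings, note that NMEA latitude/longitude come as ddmm.mmmm / dddmm.mmmm, so the minutes have to be divided out. A small helper along these lines (a sketch, not part of the answer above) converts a coordinate token plus its hemisphere token into signed decimal degrees:
#include <stdlib.h>

/* Convert an NMEA coordinate string ("3008.647") and hemisphere ("N"/"S"/"E"/"W")
   into signed decimal degrees. Sketch only; no input validation. */
double nmea_to_degrees(const char *coord, const char *hemisphere)
{
    double value = atof(coord);                /* e.g. 3008.647 */
    int degrees = (int)(value / 100);          /* 30             */
    double minutes = value - degrees * 100;    /* 8.647          */
    double result = degrees + minutes / 60.0;  /* ~30.1441       */
    if (hemisphere[0] == 'S' || hemisphere[0] == 'W')
        result = -result;
    return result;
}
With the tokens from the code above, nmea_to_degrees(array[3], array[4]) would give the $GPRMC latitude in decimal degrees.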

How to play audio backwards?

Some people have suggested reading the audio data from end to start, creating a copy written from start to end, and then simply playing that reversed audio data.
Are there existing examples for iOS how this is done?
I found an example project called MixerHost, which at one point uses an AudioUnitSampleType to hold the audio data that has been read from file, and assigns it to a buffer.
This is defined as:
typedef SInt32 AudioUnitSampleType;
#define kAudioUnitSampleFractionBits 24
And according to Apple:
The canonical audio sample type for audio units and other audio
processing in iPhone OS is noninterleaved linear PCM with 8.24-bit
fixed-point samples.
So in other words it holds noninterleaved linear PCM audio data.
But I can't figure out where this data is being read in, and where it is stored. Here's the code that loads the audio data and buffers it:
- (void) readAudioFilesIntoMemory {
    for (int audioFile = 0; audioFile < NUM_FILES; ++audioFile) {
        NSLog (@"readAudioFilesIntoMemory - file %i", audioFile);

        // Instantiate an extended audio file object.
        ExtAudioFileRef audioFileObject = 0;

        // Open an audio file and associate it with the extended audio file object.
        OSStatus result = ExtAudioFileOpenURL (sourceURLArray[audioFile], &audioFileObject);
        if (noErr != result || NULL == audioFileObject) {[self printErrorMessage: @"ExtAudioFileOpenURL" withStatus: result]; return;}

        // Get the audio file's length in frames.
        UInt64 totalFramesInFile = 0;
        UInt32 frameLengthPropertySize = sizeof (totalFramesInFile);
        result = ExtAudioFileGetProperty (
                     audioFileObject,
                     kExtAudioFileProperty_FileLengthFrames,
                     &frameLengthPropertySize,
                     &totalFramesInFile
                 );
        if (noErr != result) {[self printErrorMessage: @"ExtAudioFileGetProperty (audio file length in frames)" withStatus: result]; return;}

        // Assign the frame count to the soundStructArray instance variable
        soundStructArray[audioFile].frameCount = totalFramesInFile;

        // Get the audio file's number of channels.
        AudioStreamBasicDescription fileAudioFormat = {0};
        UInt32 formatPropertySize = sizeof (fileAudioFormat);
        result = ExtAudioFileGetProperty (
                     audioFileObject,
                     kExtAudioFileProperty_FileDataFormat,
                     &formatPropertySize,
                     &fileAudioFormat
                 );
        if (noErr != result) {[self printErrorMessage: @"ExtAudioFileGetProperty (file audio format)" withStatus: result]; return;}

        UInt32 channelCount = fileAudioFormat.mChannelsPerFrame;

        // Allocate memory in the soundStructArray instance variable to hold the left channel,
        // or mono, audio data
        soundStructArray[audioFile].audioDataLeft =
            (AudioUnitSampleType *) calloc (totalFramesInFile, sizeof (AudioUnitSampleType));

        AudioStreamBasicDescription importFormat = {0};
        if (2 == channelCount) {
            soundStructArray[audioFile].isStereo = YES;
            // Sound is stereo, so allocate memory in the soundStructArray instance variable to
            // hold the right channel audio data
            soundStructArray[audioFile].audioDataRight =
                (AudioUnitSampleType *) calloc (totalFramesInFile, sizeof (AudioUnitSampleType));
            importFormat = stereoStreamFormat;
        } else if (1 == channelCount) {
            soundStructArray[audioFile].isStereo = NO;
            importFormat = monoStreamFormat;
        } else {
            NSLog (@"*** WARNING: File format not supported - wrong number of channels");
            ExtAudioFileDispose (audioFileObject);
            return;
        }

        // Assign the appropriate mixer input bus stream data format to the extended audio
        // file object. This is the format used for the audio data placed into the audio
        // buffer in the SoundStruct data structure, which is in turn used in the
        // inputRenderCallback callback function.
        result = ExtAudioFileSetProperty (
                     audioFileObject,
                     kExtAudioFileProperty_ClientDataFormat,
                     sizeof (importFormat),
                     &importFormat
                 );
        if (noErr != result) {[self printErrorMessage: @"ExtAudioFileSetProperty (client data format)" withStatus: result]; return;}

        // Set up an AudioBufferList struct, which has two roles:
        //
        //   1. It gives the ExtAudioFileRead function the configuration it
        //      needs to correctly provide the data to the buffer.
        //
        //   2. It points to the soundStructArray[audioFile].audioDataLeft buffer, so
        //      that audio data obtained from disk using the ExtAudioFileRead function
        //      goes to that buffer

        // Allocate memory for the buffer list struct according to the number of
        // channels it represents.
        AudioBufferList *bufferList;
        bufferList = (AudioBufferList *) malloc (
            sizeof (AudioBufferList) + sizeof (AudioBuffer) * (channelCount - 1)
        );
        if (NULL == bufferList) {NSLog (@"*** malloc failure for allocating bufferList memory"); return;}

        // initialize the mNumberBuffers member
        bufferList->mNumberBuffers = channelCount;

        // initialize the mBuffers member to 0
        AudioBuffer emptyBuffer = {0};
        size_t arrayIndex;
        for (arrayIndex = 0; arrayIndex < channelCount; arrayIndex++) {
            bufferList->mBuffers[arrayIndex] = emptyBuffer;
        }

        // set up the AudioBuffer structs in the buffer list
        bufferList->mBuffers[0].mNumberChannels = 1;
        bufferList->mBuffers[0].mDataByteSize = totalFramesInFile * sizeof (AudioUnitSampleType);
        bufferList->mBuffers[0].mData = soundStructArray[audioFile].audioDataLeft;
        if (2 == channelCount) {
            bufferList->mBuffers[1].mNumberChannels = 1;
            bufferList->mBuffers[1].mDataByteSize = totalFramesInFile * sizeof (AudioUnitSampleType);
            bufferList->mBuffers[1].mData = soundStructArray[audioFile].audioDataRight;
        }

        // Perform a synchronous, sequential read of the audio data out of the file and
        // into the soundStructArray[audioFile].audioDataLeft and (if stereo) .audioDataRight members.
        UInt32 numberOfPacketsToRead = (UInt32) totalFramesInFile;
        result = ExtAudioFileRead (
                     audioFileObject,
                     &numberOfPacketsToRead,
                     bufferList
                 );
        free (bufferList);

        if (noErr != result) {
            [self printErrorMessage: @"ExtAudioFileRead failure - " withStatus: result];
            // If reading from the file failed, then free the memory for the sound buffer.
            free (soundStructArray[audioFile].audioDataLeft);
            soundStructArray[audioFile].audioDataLeft = 0;
            if (2 == channelCount) {
                free (soundStructArray[audioFile].audioDataRight);
                soundStructArray[audioFile].audioDataRight = 0;
            }
            ExtAudioFileDispose (audioFileObject);
            return;
        }

        NSLog (@"Finished reading file %i into memory", audioFile);

        // Set the sample index to zero, so that playback starts at the
        // beginning of the sound.
        soundStructArray[audioFile].sampleNumber = 0;

        // Dispose of the extended audio file object, which also
        // closes the associated file.
        ExtAudioFileDispose (audioFileObject);
    }
}
Which part contains the array of audio samples which have to be reversed? Is it the AudioUnitSampleType?
bufferList->mBuffers[0].mData = soundStructArray[audioFile].audioDataLeft;
Note: audioDataLeft is defined as an AudioUnitSampleType, which is an SInt32 but not an array.
I found a clue in a Core Audio Mailing list:
Well, nothing to do with iPh*n* as far as I know (unless some audio
API has been omitted -- I am not a member of that program). AFAIR,
AudioFile.h and ExtendedAudioFile.h should provide you with what you
need to read or write a caf and access its streams/channels.
Basically, you want to read each channel/stream backwards, so, if you
don't need properties of the audio file it is pretty straightforward
once you have a handle on that channel's data, assuming it is not in a
compressed format. Considering the number of formats a caf can
represent, this could take a few more lines of code than you're
thinking. Once you have a handle on uncompressed data, it should be
about as easy as reversing a string. Then you would of course replace
the file's data with the reversed data, or you could just feed the
audio output (or wherever you're sending the reversed signal) reading
whatever stream you have backwards.
This is what I tried, but when I assign my reversed buffer to the mData of both channels, I hear nothing:
AudioUnitSampleType *leftData = soundStructArray[audioFile].audioDataLeft;
AudioUnitSampleType *reversedData = (AudioUnitSampleType *) calloc (totalFramesInFile, sizeof (AudioUnitSampleType));
UInt64 j = 0;
for (UInt64 i = (totalFramesInFile - 1); i > -1; i--) {
    reversedData[j] = leftData[i];
    j++;
}
I have worked on a sample app which records what the user says and plays it back backwards. I have used Core Audio to achieve this. Link to app code.
/*
 Each sample is 16 bits (2 bytes) in size (mono channel).
 You can load one sample at a time by copying it into a different buffer, starting at the end
 of the recording and reading backwards. When you get to the start of the data you have
 reversed the data, and playing it will sound reversed.
*/

// set up output file
AudioFileID outputAudioFile;

AudioStreamBasicDescription myPCMFormat;
myPCMFormat.mSampleRate = 16000.00;
myPCMFormat.mFormatID = kAudioFormatLinearPCM;
myPCMFormat.mFormatFlags = kAudioFormatFlagsCanonical;
myPCMFormat.mChannelsPerFrame = 1;
myPCMFormat.mFramesPerPacket = 1;
myPCMFormat.mBitsPerChannel = 16;
myPCMFormat.mBytesPerPacket = 2;
myPCMFormat.mBytesPerFrame = 2;

AudioFileCreateWithURL((__bridge CFURLRef)self.flippedAudioUrl,
                       kAudioFileCAFType,
                       &myPCMFormat,
                       kAudioFileFlags_EraseFile,
                       &outputAudioFile);

// set up input file
AudioFileID inputAudioFile;
OSStatus theErr = noErr;
UInt64 fileDataSize = 0;

AudioStreamBasicDescription theFileFormat;
UInt32 thePropertySize = sizeof(theFileFormat);

theErr = AudioFileOpenURL((__bridge CFURLRef)self.recordedAudioUrl, kAudioFileReadPermission, 0, &inputAudioFile);

thePropertySize = sizeof(fileDataSize);
theErr = AudioFileGetProperty(inputAudioFile, kAudioFilePropertyAudioDataByteCount, &thePropertySize, &fileDataSize);

UInt32 dataSize = fileDataSize;
void *theData = malloc(dataSize);

// Read samples from the end of the input and write them to the start of the output.
UInt32 readPoint = dataSize;
UInt32 writePoint = 0;
while (readPoint >= 2)
{
    UInt32 bytesToRead = 2;
    readPoint -= 2;   // step back one 16-bit sample (the original loop started one sample past the end)
    AudioFileReadBytes(inputAudioFile, false, readPoint, &bytesToRead, theData);
    AudioFileWriteBytes(outputAudioFile, false, writePoint, &bytesToRead, theData);
    writePoint += 2;
}

free(theData);
AudioFileClose(inputAudioFile);
AudioFileClose(outputAudioFile);
Hope this helps.
Typically, when an ASBD is being used, the fields describe the complete layout of the sample data in the buffers that are represented by this description - where typically those buffers are represented by an AudioBuffer that is contained in an AudioBufferList.
However, when an ASBD has the kAudioFormatFlagIsNonInterleaved flag, the AudioBufferList has a different structure and semantic. In this case, the ASBD fields will describe the format of ONE of the AudioBuffers that are contained in the list, AND each AudioBuffer in the list is determined to have a single (mono) channel of audio data. Then, the ASBD's mChannelsPerFrame will indicate the total number of AudioBuffers that are contained within the AudioBufferList - where each buffer contains one channel. This is used primarily with the AudioUnit (and AudioConverter) representation of this list - and won't be found in the AudioHardware usage of this structure.
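To make that concrete, this is roughly what such a noninterleaved ASBD looks like; it mirrors how MixerHost sets up its stereoStreamFormat, as far as I recall, so treat it as a sketch rather than the exact sample code:
AudioStreamBasicDescription stereoStreamFormat = {0};
size_t bytesPerSample = sizeof (AudioUnitSampleType);       // 4 bytes of 8.24 fixed point

stereoStreamFormat.mFormatID         = kAudioFormatLinearPCM;
stereoStreamFormat.mFormatFlags      = kAudioFormatFlagsAudioUnitCanonical;  // includes kAudioFormatFlagIsNonInterleaved
stereoStreamFormat.mBytesPerPacket   = bytesPerSample;      // per-buffer values, i.e. one mono channel
stereoStreamFormat.mFramesPerPacket  = 1;
stereoStreamFormat.mBytesPerFrame    = bytesPerSample;
stereoStreamFormat.mChannelsPerFrame = 2;                   // the number of mono AudioBuffers in the list
stereoStreamFormat.mBitsPerChannel   = 8 * bytesPerSample;
stereoStreamFormat.mSampleRate       = 44100.0;             // or the graph's sample rate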
You do not have to allocate a separate buffer to store the reversed data, this can take a fair bit of CPU, depending on the length of sound. To play a sound backwards, just make the sampleNumber counter start at totalFramesInFile - 1.
You can modify MixerHost like this, to achieve the desired effect.
Replace soundStructArray[audioFile].sampleNumber = 0; with
soundStructArray[audioFile].sampleNumber = totalFramesInFile - 1;
Make sampleNumber SInt32 instead of UInt32.
Replace the loop in which you write the samples out with this:
for (UInt32 frameNumber = 0; frameNumber < inNumberFrames; ++frameNumber) {
    outSamplesChannelLeft[frameNumber] = dataInLeft[sampleNumber];
    if (isStereo) outSamplesChannelRight[frameNumber] = dataInRight[sampleNumber];
    if (--sampleNumber < 0) sampleNumber = frameTotalForSound - 1;
}
This effectively makes it play backwards. Mmmm. It's been a while since I've heard the MixerHost music. I must admit I find it to be quite pleasing.
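One more thing worth checking in the reversal attempt from the question: i is declared as UInt64, so the condition i > -1 compares an unsigned value against -1 converted to a huge unsigned number and is never true. The copy loop therefore never runs, and the calloc'd reversedData stays all zeros, which by itself would explain the silence. A corrected version of that loop (sketch):
// Use a signed index so the termination test behaves as intended.
UInt64 j = 0;
for (SInt64 i = (SInt64)totalFramesInFile - 1; i >= 0; i--) {
    reversedData[j] = leftData[i];
    j++;
}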

Recording mp3 instead of caf file

I'm looking at this example: https://developer.apple.com/library/ios/#samplecode/SpeakHere/Introduction/Intro.html
I modified it (the AQRecorder.mm) to record an mp3 instead of a caf file. I changed kAudioFileCAFType to kAudioFileMP3Type, but it does not create the file.
The code became
void AQRecorder::SetupAudioFormat(UInt32 inFormatID)
{
    memset(&mRecordFormat, 0, sizeof(mRecordFormat));

    UInt32 size = sizeof(mRecordFormat.mSampleRate);
    XThrowIfError(AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate,
                                          &size,
                                          &mRecordFormat.mSampleRate), "couldn't get hardware sample rate");

    size = sizeof(mRecordFormat.mChannelsPerFrame);
    XThrowIfError(AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareInputNumberChannels,
                                          &size,
                                          &mRecordFormat.mChannelsPerFrame), "couldn't get input channel count");

    mRecordFormat.mFormatID = inFormatID;
    if (inFormatID == kAudioFormatLinearPCM)
    {
        // if we want pcm, default to signed 16-bit little-endian
        mRecordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
        mRecordFormat.mBitsPerChannel = 16;
        mRecordFormat.mBytesPerPacket = mRecordFormat.mBytesPerFrame = (mRecordFormat.mBitsPerChannel / 8) * mRecordFormat.mChannelsPerFrame;
        mRecordFormat.mFramesPerPacket = 1;
    }
}

void AQRecorder::StartRecord(CFStringRef inRecordFile)
{
    int i, bufferByteSize;
    UInt32 size;
    CFURLRef url;

    try {
        mFileName = CFStringCreateCopy(kCFAllocatorDefault, inRecordFile);

        // specify the recording format
        SetupAudioFormat(kAudioFormatLinearPCM);

        // create the queue
        XThrowIfError(AudioQueueNewInput(
                          &mRecordFormat,
                          MyInputBufferHandler,
                          this /* userData */,
                          NULL /* run loop */, NULL /* run loop mode */,
                          0 /* flags */, &mQueue), "AudioQueueNewInput failed");

        // get the record format back from the queue's audio converter --
        // the file may require a more specific stream description than was necessary to create the encoder.
        mRecordPacket = 0;

        size = sizeof(mRecordFormat);
        XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_StreamDescription,
                                            &mRecordFormat, &size), "couldn't get queue's format");

        NSString *recordFile = [NSTemporaryDirectory() stringByAppendingPathComponent: (NSString*)inRecordFile];
        NSLog(@"%@", recordFile);
        url = CFURLCreateWithString(kCFAllocatorDefault, (CFStringRef)recordFile, NULL);

        // create the audio file (was kAudioFileCAFType)
        XThrowIfError(AudioFileCreateWithURL(url, kAudioFileMP3Type, &mRecordFormat, kAudioFileFlags_EraseFile,
                                             &mRecordFile), "AudioFileCreateWithURL failed");
        CFRelease(url);

        // copy the cookie first to give the file object as much info as we can about the data going in
        // not necessary for pcm, but required for some compressed audio
        CopyEncoderCookieToFile();

        // allocate and enqueue buffers
        bufferByteSize = ComputeRecordBufferSize(&mRecordFormat, kBufferDurationSeconds); // enough bytes for half a second
        for (i = 0; i < kNumberRecordBuffers; ++i) {
            XThrowIfError(AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]),
                          "AudioQueueAllocateBuffer failed");
            XThrowIfError(AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL),
                          "AudioQueueEnqueueBuffer failed");
        }

        // start the queue
        mIsRunning = true;
        XThrowIfError(AudioQueueStart(mQueue, NULL), "AudioQueueStart failed");
    }
    catch (CAXException &e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
    }
    catch (...) {
        fprintf(stderr, "An unknown error occurred\n");
    }
}
Am I missing any settings, or is something else wrong with my code? MP3 is supposed to be supported by Apple:
https://developer.apple.com/library/mac/#documentation/MusicAudio/Reference/AudioFileConvertRef/Reference/reference.html
iOS devices don't support recording to the MP3 encoding format (I don't think any of them do). You have to choose an alternate format: Core Audio can read, but not write, MP3 files.
You can use the LAME library to encode the caf into mp3 format. Check this sample: iOSMp3Recorder
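For a rough idea of what the LAME side of that conversion looks like, here is an untested sketch that encodes 16-bit mono PCM (already pulled out of the CAF file) to MP3. The function and buffer names are made up for illustration and are not taken from iOSMp3Recorder:
#include <lame/lame.h>
#include <stdio.h>

/* Encode 16-bit mono PCM to MP3 with LAME (untested sketch). */
static void encode_pcm_to_mp3(const short *pcm, long numSamples,
                              int sampleRate, FILE *out)
{
    enum { CHUNK = 4096 };
    unsigned char mp3buf[CHUNK * 5 / 4 + 7200];   /* worst-case size per the LAME docs */

    lame_t lame = lame_init();
    lame_set_in_samplerate(lame, sampleRate);
    lame_set_num_channels(lame, 1);
    lame_init_params(lame);

    for (long pos = 0; pos < numSamples; pos += CHUNK) {
        int n = (int)(numSamples - pos < CHUNK ? numSamples - pos : CHUNK);
        /* for mono input the right-channel buffer is ignored */
        int written = lame_encode_buffer(lame, pcm + pos, pcm + pos, n,
                                         mp3buf, sizeof(mp3buf));
        if (written > 0) fwrite(mp3buf, 1, written, out);
    }

    int flushed = lame_encode_flush(lame, mp3buf, sizeof(mp3buf));
    if (flushed > 0) fwrite(mp3buf, 1, flushed, out);
    lame_close(lame);
}
On iOS you would first read the PCM out of the recorded CAF with AudioFile/ExtAudioFile and then feed it through a function like this.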

Wav file compression from LPCM to AAC

I record an audio file using the following code, which I developed based on Apple's sample code:
void AQRecorder::SetupAudioFormat(UInt32 inFormatID)
{
    memset(&mRecordFormat, 0, sizeof(mRecordFormat));

    UInt32 size = sizeof(mRecordFormat.mSampleRate);
    XThrowIfError(AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate,
                                          &size,
                                          &mRecordFormat.mSampleRate), "couldn't get hardware sample rate");

    size = sizeof(mRecordFormat.mChannelsPerFrame);
    XThrowIfError(AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareInputNumberChannels,
                                          &size,
                                          &mRecordFormat.mChannelsPerFrame), "couldn't get input channel count");

    mRecordFormat.mFormatID = inFormatID;
    if (inFormatID == kAudioFormatLinearPCM)
    {
        // if we want pcm, default to signed 16-bit little-endian
        mRecordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
        mRecordFormat.mBitsPerChannel = 16;
        mRecordFormat.mBytesPerPacket = mRecordFormat.mBytesPerFrame = (mRecordFormat.mBitsPerChannel / 8) * mRecordFormat.mChannelsPerFrame;
        mRecordFormat.mFramesPerPacket = 1;
    }
}

void AQRecorder::StartRecord(CFStringRef inRecordFile)
{
    int i, bufferByteSize;
    UInt32 size;
    CFURLRef url;

    try {
        mFileName = CFStringCreateCopy(kCFAllocatorDefault, inRecordFile);

        // specify the recording format
        SetupAudioFormat(kAudioFormatLinearPCM);

        // create the queue
        XThrowIfError(AudioQueueNewInput(
                          &mRecordFormat,
                          MyInputBufferHandler,
                          this /* userData */,
                          NULL /* run loop */, NULL /* run loop mode */,
                          0 /* flags */, &mQueue), "AudioQueueNewInput failed");

        // get the record format back from the queue's audio converter --
        // the file may require a more specific stream description than was necessary to create the encoder.
        mRecordPacket = 0;

        size = sizeof(mRecordFormat);
        XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_StreamDescription,
                                            &mRecordFormat, &size), "couldn't get queue's format");

        NSString *recordFile = [NSTemporaryDirectory() stringByAppendingPathComponent: (NSString*)inRecordFile];
        NSLog(@"%@", recordFile);
        url = CFURLCreateWithString(kCFAllocatorDefault, (CFStringRef)recordFile, NULL);

        // create the audio file (was kAudioFileCAFType)
        XThrowIfError(AudioFileCreateWithURL(url, kAudioFileWAVEType, &mRecordFormat, kAudioFileFlags_EraseFile,
                                             &mRecordFile), "AudioFileCreateWithURL failed");
        CFRelease(url);

        // copy the cookie first to give the file object as much info as we can about the data going in
        // not necessary for pcm, but required for some compressed audio
        CopyEncoderCookieToFile();

        // allocate and enqueue buffers
        bufferByteSize = ComputeRecordBufferSize(&mRecordFormat, kBufferDurationSeconds); // enough bytes for half a second
        for (i = 0; i < kNumberRecordBuffers; ++i) {
            XThrowIfError(AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]),
                          "AudioQueueAllocateBuffer failed");
            XThrowIfError(AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL),
                          "AudioQueueEnqueueBuffer failed");
        }

        // start the queue
        mIsRunning = true;
        XThrowIfError(AudioQueueStart(mQueue, NULL), "AudioQueueStart failed");
    }
    catch (CAXException &e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
    }
    catch (...) {
        fprintf(stderr, "An unknown error occurred\n");
    }
}
This code leads to a large output file; for example, a 2-minute recording is saved as about 11 MB.
I need to modify the code so that the output is AAC-compressed, in order to reduce its size. Any ideas?
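One way to shrink the output with this SpeakHere-derived code is to ask the audio queue for AAC directly and write the result into an .m4a (or CAF) container, since a WAVE file can only hold linear PCM. A rough, untested sketch of the changes; this is not from Apple's sample, and the field choices may need tuning:
// In SetupAudioFormat(): for a compressed format such as AAC, leave most of the
// ASBD zeroed and let the queue's converter fill in the details. The sample rate
// and channel count were already read from the hardware above.
if (inFormatID == kAudioFormatMPEG4AAC) {
    mRecordFormat.mFormatID = kAudioFormatMPEG4AAC;
    // no bits/bytes/frames-per-packet here; the encoder determines them
}

// In StartRecord(): request AAC and create an .m4a container instead of WAVE.
SetupAudioFormat(kAudioFormatMPEG4AAC);
// ... queue creation and the kAudioQueueProperty_StreamDescription read-back stay the same ...
XThrowIfError(AudioFileCreateWithURL(url, kAudioFileM4AType, &mRecordFormat,
                                     kAudioFileFlags_EraseFile, &mRecordFile),
              "AudioFileCreateWithURL failed");
The existing CopyEncoderCookieToFile() call matters here: AAC needs the magic cookie written to the file, whereas the PCM path simply had nothing to copy.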

Encoding Speex with libavcodec (FFMpeg)?

I successfully compiled libavcodec with speex enabled.
I modified the example from the FFmpeg docs to encode the sample audio into Speex.
But the resulting file cannot be played with VLC (which has a Speex decoder).
Any tips?
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <libavcodec/avcodec.h>

static void audio_encode_example(const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c = NULL;
    int frame_size, i, j, out_size, outbuf_size;
    FILE *f;
    short *samples;
    float t, tincr;
    uint8_t *outbuf;

    printf("Audio encoding\n");

    /* find the Speex encoder */
    codec = avcodec_find_encoder(CODEC_ID_SPEEX);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context();

    /* put sample parameters */
    c->bit_rate = 64000;
    c->sample_rate = 32000;
    c->channels = 2;
    c->sample_fmt = AV_SAMPLE_FMT_S16;

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* the codec gives us the frame size, in samples */
    frame_size = c->frame_size;
    printf("frame size %d\n", frame_size);
    samples = (short*) malloc(frame_size * 2 * c->channels);
    outbuf_size = 10000;
    outbuf = (uint8_t*) malloc(outbuf_size);

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }

    /* encode a single tone sound */
    t = 0;
    tincr = 2 * M_PI * 440.0 / c->sample_rate;
    for (i = 0; i < 200; i++) {
        for (j = 0; j < frame_size; j++) {
            samples[2*j] = (int)(sin(t) * 10000);
            samples[2*j+1] = samples[2*j];
            t += tincr;
        }
        /* encode the samples */
        out_size = avcodec_encode_audio(c, outbuf, outbuf_size, samples);
        fwrite(outbuf, 1, out_size, f);
    }
    fclose(f);
    free(outbuf);
    free(samples);

    avcodec_close(c);
    av_free(c);
}

int main(int argc, char **argv)
{
    avcodec_register_all();
    audio_encode_example(argv[1]);
    return 0;
}
Does Speex (I don't know it) by chance require a container format into which these frames are placed, with some kind of header? You're just taking the output of an encoder and dumping it into a file without going through any formatting (libavformat).
Try encoding the same data into Speex using the ffmpeg command line utility and see if the resulting file plays.
I'm looking at some info at www.speex.org and it seems that Speex data is normally put into .ogg files. The player you are using might not recognize raw Speex data, only Speex wrapped in an Ogg container.
Though not a 100% definite answer, I hope this is of some help!
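Following up on the container point: with a recent FFmpeg, the libavformat route looks roughly like the outline below. It uses the modern send/receive API, so the names differ from the deprecated calls in the question; treat it as a sketch of the steps rather than a drop-in fix.
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
}

// Outline (untested): mux libspeex packets into an Ogg container instead of
// dumping raw encoder output to a file. Error checking omitted for brevity.
static void write_speex_ogg(const char *filename)
{
    AVFormatContext *oc = NULL;
    avformat_alloc_output_context2(&oc, NULL, "ogg", filename);

    const AVCodec *codec = avcodec_find_encoder_by_name("libspeex");
    AVCodecContext *c = avcodec_alloc_context3(codec);
    c->sample_rate = 32000;
    c->sample_fmt  = AV_SAMPLE_FMT_S16;
    av_channel_layout_default(&c->ch_layout, 2);        // stereo
    c->time_base   = av_make_q(1, c->sample_rate);
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;        // Ogg wants the Speex header up front
    avcodec_open2(c, codec, NULL);

    AVStream *st = avformat_new_stream(oc, NULL);
    avcodec_parameters_from_context(st->codecpar, c);   // copies the encoder extradata too
    st->time_base = c->time_base;

    avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
    avformat_write_header(oc, NULL);

    // Feed PCM as AVFrames with avcodec_send_frame(), drain packets with
    // avcodec_receive_packet(), rescale timestamps with av_packet_rescale_ts(),
    // and write them with av_interleaved_write_frame().

    av_write_trailer(oc);
    avio_closep(&oc->pb);
    avcodec_free_context(&c);
    avformat_free_context(oc);
}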