I record an audio file using the following code, which I developed based on Apple's sample code:
void AQRecorder::SetupAudioFormat(UInt32 inFormatID)
{
memset(&mRecordFormat, 0, sizeof(mRecordFormat));
UInt32 size = sizeof(mRecordFormat.mSampleRate);
XThrowIfError(AudioSessionGetProperty( kAudioSessionProperty_CurrentHardwareSampleRate,
&size,
&mRecordFormat.mSampleRate), "couldn't get hardware sample rate");
size = sizeof(mRecordFormat.mChannelsPerFrame);
XThrowIfError(AudioSessionGetProperty( kAudioSessionProperty_CurrentHardwareInputNumberChannels,
&size,
&mRecordFormat.mChannelsPerFrame), "couldn't get input channel count");
mRecordFormat.mFormatID = inFormatID;
if (inFormatID == kAudioFormatLinearPCM)
{
// if we want pcm, default to signed 16-bit little-endian
mRecordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
mRecordFormat.mBitsPerChannel = 16;
mRecordFormat.mBytesPerPacket = mRecordFormat.mBytesPerFrame = (mRecordFormat.mBitsPerChannel / 8) * mRecordFormat.mChannelsPerFrame;
mRecordFormat.mFramesPerPacket = 1;
}
}
void AQRecorder::StartRecord(CFStringRef inRecordFile)
{
int i, bufferByteSize;
UInt32 size;
CFURLRef url;
try {
mFileName = CFStringCreateCopy(kCFAllocatorDefault, inRecordFile);
// specify the recording format
SetupAudioFormat(kAudioFormatLinearPCM);
// create the queue
XThrowIfError(AudioQueueNewInput(
&mRecordFormat,
MyInputBufferHandler,
this /* userData */,
NULL /* run loop */, NULL /* run loop mode */,
0 /* flags */, &mQueue), "AudioQueueNewInput failed");
// get the record format back from the queue's audio converter --
// the file may require a more specific stream description than was necessary to create the encoder.
mRecordPacket = 0;
size = sizeof(mRecordFormat);
XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_StreamDescription,
&mRecordFormat, &size), "couldn't get queue's format");
NSString *recordFile = [NSTemporaryDirectory() stringByAppendingPathComponent: (NSString*)inRecordFile];
NSLog(@"%@", recordFile);
url = CFURLCreateWithString(kCFAllocatorDefault, (CFStringRef)recordFile, NULL);
// create the audio file
XThrowIfError(AudioFileCreateWithURL(url, kAudioFileWAVEType, &mRecordFormat, kAudioFileFlags_EraseFile,
&mRecordFile), "AudioFileCreateWithURL failed");
CFRelease(url);
// copy the cookie first to give the file object as much info as we can about the data going in
// not necessary for pcm, but required for some compressed audio
CopyEncoderCookieToFile();
// allocate and enqueue buffers
bufferByteSize = ComputeRecordBufferSize(&mRecordFormat, kBufferDurationSeconds); // enough bytes for half a second
for (i = 0; i < kNumberRecordBuffers; ++i) {
XThrowIfError(AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]),
"AudioQueueAllocateBuffer failed");
XThrowIfError(AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL),
"AudioQueueEnqueueBuffer failed");
}
// start the queue
mIsRunning = true;
XThrowIfError(AudioQueueStart(mQueue, NULL), "AudioQueueStart failed");
}
catch (CAXException &e) {
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
catch (...) {
fprintf(stderr, "An unknown error occurred\n");
}
}
This code leads to a large output file; for example, 2 minutes of recording are saved as roughly 11 MB.
I need to modify the code so that the output is AAC-compressed, to reduce its size. Any ideas?
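One way to shrink the output dramatically is to ask the audio queue for AAC instead of linear PCM and write the result into an .m4a (or .caf) container. The sketch below is only an outline of how the format setup and the file-creation call could change; it assumes the rest of AQRecorder (buffer allocation, MyInputBufferHandler, CopyEncoderCookieToFile, etc.) stays as in the SpeakHere sample, and the sample rate and channel count shown are placeholder values.
void AQRecorder::SetupAACFormat() // hypothetical AAC counterpart of SetupAudioFormat
{
    memset(&mRecordFormat, 0, sizeof(mRecordFormat));
    mRecordFormat.mFormatID = kAudioFormatMPEG4AAC;
    mRecordFormat.mSampleRate = 44100.0; // or the hardware sample rate queried as before
    mRecordFormat.mChannelsPerFrame = 1; // mono keeps the file small
    // For a compressed format, leave mBitsPerChannel, mBytesPerPacket, mBytesPerFrame
    // and mFramesPerPacket at 0; the queue's converter fills them in, and the existing
    // kAudioQueueProperty_StreamDescription query picks up the final values.
}
// ...and when creating the file, use a container type that can hold AAC,
// ideally with an .m4a file name:
// XThrowIfError(AudioFileCreateWithURL(url, kAudioFileM4AType, &mRecordFormat,
//     kAudioFileFlags_EraseFile, &mRecordFile), "AudioFileCreateWithURL failed");
Note that the CopyEncoderCookieToFile() call the sample already makes becomes mandatory for AAC, and ComputeRecordBufferSize() must not assume a fixed mBytesPerFrame (as far as I remember, the SpeakHere version already handles compressed formats by querying the maximum packet size).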
I am using OpenAL and ALUT on a project I am working on. I need to get the waveform from the buffer. I am using alutLoadWAVFile to load the data into the buffer. When I output each item in the buffer, I get something like this:
This is not the waveform, because the waveform looks like this in Audacity:
My code (the relevant parts anyway):
unsigned char* alBuffer;
...
alutLoadWAVFile((ALbyte*)("test2.wav"), &alFormatBuffer,
(void **)&alBuffer, (ALsizei*)&alBufferLen, &alFreqBuffer, &alLoop);
...
for (int i = 0; i < (alBufferLen>5000?5000:alBufferLen); i++) {
log << (int)data[i] << "\n";
}
I think you're expecting the buffer data to be exactly what you see in Audacity. That won't really be the case if the WAV is anything other than 8-bit mono (I think anyway, it's been a while).
Also, you seem to be casting data[i] from an unsigned char to an int, which may be another problem.
Also, you might want to attempt manually reading the wav file instead of using ALUT. ALUT is a crutch that makes you weak. :)
Here's a program that reads and plays a wav without ALUT. With it you'll be able to figure out the mono/stereoness and freq of the wav file. Hopefully from there you can start messing with the buffer directly and output exactly what you want.
#include <stdio.h>
#include <AL\al.h>
#include <AL\alc.h>
struct RIFF_Header {
char chunkID[4];
long chunkSize;
char format[4];
};
struct WAVE_Format {
char subChunkID[4];
long subChunkSize;
short audioFormat;
short numChannels;
long sampleRate;
long byteRate;
short blockAlign;
short bitsPerSample;
};
struct WAVE_Data {
char subChunkID[4];
long subChunk2Size;
};
bool loadWavFile(const char* filename, ALuint* buffer,
ALsizei* size, ALsizei* frequency,
ALenum* format) {
FILE* soundFile = NULL;
WAVE_Format wave_format;
RIFF_Header riff_header;
WAVE_Data wave_data;
unsigned char* data;
try {
soundFile = fopen(filename, "rb");
if (!soundFile)
throw (filename);
fread(&riff_header, sizeof(RIFF_Header), 1, soundFile);
if ((riff_header.chunkID[0] != 'R' ||
riff_header.chunkID[1] != 'I' ||
riff_header.chunkID[2] != 'F' ||
riff_header.chunkID[3] != 'F') ||
(riff_header.format[0] != 'W' ||
riff_header.format[1] != 'A' ||
riff_header.format[2] != 'V' ||
riff_header.format[3] != 'E'))
throw ("Invalid RIFF or WAVE Header");
fread(&wave_format, sizeof(WAVE_Format), 1, soundFile);
if (wave_format.subChunkID[0] != 'f' ||
wave_format.subChunkID[1] != 'm' ||
wave_format.subChunkID[2] != 't' ||
wave_format.subChunkID[3] != ' ')
throw ("Invalid Wave Format");
if (wave_format.subChunkSize > 16)
fseek(soundFile, sizeof(short), SEEK_CUR);
fread(&wave_data, sizeof(WAVE_Data), 1, soundFile);
if (wave_data.subChunkID[0] != 'd' ||
wave_data.subChunkID[1] != 'a' ||
wave_data.subChunkID[2] != 't' ||
wave_data.subChunkID[3] != 'a')
throw ("Invalid data header");
data = new unsigned char[wave_data.subChunk2Size];
if (!fread(data, wave_data.subChunk2Size, 1, soundFile))
throw ("error loading WAVE data into struct!");
*size = wave_data.subChunk2Size;
*frequency = wave_format.sampleRate;
if (wave_format.numChannels == 1) {
if (wave_format.bitsPerSample == 8 )
*format = AL_FORMAT_MONO8;
else if (wave_format.bitsPerSample == 16)
*format = AL_FORMAT_MONO16;
} else if (wave_format.numChannels == 2) {
if (wave_format.bitsPerSample == 8 )
*format = AL_FORMAT_STEREO8;
else if (wave_format.bitsPerSample == 16)
*format = AL_FORMAT_STEREO16;
}
alGenBuffers(1, buffer);
alBufferData(*buffer, *format, (void*)data,
*size, *frequency);
// alBufferData copies the sample data, so we can release our copy
delete[] data;
fclose(soundFile);
return true;
} catch (const char* error) {
if (soundFile != NULL)
fclose(soundFile);
return false;
}
}
int main(){
//Sound play data
ALint state; // The state of the sound source
ALuint bufferID; // The OpenAL sound buffer ID
ALuint sourceID; // The OpenAL sound source
ALenum format; // The sound data format
ALsizei freq; // The frequency of the sound data
ALsizei size; // Data size
ALCdevice* device = alcOpenDevice(NULL);
ALCcontext* context = alcCreateContext(device, NULL);
alcMakeContextCurrent(context);
// Create sound buffer and source
alGenBuffers(1, &bufferID);
alGenSources(1, &sourceID);
// Set the source and listener to the same location
alListener3f(AL_POSITION, 0.0f, 0.0f, 0.0f);
alSource3f(sourceID, AL_POSITION, 0.0f, 0.0f, 0.0f);
loadWavFile("..\\wavdata\\YOURWAVHERE.wav", &bufferID, &size, &freq, &format);
alSourcei(sourceID, AL_BUFFER, bufferID);
alSourcePlay(sourceID);
do{
alGetSourcei(sourceID, AL_SOURCE_STATE, &state);
} while (state != AL_STOPPED);
alDeleteBuffers(1, &bufferID);
alDeleteSources(1, &sourceID);
alcDestroyContext(context);
alcCloseDevice(device);
return 0;
}
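Coming back to the original question: if loadWavFile reports 16 bits per sample, the buffer has to be read two bytes at a time as signed samples rather than one unsigned char at a time. A minimal sketch, assuming the alBuffer / alBufferLen / log variables from the question and little-endian 16-bit PCM data:
const short* samples = reinterpret_cast<const short*>(alBuffer);
int sampleCount = alBufferLen / sizeof(short);
for (int i = 0; i < (sampleCount > 5000 ? 5000 : sampleCount); i++) {
    log << samples[i] << "\n"; // these values should now resemble the Audacity waveform
}
For stereo data the samples are interleaved (left, right, left, right, ...), so take every second value if you only want one channel.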
Some people suggested reading the audio data from end to start, creating a copy written from start to end, and then simply playing that reversed audio data.
Are there existing examples for iOS how this is done?
I found an example project called MixerHost, which at some point uses an
AudioUnitSampleType to hold the audio data that has been read from file, and assigns it to a buffer.
This is defined as:
typedef SInt32 AudioUnitSampleType;
#define kAudioUnitSampleFractionBits 24
And according to Apple:
The canonical audio sample type for audio units and other audio
processing in iPhone OS is noninterleaved linear PCM with 8.24-bit
fixed-point samples.
So in other words it holds noninterleaved linear PCM audio data.
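As an illustration of what 8.24 fixed point means for the sample values (my own sketch, not code from MixerHost): full scale 1.0 is represented as 1 << kAudioUnitSampleFractionBits, so converting an ordinary 16-bit PCM sample amounts to a left shift by 24 - 15 = 9 bits.
SInt16 pcm16 = 32767; // a full-scale 16-bit sample
AudioUnitSampleType fixed824 = ((SInt32) pcm16) << 9; // approximately 1 << 24, i.e. just under 1.0 in 8.24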
But I can't figure out where this data is being read in, and where it is stored. Here's the code that loads the audio data and buffers it:
- (void) readAudioFilesIntoMemory {
for (int audioFile = 0; audioFile < NUM_FILES; ++audioFile) {
NSLog (#"readAudioFilesIntoMemory - file %i", audioFile);
// Instantiate an extended audio file object.
ExtAudioFileRef audioFileObject = 0;
// Open an audio file and associate it with the extended audio file object.
OSStatus result = ExtAudioFileOpenURL (sourceURLArray[audioFile], &audioFileObject);
if (noErr != result || NULL == audioFileObject) {[self printErrorMessage: @"ExtAudioFileOpenURL" withStatus: result]; return;}
// Get the audio file's length in frames.
UInt64 totalFramesInFile = 0;
UInt32 frameLengthPropertySize = sizeof (totalFramesInFile);
result = ExtAudioFileGetProperty (
audioFileObject,
kExtAudioFileProperty_FileLengthFrames,
&frameLengthPropertySize,
&totalFramesInFile
);
if (noErr != result) {[self printErrorMessage: @"ExtAudioFileGetProperty (audio file length in frames)" withStatus: result]; return;}
// Assign the frame count to the soundStructArray instance variable
soundStructArray[audioFile].frameCount = totalFramesInFile;
// Get the audio file's number of channels.
AudioStreamBasicDescription fileAudioFormat = {0};
UInt32 formatPropertySize = sizeof (fileAudioFormat);
result = ExtAudioFileGetProperty (
audioFileObject,
kExtAudioFileProperty_FileDataFormat,
&formatPropertySize,
&fileAudioFormat
);
if (noErr != result) {[self printErrorMessage: @"ExtAudioFileGetProperty (file audio format)" withStatus: result]; return;}
UInt32 channelCount = fileAudioFormat.mChannelsPerFrame;
// Allocate memory in the soundStructArray instance variable to hold the left channel,
// or mono, audio data
soundStructArray[audioFile].audioDataLeft =
(AudioUnitSampleType *) calloc (totalFramesInFile, sizeof (AudioUnitSampleType));
AudioStreamBasicDescription importFormat = {0};
if (2 == channelCount) {
soundStructArray[audioFile].isStereo = YES;
// Sound is stereo, so allocate memory in the soundStructArray instance variable to
// hold the right channel audio data
soundStructArray[audioFile].audioDataRight =
(AudioUnitSampleType *) calloc (totalFramesInFile, sizeof (AudioUnitSampleType));
importFormat = stereoStreamFormat;
} else if (1 == channelCount) {
soundStructArray[audioFile].isStereo = NO;
importFormat = monoStreamFormat;
} else {
NSLog (#"*** WARNING: File format not supported - wrong number of channels");
ExtAudioFileDispose (audioFileObject);
return;
}
// Assign the appropriate mixer input bus stream data format to the extended audio
// file object. This is the format used for the audio data placed into the audio
// buffer in the SoundStruct data structure, which is in turn used in the
// inputRenderCallback callback function.
result = ExtAudioFileSetProperty (
audioFileObject,
kExtAudioFileProperty_ClientDataFormat,
sizeof (importFormat),
&importFormat
);
if (noErr != result) {[self printErrorMessage: #"ExtAudioFileSetProperty (client data format)" withStatus: result]; return;}
// Set up an AudioBufferList struct, which has two roles:
//
// 1. It gives the ExtAudioFileRead function the configuration it
// needs to correctly provide the data to the buffer.
//
// 2. It points to the soundStructArray[audioFile].audioDataLeft buffer, so
// that audio data obtained from disk using the ExtAudioFileRead function
// goes to that buffer
// Allocate memory for the buffer list struct according to the number of
// channels it represents.
AudioBufferList *bufferList;
bufferList = (AudioBufferList *) malloc (
sizeof (AudioBufferList) + sizeof (AudioBuffer) * (channelCount - 1)
);
if (NULL == bufferList) {NSLog (@"*** malloc failure for allocating bufferList memory"); return;}
// initialize the mNumberBuffers member
bufferList->mNumberBuffers = channelCount;
// initialize the mBuffers member to 0
AudioBuffer emptyBuffer = {0};
size_t arrayIndex;
for (arrayIndex = 0; arrayIndex < channelCount; arrayIndex++) {
bufferList->mBuffers[arrayIndex] = emptyBuffer;
}
// set up the AudioBuffer structs in the buffer list
bufferList->mBuffers[0].mNumberChannels = 1;
bufferList->mBuffers[0].mDataByteSize = totalFramesInFile * sizeof (AudioUnitSampleType);
bufferList->mBuffers[0].mData = soundStructArray[audioFile].audioDataLeft;
if (2 == channelCount) {
bufferList->mBuffers[1].mNumberChannels = 1;
bufferList->mBuffers[1].mDataByteSize = totalFramesInFile * sizeof (AudioUnitSampleType);
bufferList->mBuffers[1].mData = soundStructArray[audioFile].audioDataRight;
}
// Perform a synchronous, sequential read of the audio data out of the file and
// into the soundStructArray[audioFile].audioDataLeft and (if stereo) .audioDataRight members.
UInt32 numberOfPacketsToRead = (UInt32) totalFramesInFile;
result = ExtAudioFileRead (
audioFileObject,
&numberOfPacketsToRead,
bufferList
);
free (bufferList);
if (noErr != result) {
[self printErrorMessage: #"ExtAudioFileRead failure - " withStatus: result];
// If reading from the file failed, then free the memory for the sound buffer.
free (soundStructArray[audioFile].audioDataLeft);
soundStructArray[audioFile].audioDataLeft = 0;
if (2 == channelCount) {
free (soundStructArray[audioFile].audioDataRight);
soundStructArray[audioFile].audioDataRight = 0;
}
ExtAudioFileDispose (audioFileObject);
return;
}
NSLog (#"Finished reading file %i into memory", audioFile);
// Set the sample index to zero, so that playback starts at the
// beginning of the sound.
soundStructArray[audioFile].sampleNumber = 0;
// Dispose of the extended audio file object, which also
// closes the associated file.
ExtAudioFileDispose (audioFileObject);
}
}
Which part contains the array of audio samples which have to be reversed? Is it the AudioUnitSampleType?
bufferList->mBuffers[0].mData = soundStructArray[audioFile].audioDataLeft;
Note: audioDataLeft is defined as an AudioUnitSampleType, which is an SInt32 but not an array.
I found a clue in a Core Audio Mailing list:
Well, nothing to do with iPh*n* as far as I know (unless some audio
API has been omitted -- I am not a member of that program). AFAIR,
AudioFile.h and ExtendedAudioFile.h should provide you with what you
need to read or write a caf and access its streams/channels.
Basically, you want to read each channel/stream backwards, so, if you
don't need properties of the audio file it is pretty straightforward
once you have a handle on that channel's data, assuming it is not in a
compressed format. Considering the number of formats a caf can
represent, this could take a few more lines of code than you're
thinking. Once you have a handle on uncompressed data, it should be
about as easy as reversing a string. Then you would of course replace
the file's data with the reversed data, or you could just feed the
audio output (or wherever you're sending the reversed signal) reading
whatever stream you have backwards.
This is what I tried, but when I assign my reversed buffer to the mData of both channels, I hear nothing:
AudioUnitSampleType *leftData = soundStructArray[audioFile].audioDataLeft;
AudioUnitSampleType *reversedData = (AudioUnitSampleType *) calloc (totalFramesInFile, sizeof (AudioUnitSampleType));
UInt64 j = 0;
for (UInt64 i = (totalFramesInFile - 1); i > -1; i--) {
reversedData[j] = leftData[i];
j++;
}
I have worked on a sample app which records what the user says and plays it back backwards. I used Core Audio to achieve this. Link to app code.
/*
As each sample is 16 bits (2 bytes) in size (mono channel), you can load one sample at a time by copying it into a different buffer,
starting at the end of the recording and reading backwards.
When you get to the start of the data, you have reversed the data, and playback will be reversed.
*/
// set up output file
AudioFileID outputAudioFile;
AudioStreamBasicDescription myPCMFormat;
myPCMFormat.mSampleRate = 16000.00;
myPCMFormat.mFormatID = kAudioFormatLinearPCM ;
myPCMFormat.mFormatFlags = kAudioFormatFlagsCanonical;
myPCMFormat.mChannelsPerFrame = 1;
myPCMFormat.mFramesPerPacket = 1;
myPCMFormat.mBitsPerChannel = 16;
myPCMFormat.mBytesPerPacket = 2;
myPCMFormat.mBytesPerFrame = 2;
AudioFileCreateWithURL((__bridge CFURLRef)self.flippedAudioUrl,
kAudioFileCAFType,
&myPCMFormat,
kAudioFileFlags_EraseFile,
&outputAudioFile);
// set up input file
AudioFileID inputAudioFile;
OSStatus theErr = noErr;
UInt64 fileDataSize = 0;
AudioStreamBasicDescription theFileFormat;
UInt32 thePropertySize = sizeof(theFileFormat);
theErr = AudioFileOpenURL((__bridge CFURLRef)self.recordedAudioUrl, kAudioFileReadPermission, 0, &inputAudioFile);
thePropertySize = sizeof(fileDataSize);
theErr = AudioFileGetProperty(inputAudioFile, kAudioFilePropertyAudioDataByteCount, &thePropertySize, &fileDataSize);
UInt32 dataSize = fileDataSize;
void* theData = malloc(dataSize);
//Read data into buffer
UInt32 readPoint = dataSize;
UInt32 writePoint = 0;
while( readPoint > 0 )
{
UInt32 bytesToRead = 2;
readPoint -= 2; // step back one 16-bit sample first, so the last sample is read and we never read past the end of the data
AudioFileReadBytes( inputAudioFile, false, readPoint, &bytesToRead, theData );
AudioFileWriteBytes( outputAudioFile, false, writePoint, &bytesToRead, theData );
writePoint += 2;
}
free(theData);
AudioFileClose(inputAudioFile);
AudioFileClose(outputAudioFile);
Hope this helps.
Typically, when an ASBD is being used, the fields describe the complete layout of the sample data in the buffers that are represented by this description - where typically those buffers are represented by an AudioBuffer that is contained in an AudioBufferList.
However, when an ASBD has the kAudioFormatFlagIsNonInterleaved flag, the AudioBufferList has a different structure and semantic. In this case, the ASBD fields will describe the format of ONE of the AudioBuffers that are contained in the list, AND each AudioBuffer in the list is determined to have a single (mono) channel of audio data. Then, the ASBD's mChannelsPerFrame will indicate the total number of AudioBuffers that are contained within the AudioBufferList - where each buffer contains one channel. This is used primarily with the AudioUnit (and AudioConverter) representation of this list - and won't be found in the AudioHardware usage of this structure.
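To make that concrete, here is a small sketch (not taken from MixerHost) of how an AudioBufferList for a non-interleaved, two-channel ASBD is laid out: mNumberBuffers equals mChannelsPerFrame, and each AudioBuffer carries a single mono channel. The channel count and frame count are arbitrary example values.
UInt32 channelCount = 2; // assumed stereo
UInt32 framesPerBuffer = 1024; // arbitrary example size
AudioBufferList *abl = (AudioBufferList *) malloc (
    sizeof (AudioBufferList) + sizeof (AudioBuffer) * (channelCount - 1));
abl->mNumberBuffers = channelCount;
for (UInt32 i = 0; i < channelCount; i++) {
    abl->mBuffers[i].mNumberChannels = 1; // one mono channel per buffer
    abl->mBuffers[i].mDataByteSize = framesPerBuffer * sizeof (AudioUnitSampleType);
    abl->mBuffers[i].mData = calloc (framesPerBuffer, sizeof (AudioUnitSampleType));
}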
You do not have to allocate a separate buffer to store the reversed data; that copy can take a fair bit of CPU time, depending on the length of the sound. To play a sound backwards, just make the sampleNumber counter start at totalFramesInFile - 1.
You can modify MixerHost like this, to achieve the desired effect.
Replace soundStructArray[audioFile].sampleNumber = 0; with
soundStructArray[audioFile].sampleNumber = totalFramesInFile - 1;
Make sampleNumber SInt32 instead of UInt32.
Replace the loop in which you write the samples out with this:
for (UInt32 frameNumber = 0; frameNumber < inNumberFrames; ++frameNumber) {
outSamplesChannelLeft[frameNumber] = dataInLeft[sampleNumber];
if (isStereo) outSamplesChannelRight[frameNumber] = dataInRight[sampleNumber];
if (--sampleNumber < 0) sampleNumber = frameTotalForSound - 1;
}
This effectively makes it play backwards. Mmmm. It's been a while since I've heard the MixerHost music. I must admit I find it to be quite pleasing.
I'm looking at this example: https://developer.apple.com/library/ios/#samplecode/SpeakHere/Introduction/Intro.html
I modified it (the AQRecorder.mm) to record an mp3 instead of a caf file. I changed kAudioFileCAFType to kAudioFileMP3Type, but it does not create the file.
The code became:
void AQRecorder::SetupAudioFormat(UInt32 inFormatID)
{
memset(&mRecordFormat, 0, sizeof(mRecordFormat));
UInt32 size = sizeof(mRecordFormat.mSampleRate);
XThrowIfError(AudioSessionGetProperty( kAudioSessionProperty_CurrentHardwareSampleRate,
&size,
&mRecordFormat.mSampleRate), "couldn't get hardware sample rate");
size = sizeof(mRecordFormat.mChannelsPerFrame);
XThrowIfError(AudioSessionGetProperty( kAudioSessionProperty_CurrentHardwareInputNumberChannels,
&size,
&mRecordFormat.mChannelsPerFrame), "couldn't get input channel count");
mRecordFormat.mFormatID = inFormatID;
if (inFormatID == kAudioFormatLinearPCM)
{
// if we want pcm, default to signed 16-bit little-endian
mRecordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
mRecordFormat.mBitsPerChannel = 16;
mRecordFormat.mBytesPerPacket = mRecordFormat.mBytesPerFrame = (mRecordFormat.mBitsPerChannel / 8) * mRecordFormat.mChannelsPerFrame;
mRecordFormat.mFramesPerPacket = 1;
}
}
void AQRecorder::StartRecord(CFStringRef inRecordFile)
{
int i, bufferByteSize;
UInt32 size;
CFURLRef url;
try {
mFileName = CFStringCreateCopy(kCFAllocatorDefault, inRecordFile);
// specify the recording format
SetupAudioFormat(kAudioFormatLinearPCM);
// create the queue
XThrowIfError(AudioQueueNewInput(
&mRecordFormat,
MyInputBufferHandler,
this /* userData */,
NULL /* run loop */, NULL /* run loop mode */,
0 /* flags */, &mQueue), "AudioQueueNewInput failed");
// get the record format back from the queue's audio converter --
// the file may require a more specific stream description than was necessary to create the encoder.
mRecordPacket = 0;
size = sizeof(mRecordFormat);
XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_StreamDescription,
&mRecordFormat, &size), "couldn't get queue's format");
NSString *recordFile = [NSTemporaryDirectory() stringByAppendingPathComponent: (NSString*)inRecordFile];
NSLog(@"%@", recordFile);
url = CFURLCreateWithString(kCFAllocatorDefault, (CFStringRef)recordFile, NULL);
// create the audio file
XThrowIfError(AudioFileCreateWithURL(url, kAudioFileMP3Type, &mRecordFormat, kAudioFileFlags_EraseFile,
&mRecordFile), "AudioFileCreateWithURL failed");
CFRelease(url);
// copy the cookie first to give the file object as much info as we can about the data going in
// not necessary for pcm, but required for some compressed audio
CopyEncoderCookieToFile();
// allocate and enqueue buffers
bufferByteSize = ComputeRecordBufferSize(&mRecordFormat, kBufferDurationSeconds); // enough bytes for half a second
for (i = 0; i < kNumberRecordBuffers; ++i) {
XThrowIfError(AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]),
"AudioQueueAllocateBuffer failed");
XThrowIfError(AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL),
"AudioQueueEnqueueBuffer failed");
}
// start the queue
mIsRunning = true;
XThrowIfError(AudioQueueStart(mQueue, NULL), "AudioQueueStart failed");
}
catch (CAXException &e) {
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
catch (...) {
fprintf(stderr, "An unknown error occurred\n");
}
}
Am I missing any settings, or is something else wrong with my code? MP3 is supposed to be supported by Apple:
https://developer.apple.com/library/mac/#documentation/MusicAudio/Reference/AudioFileConvertRef/Reference/reference.html
iOS devices don't support encoding to the MP3 format; Core Audio can read, but not write, MP3 files. You have to choose an alternate format, such as AAC (kAudioFormatMPEG4AAC) written into an .m4a or .caf container.
You can use the LAME library to encode the caf into MP3 format. Check this sample: iOSMp3Recorder.
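For reference, the core of a LAME-based conversion looks roughly like the sketch below. It is not taken from iOSMp3Recorder; it assumes you have already pulled interleaved signed 16-bit stereo PCM out of the recorded caf (for example with AudioFileReadBytes) and only shows the encoding loop.
#include <lame/lame.h>
#include <stdio.h>
// Hypothetical helper: encode interleaved 16-bit stereo PCM frames to MP3 with LAME.
static void encodePCMToMP3(short *pcm, int numFrames, int sampleRate, const char *mp3Path)
{
    lame_t lame = lame_init();
    lame_set_in_samplerate(lame, sampleRate);
    lame_set_num_channels(lame, 2); // interleaved stereo input
    lame_init_params(lame);
    FILE *out = fopen(mp3Path, "wb");
    unsigned char mp3Buffer[16384]; // comfortably above LAME's worst case for this chunk size
    const int framesPerChunk = 4096;
    for (int offset = 0; offset < numFrames; offset += framesPerChunk) {
        int frames = numFrames - offset;
        if (frames > framesPerChunk) frames = framesPerChunk;
        int bytes = lame_encode_buffer_interleaved(lame, pcm + offset * 2, frames,
                                                   mp3Buffer, sizeof(mp3Buffer));
        if (bytes > 0) fwrite(mp3Buffer, 1, bytes, out);
    }
    // flush LAME's internal buffers to get the last MP3 frame(s)
    int flushed = lame_encode_flush(lame, mp3Buffer, sizeof(mp3Buffer));
    if (flushed > 0) fwrite(mp3Buffer, 1, flushed, out);
    fclose(out);
    lame_close(lame);
}
Keep in mind that LAME is LGPL-licensed, which is worth checking against your distribution requirements.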
I successfully compiled libavcodec with speex enabled.
I modified the example from the FFmpeg docs to encode the sample audio into Speex.
But the resulting file cannot be played with VLC player (which has a Speex decoder).
Any tips?
static void audio_encode_example(const char *filename)
{
AVCodec *codec;
AVCodecContext *c= NULL;
int frame_size, i, j, out_size, outbuf_size;
FILE *f;
short *samples;
float t, tincr;
uint8_t *outbuf;
printf("Audio encoding\n");
/* find the Speex encoder */
codec = avcodec_find_encoder(CODEC_ID_SPEEX);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
c= avcodec_alloc_context();
/* put sample parameters */
c->bit_rate = 64000;
c->sample_rate = 32000;
c->channels = 2;
c->sample_fmt=AV_SAMPLE_FMT_S16;
/* open it */
if (avcodec_open(c, codec) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
/* the codec gives us the frame size, in samples */
frame_size = c->frame_size;
printf("frame size %d\n",frame_size);
samples =(short*) malloc(frame_size * 2 * c->channels);
outbuf_size = 10000;
outbuf =( uint8_t*) malloc(outbuf_size);
f = fopen(filename, "wb");
if (!f) {
fprintf(stderr, "could not open %s\n", filename);
exit(1);
}
/* encode a single tone sound */
t = 0;
tincr = 2 * M_PI * 440.0 / c->sample_rate;
for(i=0;i<200;i++) {
for(j=0;j<frame_size;j++) {
samples[2*j] = (int)(sin(t) * 10000);
samples[2*j+1] = samples[2*j];
t += tincr;
}
/* encode the samples */
out_size = avcodec_encode_audio(c, outbuf, outbuf_size, samples);
fwrite(outbuf, 1, out_size, f);
}
fclose(f);
free(outbuf);
free(samples);
avcodec_close(c);
av_free(c);
}
int main(int argc, char **argv)
{
avcodec_register_all();
audio_encode_example(argv[1]);
return 0;
}
Does Speex (I don't know it) by chance require a container format into which these frames are placed, with some kind of header? You're just taking the output of an encoder and dumping it into a file without going through any formatting (libavformat).
Try encoding the same data into Speex using the ffmpeg command line utility and see if the resulting file plays.
I'm looking at some info at www.speex.org, and it seems that Speex data is normally put into .ogg files. The player you are using might not recognize raw Speex data; it may only recognize it once it is wrapped in an .ogg container.
Though not a 100% definite answer, I hope this is of some help!
I'm writing a CoreAudio backend for an audio library called XAL. Input buffers can be of various sample rates. I'm using a single audio unit for output. Idea is to convert the buffers and mix them prior to sending them to the audio unit.
Everything works as long as the input buffer has the same properties (sample rate, channel count, etc) as the output audio unit. Hence, the mixing part works.
However, I'm stuck on sample rate and channel count conversion. From what I've figured out, this is easiest to do with the Audio Converter Services API. I've managed to construct a converter; the idea is that the output format is the same as the output unit's format, but possibly adjusted for the purposes of the converter.
The audio converter is successfully constructed, but upon calling AudioConverterFillComplexBuffer(), I get output status error -50.
I'd love it if I could get another set of eyeballs on this code. The problem is probably somewhere below AudioConverterNew(). The variable stream contains the incoming (and outgoing) buffer data, and streamSize contains the byte size of that buffer data.
What did I do wrong?
void CoreAudio_AudioManager::_convertStream(Buffer* buffer, unsigned char** stream, int *streamSize)
{
if (buffer->getBitsPerSample() != unitDescription.mBitsPerChannel ||
buffer->getChannels() != unitDescription.mChannelsPerFrame ||
buffer->getSamplingRate() != unitDescription.mSampleRate)
{
printf("INPUT STREAM SIZE: %d\n", *streamSize);
// describe the input format's description
AudioStreamBasicDescription inputDescription;
memset(&inputDescription, 0, sizeof(inputDescription));
inputDescription.mFormatID = kAudioFormatLinearPCM;
inputDescription.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger;
inputDescription.mChannelsPerFrame = buffer->getChannels();
inputDescription.mSampleRate = buffer->getSamplingRate();
inputDescription.mBitsPerChannel = buffer->getBitsPerSample();
inputDescription.mBytesPerFrame = (inputDescription.mBitsPerChannel * inputDescription.mChannelsPerFrame) / 8;
inputDescription.mFramesPerPacket = 1; //*streamSize / inputDescription.mBytesPerFrame;
inputDescription.mBytesPerPacket = inputDescription.mBytesPerFrame * inputDescription.mFramesPerPacket;
printf("INPUT : %lu bytes per packet for sample rate %g, channels %d\n", inputDescription.mBytesPerPacket, inputDescription.mSampleRate, inputDescription.mChannelsPerFrame);
// copy conversion output format's description from the
// output audio unit's description.
// then adjust framesPerPacket to match the input we'll be passing.
// framecount of our input stream is based on the input bytecount.
// output stream will have same number of frames, but different
// number of bytes.
AudioStreamBasicDescription outputDescription = unitDescription;
outputDescription.mFramesPerPacket = 1; //inputDescription.mFramesPerPacket;
outputDescription.mBytesPerPacket = outputDescription.mBytesPerFrame * outputDescription.mFramesPerPacket;
printf("OUTPUT : %lu bytes per packet for sample rate %g, channels %d\n", outputDescription.mBytesPerPacket, outputDescription.mSampleRate, outputDescription.mChannelsPerFrame);
// create an audio converter
AudioConverterRef audioConverter;
OSStatus acCreationResult = AudioConverterNew(&inputDescription, &outputDescription, &audioConverter);
printf("Created audio converter %p (status: %d)\n", audioConverter, acCreationResult);
if(!audioConverter)
{
// bail out
free(*stream);
*streamSize = 0;
*stream = (unsigned char*)malloc(0);
return;
}
// calculate number of bytes required for output of input stream.
// allocate buffer of adequate size.
UInt32 outputBytes = outputDescription.mBytesPerPacket * (*streamSize / inputDescription.mBytesPerFrame); // outputDescription.mFramesPerPacket * outputDescription.mBytesPerFrame;
unsigned char *outputBuffer = (unsigned char*)malloc(outputBytes);
memset(outputBuffer, 0, outputBytes);
printf("OUTPUT BYTES : %d\n", outputBytes);
// describe input data we'll pass into converter
AudioBuffer inputBuffer;
inputBuffer.mNumberChannels = inputDescription.mChannelsPerFrame;
inputBuffer.mDataByteSize = *streamSize;
inputBuffer.mData = *stream;
// describe output data buffers into which we can receive data.
AudioBufferList outputBufferList;
outputBufferList.mNumberBuffers = 1;
outputBufferList.mBuffers[0].mNumberChannels = outputDescription.mChannelsPerFrame;
outputBufferList.mBuffers[0].mDataByteSize = outputBytes;
outputBufferList.mBuffers[0].mData = outputBuffer;
// set output data packet size
UInt32 outputDataPacketSize = outputDescription.mBytesPerPacket;
// convert
OSStatus result = AudioConverterFillComplexBuffer(audioConverter, /* AudioConverterRef inAudioConverter */
CoreAudio_AudioManager::_converterComplexInputDataProc, /* AudioConverterComplexInputDataProc inInputDataProc */
&inputBuffer, /* void *inInputDataProcUserData */
&outputDataPacketSize, /* UInt32 *ioOutputDataPacketSize */
&outputBufferList, /* AudioBufferList *outOutputData */
NULL /* AudioStreamPacketDescription *outPacketDescription */
);
printf("Result: %d wheee\n", result);
// change "stream" to describe our output buffer.
// even if an error occurred, we'd rather have silence than unconverted audio.
free(*stream);
*stream = outputBuffer;
*streamSize = outputBytes;
// dispose of the audio converter
AudioConverterDispose(audioConverter);
}
}
OSStatus CoreAudio_AudioManager::_converterComplexInputDataProc(AudioConverterRef inAudioConverter,
UInt32* ioNumberDataPackets,
AudioBufferList* ioData,
AudioStreamPacketDescription** ioDataPacketDescription,
void* inUserData)
{
printf("Converter\n");
if(*ioNumberDataPackets != 1)
{
xal::log("_converterComplexInputDataProc cannot provide input data; invalid number of packets requested");
*ioNumberDataPackets = 0;
ioData->mNumberBuffers = 0;
return -50;
}
*ioNumberDataPackets = 1;
ioData->mNumberBuffers = 1;
ioData->mBuffers[0] = *(AudioBuffer*)inUserData;
*ioDataPacketDescription = NULL;
return 0;
}
Working code for Core Audio sample rate conversion and channel count conversion, using Audio Converter Services (now available as a part of the BSD-licensed XAL audio library):
void CoreAudio_AudioManager::_convertStream(Buffer* buffer, unsigned char** stream, int *streamSize)
{
if (buffer->getBitsPerSample() != unitDescription.mBitsPerChannel ||
buffer->getChannels() != unitDescription.mChannelsPerFrame ||
buffer->getSamplingRate() != unitDescription.mSampleRate)
{
// describe the input format's description
AudioStreamBasicDescription inputDescription;
memset(&inputDescription, 0, sizeof(inputDescription));
inputDescription.mFormatID = kAudioFormatLinearPCM;
inputDescription.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger;
inputDescription.mChannelsPerFrame = buffer->getChannels();
inputDescription.mSampleRate = buffer->getSamplingRate();
inputDescription.mBitsPerChannel = buffer->getBitsPerSample();
inputDescription.mBytesPerFrame = (inputDescription.mBitsPerChannel * inputDescription.mChannelsPerFrame) / 8;
inputDescription.mFramesPerPacket = 1; //*streamSize / inputDescription.mBytesPerFrame;
inputDescription.mBytesPerPacket = inputDescription.mBytesPerFrame * inputDescription.mFramesPerPacket;
// copy conversion output format's description from the
// output audio unit's description.
// then adjust framesPerPacket to match the input we'll be passing.
// framecount of our input stream is based on the input bytecount.
// output stream will have same number of frames, but different
// number of bytes.
AudioStreamBasicDescription outputDescription = unitDescription;
outputDescription.mFramesPerPacket = 1; //inputDescription.mFramesPerPacket;
outputDescription.mBytesPerPacket = outputDescription.mBytesPerFrame * outputDescription.mFramesPerPacket;
// create an audio converter
AudioConverterRef audioConverter;
OSStatus acCreationResult = AudioConverterNew(&inputDescription, &outputDescription, &audioConverter);
if(!audioConverter)
{
// bail out
free(*stream);
*streamSize = 0;
*stream = (unsigned char*)malloc(0);
return;
}
// calculate number of bytes required for output of input stream.
// allocate buffer of adequate size.
UInt32 outputBytes = outputDescription.mBytesPerPacket * (*streamSize / inputDescription.mBytesPerPacket); // outputDescription.mFramesPerPacket * outputDescription.mBytesPerFrame;
unsigned char *outputBuffer = (unsigned char*)malloc(outputBytes);
memset(outputBuffer, 0, outputBytes);
// describe input data we'll pass into converter
AudioBuffer inputBuffer;
inputBuffer.mNumberChannels = inputDescription.mChannelsPerFrame;
inputBuffer.mDataByteSize = *streamSize;
inputBuffer.mData = *stream;
// describe output data buffers into which we can receive data.
AudioBufferList outputBufferList;
outputBufferList.mNumberBuffers = 1;
outputBufferList.mBuffers[0].mNumberChannels = outputDescription.mChannelsPerFrame;
outputBufferList.mBuffers[0].mDataByteSize = outputBytes;
outputBufferList.mBuffers[0].mData = outputBuffer;
// set output data packet size
UInt32 outputDataPacketSize = outputBytes / outputDescription.mBytesPerPacket;
// fill class members with data that we'll pass into
// the InputDataProc
_converter_currentBuffer = &inputBuffer;
_converter_currentInputDescription = inputDescription;
// convert
OSStatus result = AudioConverterFillComplexBuffer(audioConverter, /* AudioConverterRef inAudioConverter */
CoreAudio_AudioManager::_converterComplexInputDataProc, /* AudioConverterComplexInputDataProc inInputDataProc */
this, /* void *inInputDataProcUserData */
&outputDataPacketSize, /* UInt32 *ioOutputDataPacketSize */
&outputBufferList, /* AudioBufferList *outOutputData */
NULL /* AudioStreamPacketDescription *outPacketDescription */
);
// change "stream" to describe our output buffer.
// even if an error occurred, we'd rather have silence than unconverted audio.
free(*stream);
*stream = outputBuffer;
*streamSize = outputBytes;
// dispose of the audio converter
AudioConverterDispose(audioConverter);
}
}
OSStatus CoreAudio_AudioManager::_converterComplexInputDataProc(AudioConverterRef inAudioConverter,
UInt32* ioNumberDataPackets,
AudioBufferList* ioData,
AudioStreamPacketDescription** ioDataPacketDescription,
void* inUserData)
{
if(ioDataPacketDescription)
{
xal::log("_converterComplexInputDataProc cannot provide input data; it doesn't know how to provide packet descriptions");
*ioDataPacketDescription = NULL;
*ioNumberDataPackets = 0;
ioData->mNumberBuffers = 0;
return 501;
}
CoreAudio_AudioManager *self = (CoreAudio_AudioManager*)inUserData;
ioData->mNumberBuffers = 1;
ioData->mBuffers[0] = *(self->_converter_currentBuffer);
*ioNumberDataPackets = ioData->mBuffers[0].mDataByteSize / self->_converter_currentInputDescription.mBytesPerPacket;
return 0;
}
In the header, as part of the CoreAudio_AudioManager class, here are relevant instance variables:
AudioStreamBasicDescription unitDescription;
AudioBuffer *_converter_currentBuffer;
AudioStreamBasicDescription _converter_currentInputDescription;
A few months later, I'm looking at this and I've realized that I didn't document the changes.
If you are interested in what the changes were:
Look at the callback function CoreAudio_AudioManager::_converterComplexInputDataProc: one has to properly specify the number of output packets in ioNumberDataPackets.
This required introducing new instance variables to hold both the buffer (the previous inUserData) and the input description (used to calculate the number of packets to be fed into Core Audio's converter).
The calculation of those "output" packets (the ones fed into the converter) is based on the amount of data our callback received and the number of bytes per packet of the input format.
Hopefully this edit will help a future reader (myself included)!