I'm following a tutorial about playing sound with OpenAL. Everything works fine except that I can't make the sound loop. I believe I've set AL_LOOPING on the source. It plays only once, and when it finishes playing the app blocks (it doesn't respond to my tap on the play button). Any ideas about what's wrong with the code?
// start up openAL
// init device and context
-(void)initOpenAL
{
// Initialization
mDevice = alcOpenDevice(NULL); // select the "preferred device"
if (mDevice) {
// use the device to make a context
mContext = alcCreateContext(mDevice, NULL);
// set my context to the currently active one
alcMakeContextCurrent(mContext);
}
}
// open the audio file
// returns a big audio ID struct
-(AudioFileID)openAudioFile:(NSString*)filePath
{
AudioFileID outAFID;
// use NSURL instead of a CFURLRef because it is easier
NSURL * afUrl = [NSURL fileURLWithPath:filePath];
// do some platform specific stuff..
#if TARGET_OS_IPHONE
OSStatus result = AudioFileOpenURL((CFURLRef)afUrl, kAudioFileReadPermission, 0, &outAFID);
#else
OSStatus result = AudioFileOpenURL((CFURLRef)afUrl, fsRdPerm, 0, &outAFID);
#endif
if (result != 0) NSLog(@"cannot open file: %@", filePath);
return outAFID;
}
// find the audio portion of the file
// return the size in bytes
-(UInt32)audioFileSize:(AudioFileID)fileDescriptor
{
UInt64 outDataSize = 0;
UInt32 thePropSize = sizeof(UInt64);
OSStatus result = AudioFileGetProperty(fileDescriptor, kAudioFilePropertyAudioDataByteCount, &thePropSize, &outDataSize);
if(result != 0) NSLog(@"cannot find file size");
return (UInt32)outDataSize;
}
- (void)stopSound
{
alSourceStop(sourceID);
}
-(void)cleanUpOpenAL:(id)sender
{
// delete the sources
alDeleteSources(1, &sourceID);
// delete the buffers
alDeleteBuffers(1, &bufferID);
// destroy the context
alcDestroyContext(mContext);
// close the device
alcCloseDevice(mDevice);
}
-(IBAction)play:(id)sender
{
alSourcePlay(sourceID);
}
#pragma mark -
// Implement viewDidLoad to do additional setup after loading the view, typically from a nib.
- (void)viewDidLoad {
[super viewDidLoad];
[self initOpenAL];
// get the full path of the file
NSString* fileName = [[NSBundle mainBundle] pathForResource:@"sound" ofType:@"caf"];
// first, open the file
AudioFileID fileID = [self openAudioFile:fileName];
// find out how big the actual audio data is
UInt32 fileSize = [self audioFileSize:fileID];
// this is where the audio data will live for the moment
unsigned char * outData = malloc(fileSize);
// this where we actually get the bytes from the file and put them
// into the data buffer
OSStatus result = noErr;
result = AudioFileReadBytes(fileID, false, 0, &fileSize, outData);
AudioFileClose(fileID); //close the file
if (result != 0) NSLog(@"cannot load effect: %@", fileName);
//NSUInteger bufferID; // buffer is defined in the header file
// grab a buffer ID from openAL
alGenBuffers(1, &bufferID);
// jam the audio data into the new buffer
alBufferData(bufferID, AL_FORMAT_STEREO16, outData, fileSize, 8000);
//NSUInteger sourceID; // source is defined in the header file
// grab a source ID from openAL
alGenSources(1, &sourceID);
// attach the buffer to the source
alSourcei(sourceID, AL_BUFFER, bufferID);
// set some basic source prefs
alSourcef(sourceID, AL_PITCH, 1.0f);
alSourcef(sourceID, AL_GAIN, 1.0f);
alSourcei(sourceID, AL_LOOPING, AL_TRUE);
// clean up the buffer
if (outData)
{
free(outData);
outData = NULL;
}
}
You should be able to free outData right after your alBufferData() call. To exclude it as the culprit, you can try the static buffer extension and manage the memory yourself. It's something like:
alBufferDataStaticProcPtr alBufferDataStaticProc = (alBufferDataStaticProcPtr)alcGetProcAddress(0, (const ALCchar *)"alBufferDataStatic");
alBufferDataStaticProc(bufferID, bitChanFormat, audioData, audioDataSize, dataFormat.mSampleRate); // note: with the static variant, audioData must stay allocated until the buffer is deleted
My task is to play an audio file that is saved locally in the documents directory, apply an audio effect to it using an Effect Audio Unit, and save a new audio file with that effect to the documents directory.
Here is the code that I have written so far, but it's not working: the effect is not being applied to the audio. Please suggest what is wrong in this code. Thanks in advance.
- (void) setUpAudioUnits
{
OSStatus setupErr = noErr;
// describe unit
AudioComponentDescription audioCompDesc;
audioCompDesc.componentType = kAudioUnitType_Output;
audioCompDesc.componentSubType = kAudioUnitSubType_RemoteIO;
audioCompDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
audioCompDesc.componentFlags = 0;
audioCompDesc.componentFlagsMask = 0;
// get rio unit from audio component manager
AudioComponent rioComponent = AudioComponentFindNext(NULL, &audioCompDesc);
setupErr = AudioComponentInstanceNew(rioComponent, &remoteIOUnit);
NSAssert (setupErr == noErr, @"Couldn't get RIO unit instance");
// set up the rio unit for playback
UInt32 oneFlag = 1;
AudioUnitElement outputElement = 0;
setupErr =
AudioUnitSetProperty (remoteIOUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
outputElement,
&oneFlag,
sizeof(oneFlag));
NSAssert (setupErr == noErr, @"Couldn't enable RIO output");
// enable rio input
AudioUnitElement inputElement = 1;
// setup an asbd in the iphone canonical format
AudioStreamBasicDescription myASBD;
memset (&myASBD, 0, sizeof (myASBD));
// myASBD.mSampleRate = 44100;
myASBD.mSampleRate = hardwareSampleRate;
myASBD.mFormatID = kAudioFormatLinearPCM;
myASBD.mFormatFlags = kAudioFormatFlagsCanonical;
myASBD.mBytesPerPacket = 4;
myASBD.mFramesPerPacket = 1;
myASBD.mBytesPerFrame = 4;
myASBD.mChannelsPerFrame = 2;
myASBD.mBitsPerChannel = 16;
// set format for output (bus 0) on rio's input scope
setupErr =
AudioUnitSetProperty (remoteIOUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
outputElement,
&myASBD,
sizeof (myASBD));
NSAssert (setupErr == noErr, @"Couldn't set ASBD for RIO on input scope / bus 0");
// song must be an LPCM file, preferably in caf container
// to convert, use /usr/bin/afconvert, like this:
// /usr/bin/afconvert --data LEI16 Girlfriend.m4a song.caf
// read in the entire audio file (NOT recommended)
// better to use a ring buffer: thread or timer fills, render callback drains
NSURL *songURL = [NSURL fileURLWithPath:
[[NSBundle mainBundle] pathForResource: @"song"
ofType: @"caf"]];
AudioFileID songFile;
setupErr = AudioFileOpenURL((CFURLRef) songURL,
kAudioFileReadPermission,
0,
&songFile);
NSAssert (setupErr == noErr, @"Couldn't open audio file");
UInt64 audioDataByteCount;
UInt32 audioDataByteCountSize = sizeof (audioDataByteCount);
setupErr = AudioFileGetProperty(songFile,
kAudioFilePropertyAudioDataByteCount,
&audioDataByteCountSize,
&audioDataByteCount);
NSAssert (setupErr == noErr, @"Couldn't get size property");
musicPlaybackState.audioData = malloc (audioDataByteCount);
musicPlaybackState.audioDataByteCount = audioDataByteCount;
musicPlaybackState.samplePtr = musicPlaybackState.audioData;
NSLog (@"reading %qu bytes from file", audioDataByteCount);
UInt32 bytesRead = audioDataByteCount;
setupErr = AudioFileReadBytes(songFile,
false,
0,
&bytesRead,
musicPlaybackState.audioData);
NSAssert (setupErr == noErr, @"Couldn't read audio data");
NSLog (@"read %d bytes from file", bytesRead);
AudioStreamBasicDescription fileASBD;
UInt32 asbdSize = sizeof (fileASBD);
setupErr = AudioFileGetProperty(songFile,
kAudioFilePropertyDataFormat,
&asbdSize,
&fileASBD);
NSAssert (setupErr == noErr, @"Couldn't get file asbd");
ExtAudioFileCreateWithURL(outputFileURL,
kAudioFileCAFType,
&fileASBD,
nil,
kAudioFileFlags_EraseFile,
&musicPlaybackState.extAudioFile);
// get the effect unit (a Delay; the code below refers to it as mixerUnit)
AudioComponentDescription mixerDesc;
mixerDesc.componentType = kAudioUnitType_Effect;
mixerDesc.componentSubType = kAudioUnitSubType_Delay;
mixerDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
mixerDesc.componentFlags = 0;
mixerDesc.componentFlagsMask = 0;
// get mixer unit from audio component manager
AudioComponent mixerComponent = AudioComponentFindNext(NULL, &mixerDesc);
setupErr = AudioComponentInstanceNew(mixerComponent, &mixerUnit);
NSAssert (setupErr == noErr, @"Couldn't get mixer unit instance");
// set up connections and callbacks
// connect mixer bus 0 input to robot voice render callback
effectState.rioUnit = remoteIOUnit;
effectState.sineFrequency = 23;
effectState.sinePhase = 0;
effectState.asbd = myASBD;
// connect mixer bus 1 input to music player callback
AURenderCallbackStruct musicPlayerCallbackStruct;
musicPlayerCallbackStruct.inputProc = MusicPlayerCallback; // callback function
musicPlayerCallbackStruct.inputProcRefCon = &musicPlaybackState;
setupErr =
AudioUnitSetProperty(mixerUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
outputElement,
&musicPlayerCallbackStruct,
sizeof (musicPlayerCallbackStruct));
NSAssert (setupErr == noErr, @"Couldn't set mixer render callback on bus 1");
// direct connect mixer to output
AudioUnitConnection connection;
connection.sourceAudioUnit = mixerUnit;
connection.sourceOutputNumber = outputElement;
connection.destInputNumber = outputElement;
setupErr =
AudioUnitSetProperty(remoteIOUnit,
kAudioUnitProperty_MakeConnection,
kAudioUnitScope_Input,
outputElement,
&connection,
sizeof (connection));
NSAssert (setupErr == noErr, @"Couldn't set mixer-to-RIO connection");
setupErr = AudioUnitInitialize(mixerUnit);
NSAssert (setupErr == noErr, @"Couldn't initialize mixer unit");
setupErr = AudioUnitInitialize(remoteIOUnit);
NSAssert (setupErr == noErr, @"Couldn't initialize RIO unit");
setupErr = AudioOutputUnitStart (remoteIOUnit);
}
When you have an instance of an initialized audio unit, you can apply an effect to sound using AudioUnitRender by providing an AudioBufferList to it.
First of all, make sure that the sound is in a format accepted by the audio unit. You can get this format by reading the kAudioUnitProperty_StreamFormat property.
If your audio file has a different format than the one you got from the audio unit, you can convert the audio "on the fly" using ExtAudioFile. To achieve this, set the kExtAudioFileProperty_ClientDataFormat property of the ExtAudioFile to the format you got from kAudioUnitProperty_StreamFormat. Then, when you read the audio file, you will get audio in the needed format.
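A minimal sketch of that setup, assuming an already-opened ExtAudioFileRef named extFile and your effect unit in unit (both names are illustrative):
// ask the unit which format it renders in (output scope, bus 0)
AudioStreamBasicDescription unitFormat;
UInt32 size = sizeof(unitFormat);
AudioUnitGetProperty(unit, kAudioUnitProperty_StreamFormat,
                     kAudioUnitScope_Output, 0, &unitFormat, &size);
// tell ExtAudioFile to convert into that format while reading
ExtAudioFileSetProperty(extFile, kExtAudioFileProperty_ClientDataFormat,
                        sizeof(unitFormat), &unitFormat);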
Also, make sure that the kAudioUnitProperty_ShouldAllocateBuffer property of the audio unit is set to 1.
To call AudioUnitRender you must prepare a valid AudioTimeStamp, AudioUnitRenderActionFlags (can be set to 0), and an AudioBufferList. You don't need to allocate memory for the buffers; you just need to provide the number of buffers and their sizes.
AudioBufferList *buffer = malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer));
buffer->mNumberBuffers = 2; // at least 2 buffers
buffer->mBuffers[0].mDataByteSize = ...; // size of one buffer
buffer->mBuffers[1].mDataByteSize = ...;
AudioUnitRenderActionFlags flags = 0;
AudioTimeStamp timeStamp;
memset(&timeStamp, 0, sizeof(AudioTimeStamp));
timeStamp.mFlags = kAudioTimeStampSampleTimeValid;
UInt32 frames = ...; // number of frames in buffer
AudioUnit unit = ...; // your Delay unit
Now you can call AudioUnitRender:
AudioUnitRender(unit, &flags, &timeStamp, 0, frames, buffer);
The audio unit will ask the callback to fill the buffers and apply the effect to the sound; after that you will have buffers with valid audio. For this you need to set the kAudioUnitProperty_SetRenderCallback property to a valid callback.
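For completeness, attaching the callback might look like this (a sketch; MyRenderCallback and myState are placeholder names for your callback function and its context):
AURenderCallbackStruct cb;
cb.inputProc = MyRenderCallback;   // your OSStatus render callback
cb.inputProcRefCon = &myState;     // handed back to the callback as inRefCon
AudioUnitSetProperty(unit, kAudioUnitProperty_SetRenderCallback,
                     kAudioUnitScope_Global, 0,
                     &cb, sizeof(cb));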
Everyone, I have a problem with the alSourceUnqueueBuffers API when I use the OpenAL library.
My problem is as follows:
1. I play PCM music through a streaming mechanism.
2. The application queues up one or more buffer names using alSourceQueueBuffers.
When a buffer has been processed, I want to fill it with new audio data in my getSourceState function, but when I call the OpenAL API alSourceUnqueueBuffers it returns the error AL_INVALID_OPERATION, even though I follow the OpenAL documentation.
I tried a way around this problem: calling alSourceStop(source) before alSourceUnqueueBuffers and alSourcePlay(source) after I have filled new data through alBufferData and alSourceQueueBuffers. But that is bad, because it breaks up the music.
Who can help me find the problem? And where can I find more information and techniques for OpenAL?
I am waiting for your help. Thanks, everyone.
My code is as follows:
.h:
@interface myPlayback : NSObject
{
ALuint source;
ALuint * buffers;
ALCcontext* context;
ALCdevice* device;
unsigned long long offset;
ALenum m_format;
ALsizei m_freq;
void* data;
}
@end
.m
- (void)initOpenAL
{
ALenum error;
// Create a new OpenAL Device
// Pass NULL to specify the system’s default output device
device = alcOpenDevice(NULL);
if (device != NULL)
{
// Create a new OpenAL Context
// The new context will render to the OpenAL Device just created
context = alcCreateContext(device, 0);
if (context != NULL)
{
// Make the new context the Current OpenAL Context
alcMakeContextCurrent(context);
// Create some OpenAL Buffer Objects
buffers = (ALuint*)malloc(sizeof(ALuint) * 5);
alGenBuffers(5, buffers);
if((error = alGetError()) != AL_NO_ERROR) {
NSLog(@"Error Generating Buffers: %x", error);
exit(1);
}
// Create some OpenAL Source Objects
alGenSources(1, &source);
if((error = alGetError()) != AL_NO_ERROR)
{
NSLog(@"Error generating sources! %x\n", error);
exit(1);
}
}
}
// clear any errors
alGetError();
[self initBuffer];
[self initSource];
}
- (void) initBuffer
{
ALenum error = AL_NO_ERROR;
ALenum format;
ALsizei size;
ALsizei freq;
NSBundle* bundle = [NSBundle mainBundle];
// get some audio data from a wave file
CFURLRef fileURL = (CFURLRef)[[NSURL fileURLWithPath:[bundle pathForResource:@"4" ofType:@"caf"]] retain];
if (fileURL)
{
data = MyGetOpenALAudioData(fileURL, &size, &format, &freq);
CFRelease(fileURL);
m_freq = freq;
m_format = format;
if((error = alGetError()) != AL_NO_ERROR) {
NSLog(@"error loading sound: %x\n", error);
exit(1);
}
alBufferData(buffers[0], format, data, READ_SIZE , freq);
offset += READ_SIZE;
alBufferData(buffers[1], format, data + offset, READ_SIZE, freq);
offset += READ_SIZE;
alBufferData(buffers[2], format, data + offset, READ_SIZE, freq);
offset += READ_SIZE;
alBufferData(buffers[3], format, data + offset, READ_SIZE, freq);
offset += READ_SIZE;
alBufferData(buffers[4], format, data + offset, READ_SIZE, freq);
offset += READ_SIZE;
if((error = alGetError()) != AL_NO_ERROR) {
NSLog(@"error attaching audio to buffer: %x\n", error);
}
}
else
NSLog(@"Could not find file!\n");
}
- (void) initSource
{
ALenum error = AL_NO_ERROR;
alGetError(); // Clear the error
// Turn Looping ON
alSourcei(source, AL_LOOPING, AL_TRUE);
// Set Source Position
float sourcePosAL[] = {sourcePos.x, kDefaultDistance, sourcePos.y};
alSourcefv(source, AL_POSITION, sourcePosAL);
// Set Source Reference Distance
alSourcef(source, AL_REFERENCE_DISTANCE, 50.0f);
alSourceQueueBuffers(source, 5, buffers);
if((error = alGetError()) != AL_NO_ERROR) {
NSLog(@"Error attaching buffer to source: %x\n", error);
exit(1);
}
}
- (void)startSound
{
ALenum error;
NSLog(@"Start!\n");
// Begin playing our source file
alSourcePlay(source);
if((error = alGetError()) != AL_NO_ERROR) {
NSLog(@"error starting source: %x\n", error);
} else {
// Mark our state as playing (the view looks at this)
self.isPlaying = YES;
}
while (1) {
[self getSourceState];
}
}
-(void)getSourceState
{
int queued;
int processed;
int state;
alGetSourcei(source, AL_BUFFERS_QUEUED, &queued);
alGetSourcei(source, AL_BUFFERS_PROCESSED, &processed);
alGetSourcei(source, AL_SOURCE_STATE, &state);
NSLog(@"%d", queued);
NSLog(@"%d", processed);
NSLog(@"===================================");
while (processed > 0) {
for (int i = 0; i < processed; ++i) {
ALuint buf;
alGetError();
// alSourceStop(source);
ALenum y = alGetError();
NSLog(@"%d", y);
alSourceUnqueueBuffers(source, 1, &buf);
ALenum i = alGetError();
NSLog(@"%d", i);
processed --;
alBufferData(buf, m_format, data + offset, READ_SIZE, m_freq);
ALenum j = alGetError();
NSLog(@"%d", j);
alSourceQueueBuffers(source, 1, &buf);
ALenum k = alGetError();
NSLog(@"%d", k);
offset += READ_SIZE;
// alSourcePlay(source);
}
}
// [self getSourceState];
}
I found the reason for the problem.
It is that I turned looping on: alSourcei(source, AL_LOOPING, AL_TRUE);
If you set this, then when the source has processed a buffer and you want to fill it with new data or remove it from the source, you will get the error.
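So for a streaming source, leave looping off and get the repeat effect by re-queuing buffers yourself. A sketch based on the code above:
alSourcei(source, AL_LOOPING, AL_FALSE); // a streaming source must not loop
ALint processed = 0;
alGetSourcei(source, AL_BUFFERS_PROCESSED, &processed);
while (processed-- > 0) {
    ALuint buf;
    alSourceUnqueueBuffers(source, 1, &buf); // legal now that looping is off
    alBufferData(buf, m_format, data + offset, READ_SIZE, m_freq);
    alSourceQueueBuffers(source, 1, &buf);
    offset += READ_SIZE; // wrap offset back to 0 here to make the stream repeat
}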
I am trying to play the recorded content simultaneously while recording. Currently I am using AVAudioRecorder for recording and AVAudioPlayer for playing.
When I try to play the content simultaneously, nothing plays. Please find below the pseudo code for what I am doing.
If I do the same thing after stopping the recording, everything works fine.
AVAudioRecorder *recorder; //Initializing the recorder properly.
[recorder record];
NSError *error=nil;
NSURL *recordingPathUrl; // Contains the recording path.
AVAudioPlayer *audioPlayer = [[AVAudioPlayer alloc] initWithContentsOfURL:recordingPathUrl
error:&error];
[audioPlayer prepareToPlay];
[audioPlayer play];
Could anybody please let me know your thoughts or ideas?
This is achievable. Use this link and download the project:
https://code.google.com/p/ios-coreaudio-example/downloads/detail?name=Aruts.zip&can=2&q=
That link's code will play sound from the speaker but will not record it; I have implemented the record functionality as well. Below is the full code description.
IN .h File
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
#ifndef max
#define max( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef min
#define min( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
@interface IosAudioController : NSObject {
AudioComponentInstance audioUnit;
AudioBuffer tempBuffer; // this will hold the latest data from the microphone
ExtAudioFileRef mAudioFileRef;
}
@property (readonly) ExtAudioFileRef mAudioFileRef;
@property (readonly) AudioComponentInstance audioUnit;
@property (readonly) AudioBuffer tempBuffer;
- (void) start;
- (void) stop;
- (void) processAudio: (AudioBufferList*) bufferList;
@end
// setup a global iosAudio variable, accessible everywhere
extern IosAudioController* iosAudio;
IN .m
#import "IosAudioController.h"
#import <AudioToolbox/AudioToolbox.h>
#import <AVFoundation/AVFoundation.h>
#define kOutputBus 0
#define kInputBus 1
IosAudioController* iosAudio;
void checkStatus(int status){
if (status) {
printf("Status not 0! %d\n", status);
// exit(1);
}
}
static void printAudioUnitRenderActionFlags(AudioUnitRenderActionFlags * ioActionFlags)
{
if (*ioActionFlags == 0) {
printf("AudioUnitRenderActionFlags(%lu) ", *ioActionFlags);
return;
}
printf("AudioUnitRenderActionFlags(%lu): ", *ioActionFlags);
if (*ioActionFlags & kAudioUnitRenderAction_PreRender) printf("kAudioUnitRenderAction_PreRender ");
if (*ioActionFlags & kAudioUnitRenderAction_PostRender) printf("kAudioUnitRenderAction_PostRender ");
if (*ioActionFlags & kAudioUnitRenderAction_OutputIsSilence) printf("kAudioUnitRenderAction_OutputIsSilence ");
if (*ioActionFlags & kAudioOfflineUnitRenderAction_Preflight) printf("kAudioOfflineUnitRenderAction_Preflight ");
if (*ioActionFlags & kAudioOfflineUnitRenderAction_Render) printf("kAudioOfflineUnitRenderAction_Render");
if (*ioActionFlags & kAudioOfflineUnitRenderAction_Complete) printf("kAudioOfflineUnitRenderAction_Complete ");
if (*ioActionFlags & kAudioUnitRenderAction_PostRenderError) printf("kAudioUnitRenderAction_PostRenderError ");
if (*ioActionFlags & kAudioUnitRenderAction_DoNotCheckRenderArgs) printf("kAudioUnitRenderAction_DoNotCheckRenderArgs ");
}
/**
This callback is called when new audio data from the microphone is
available.
*/
static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
double timeInSeconds = inTimeStamp->mSampleTime / 44100.00;
printf("\n%fs inBusNumber: %lu inNumberFrames: %lu ", timeInSeconds, inBusNumber, inNumberFrames);
printAudioUnitRenderActionFlags(ioActionFlags);
// Because of the way our audio format (setup below) is chosen:
// we only need 1 buffer, since it is mono
// Samples are 16 bits = 2 bytes.
// 1 frame includes only 1 sample
AudioBuffer buffer;
buffer.mNumberChannels = 1;
buffer.mDataByteSize = inNumberFrames * 2;
buffer.mData = malloc( inNumberFrames * 2 );
// Put buffer in a AudioBufferList
AudioBufferList bufferList;
SInt16 samples[inNumberFrames]; // A large enough size to not have to worry about buffer overrun
memset (&samples, 0, sizeof (samples));
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0] = buffer;
// Then:
// Obtain recorded samples
OSStatus status;
status = AudioUnitRender([iosAudio audioUnit],
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&bufferList);
checkStatus(status);
// Now, we have the samples we just read sitting in buffers in bufferList
// Process the new data
[iosAudio processAudio:&bufferList];
// Now, we have the samples we just read sitting in buffers in bufferList
ExtAudioFileWriteAsync([iosAudio mAudioFileRef], inNumberFrames, &bufferList);
// release the malloc'ed data in the buffer we created earlier
free(bufferList.mBuffers[0].mData);
return noErr;
}
/**
This callback is called when the audioUnit needs new data to play through the
speakers. If you don't have any, just don't write anything in the buffers
*/
static OSStatus playbackCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
// Notes: ioData contains buffers (may be more than one!)
// Fill them up as much as you can. Remember to set the size value in each buffer to match how
// much data is in the buffer.
for (int i=0; i < ioData->mNumberBuffers; i++) { // in practice we will only ever have 1 buffer, since audio format is mono
AudioBuffer buffer = ioData->mBuffers[i];
// NSLog(@" Buffer %d has %d channels and wants %d bytes of data.", i, buffer.mNumberChannels, buffer.mDataByteSize);
// copy temporary buffer data to output buffer
UInt32 size = min(buffer.mDataByteSize, [iosAudio tempBuffer].mDataByteSize); // don't copy more data than we have, or than fits
memcpy(buffer.mData, [iosAudio tempBuffer].mData, size);
buffer.mDataByteSize = size; // indicate how much data we wrote in the buffer
// uncomment to hear random noise
/*
UInt16 *frameBuffer = buffer.mData;
for (int j = 0; j < inNumberFrames; j++) {
frameBuffer[j] = rand();
}
*/
}
return noErr;
}
@implementation IosAudioController
@synthesize audioUnit, tempBuffer, mAudioFileRef;
/**
Initialize the audioUnit and allocate our own temporary buffer.
The temporary buffer will hold the latest data coming in from the microphone,
and will be copied to the output when this is requested.
*/
- (id) init {
self = [super init];
OSStatus status;
AVAudioSession *session = [AVAudioSession sharedInstance];
NSLog(@"%f", session.preferredIOBufferDuration);
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &audioUnit);
checkStatus(status);
// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
checkStatus(status);
// Enable IO for playback
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
checkStatus(status);
// Describe format
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;
// Apply format
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status);
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status);
// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus,
&callbackStruct,
sizeof(callbackStruct));
checkStatus(status);
// Set output callback
callbackStruct.inputProc = playbackCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
kOutputBus,
&callbackStruct,
sizeof(callbackStruct));
checkStatus(status);
// Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
flag = 0;
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_ShouldAllocateBuffer,
kAudioUnitScope_Output,
kInputBus,
&flag,
sizeof(flag));
// set preferred buffer size
Float32 audioBufferSize = (0.023220);
UInt32 size = sizeof(audioBufferSize);
status = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration,
size, &audioBufferSize);
// Allocate our own buffers (1 channel, 16 bits per sample, thus 16 bits per frame, thus 2 bytes per frame).
// In practice the buffers used contain 512 frames; if this changes, it is handled in processAudio.
tempBuffer.mNumberChannels = 1;
tempBuffer.mDataByteSize = 512 * 2;
tempBuffer.mData = malloc( 512 * 2 );
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *documentsDirectory = [paths objectAtIndex:0];
NSString *destinationFilePath = [[NSString alloc] initWithFormat: @"%@/output.caf", documentsDirectory];
NSLog(@">>> %@\n", destinationFilePath);
CFURLRef destinationURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, ( CFStringRef)destinationFilePath, kCFURLPOSIXPathStyle, false);
OSStatus setupErr = ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &audioFormat, NULL, kAudioFileFlags_EraseFile, &mAudioFileRef);
CFRelease(destinationURL);
NSAssert(setupErr == noErr, @"Couldn't create file for writing");
setupErr = ExtAudioFileSetProperty(mAudioFileRef, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &audioFormat);
NSAssert(setupErr == noErr, @"Couldn't set the client data format");
setupErr = ExtAudioFileWriteAsync(mAudioFileRef, 0, NULL);
NSAssert(setupErr == noErr, @"Couldn't initialize write buffers for audio file");
// Initialise
status = AudioUnitInitialize(audioUnit);
checkStatus(status);
// [NSTimer scheduledTimerWithTimeInterval:5 target:self selector:@selector(stopRecording:) userInfo:nil repeats:NO];
return self;
}
/**
Start the audioUnit. This means data will be provided from
the microphone, and requested for feeding to the speakers, by
use of the provided callbacks.
*/
- (void) start {
OSStatus status = AudioOutputUnitStart(audioUnit);
checkStatus(status);
}
/**
Stop the audioUnit
*/
- (void) stop {
OSStatus status = AudioOutputUnitStop(audioUnit);
checkStatus(status);
[self stopRecording:nil];
}
/**
Change this function to decide what is done with incoming
audio data from the microphone.
Right now we copy it to our own temporary buffer.
*/
- (void) processAudio: (AudioBufferList*) bufferList{
AudioBuffer sourceBuffer = bufferList->mBuffers[0];
// fix tempBuffer size if it's the wrong size
if (tempBuffer.mDataByteSize != sourceBuffer.mDataByteSize) {
free(tempBuffer.mData);
tempBuffer.mDataByteSize = sourceBuffer.mDataByteSize;
tempBuffer.mData = malloc(sourceBuffer.mDataByteSize);
}
// copy incoming audio data to temporary buffer
memcpy(tempBuffer.mData, bufferList->mBuffers[0].mData, bufferList->mBuffers[0].mDataByteSize);
}
- (void)stopRecording:(NSTimer*)theTimer
{
printf("\nstopRecording\n");
OSStatus status = ExtAudioFileDispose(mAudioFileRef);
printf("OSStatus(ExtAudioFileDispose): %ld\n", status);
}
/**
Clean up.
*/
- (void) dealloc {
AudioUnitUninitialize(audioUnit);
free(tempBuffer.mData);
[super dealloc]; // under MRC, call super's dealloc last
}
This will definitely help you.
Another good way of doing this is to download Audio Touch from https://github.com/tkzic/audiograph and look at the Echo function of that application; it repeats your voice as you speak, but it does not record audio, so add a recording function to it as described below:
IN MixerHostAudio.h
@property (readwrite) ExtAudioFileRef mRecordFile;
-(void)Record;
-(void)StopRecord;
IN MixerHostAudio.m
//ADD these two function in this class
-(void)Record{
NSString *completeFileNameAndPath = [[NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject] stringByAppendingString:@"/Record.wav"];
//create the url that the recording object needs to reference the file
CFURLRef audioFileURL = CFURLCreateFromFileSystemRepresentation (NULL, (const UInt8 *)[completeFileNameAndPath cStringUsingEncoding:[NSString defaultCStringEncoding]] , strlen([completeFileNameAndPath cStringUsingEncoding:[NSString defaultCStringEncoding]]), false);
AudioStreamBasicDescription dstFormat, clientFormat;
memset(&dstFormat, 0, sizeof(dstFormat));
memset(&clientFormat, 0, sizeof(clientFormat));
AudioFileTypeID fileTypeId = kAudioFileWAVEType;
UInt32 size = sizeof(dstFormat);
dstFormat.mFormatID = kAudioFormatLinearPCM;
// setup the output file format
dstFormat.mSampleRate = 44100.0; // set sample rate
// create a 16-bit 44.1 kHz stereo format
dstFormat.mChannelsPerFrame = 2;
dstFormat.mBitsPerChannel = 16;
dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 4;
dstFormat.mFramesPerPacket = 1;
dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
// get the client format directly from the mixer unit
UInt32 asbdSize = sizeof (AudioStreamBasicDescription);
AudioUnitGetProperty(mixerUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0, // input bus
&clientFormat,
&asbdSize);
ExtAudioFileCreateWithURL(audioFileURL, fileTypeId, &dstFormat, NULL, kAudioFileFlags_EraseFile, &mRecordFile);
printf("recording\n");
ExtAudioFileSetProperty(mRecordFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
//call this once as this will alloc space on the first call
ExtAudioFileWriteAsync(mRecordFile, 0, NULL);
}
-(void)StopRecord{
ExtAudioFileDispose(mRecordFile);
}
// In the micLineInCallback function, add this line at the end, just before return noErr;:
ExtAudioFileWriteAsync([THIS mRecordFile], inNumberFrames, ioData);
And call these functions from MixerHostViewController.m in the - (IBAction) playOrStop: (id) sender method, as sketched below.
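Roughly like this (a sketch; audioObject and startAUGraph/stopAUGraph come from Apple's MixerHost sample that audiograph is based on — verify the names in your copy):
- (IBAction) playOrStop: (id) sender {
    if (audioObject.isPlaying) {
        [audioObject StopRecord];  // close Record.wav
        [audioObject stopAUGraph];
    } else {
        [audioObject Record];      // create the file before audio starts flowing
        [audioObject startAUGraph];
    }
    // update the button title / playing flag as the sample already does
}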
You'll need to use AudioUnits if you want real-time monitoring of your audio input.
Apple's Audio Unit Hosting Guide
Tutorial on configuring the Remote I/O Audio Unit
The RemoteIO Audio Unit can be used for simultaneous record and play. There are plenty of examples of recording using RemoteIO (aurioTouch) and playing using RemoteIO. Just enable both unit input and unit output, and handle both buffer callbacks. See an example here
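For reference, the two EnableIO calls look like this (a sketch; bus 1 is input, bus 0 is output, matching the kInputBus/kOutputBus defines earlier in this section):
UInt32 one = 1;
// enable microphone input on the input scope of bus 1
AudioUnitSetProperty(remoteIOUnit, kAudioOutputUnitProperty_EnableIO,
                     kAudioUnitScope_Input, 1, &one, sizeof(one));
// enable speaker output on the output scope of bus 0
AudioUnitSetProperty(remoteIOUnit, kAudioOutputUnitProperty_EnableIO,
                     kAudioUnitScope_Output, 0, &one, sizeof(one));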
CFStringRef state;
UInt32 propertySize = sizeof(CFStringRef);
// AudioSessionInitialize(NULL, NULL, NULL, NULL);
AudioSessionGetProperty(kAudioSessionProperty_AudioRoute, &propertySize, &state);
if(CFStringGetLength(state) == 0)
// if(state == 0)
{ //SILENT
NSLog(@"Silent switch is on");
// create vibrate
// AudioServicesPlaySystemSound(kSystemSoundID_Vibrate);
UInt32 audioCategory = kAudioSessionCategory_MediaPlayback;
AudioSessionSetProperty( kAudioSessionProperty_AudioCategory, sizeof(UInt32), &audioCategory);
}
else { //NOT SILENT
NSLog(@"Silent switch is off");
}
Wherever I use the above code I am able to play a sound file in silent mode,
but after playing the recorded sound file in silent mode, when I try to record voice again
I get an error like:
2010-12-08 13:29:56.710 VoiceRecorder[382:307] -66681
Could not start Audio Queue
Error starting recording
here is the code
// file url
[self setupAudioFormat:&recordState.dataFormat];
CFURLRef fileURL = CFURLCreateFromFileSystemRepresentation(NULL, (const UInt8 *) [filePath UTF8String], [filePath length], NO);
// recordState.currentPacket = 0;
// new input queue
OSStatus status;
status = AudioQueueNewInput(&recordState.dataFormat, HandleInputBuffer, &recordState, CFRunLoopGetCurrent(),kCFRunLoopCommonModes, 0, &recordState.queue);
if (status) {CFRelease(fileURL); printf("Could not establish new queue\n"); return NO;}
// create new audio file
status = AudioFileCreateWithURL(fileURL, kAudioFileAIFFType, &recordState.dataFormat, kAudioFileFlags_EraseFile, &recordState.audioFile); CFRelease(fileURL); // thanks august joki
if (status) {printf("Could not create file to record audio\n"); return NO;}
// figure out the buffer size
DeriveBufferSize(recordState.queue, recordState.dataFormat, 0.5, &recordState.bufferByteSize); // allocate those buffers and enqueue them
for(int i = 0; i < NUM_BUFFERS; i++)
{
status = AudioQueueAllocateBuffer(recordState.queue, recordState.bufferByteSize, &recordState.buffers[i]);
if (status) {printf("Error allocating buffer %d\n", i); return NO;}
status = AudioQueueEnqueueBuffer(recordState.queue, recordState.buffers[i], 0, NULL);
if (status) {printf("Error enqueuing buffer %d\n", i); return NO;}
} // enable metering
UInt32 enableMetering = YES;
status = AudioQueueSetProperty(recordState.queue, kAudioQueueProperty_EnableLevelMetering, &enableMetering,sizeof(enableMetering));
if (status) {printf("Could not enable metering\n"); return NO;}
// start recording
status = AudioQueueStart(recordState.queue, NULL); // status = 0; NSLog(@"%d", status);
if (status) {printf("Could not start Audio Queue\n"); return NO;}
recordState.currentPacket = 0;
recordState.recording = YES;
return YES;
I get the error here.
I was facing a similar problem in iOS 7.1. Add the following in the AppDelegate's didFinishLaunchingWithOptions:
AVAudioSession * audioSession = [AVAudioSession sharedInstance];
[audioSession setCategory:AVAudioSessionCategoryPlayAndRecord error: nil];
[audioSession setActive:YES error: nil];
EDIT: The above code is working for me.
I would like to use the Core Audio Extended Audio File Services framework to read an mp3 file, process it as PCM, then write the modified file back as an mp3 file. I am able to convert the mp3 file to PCM, but am NOT able to write the PCM back as mp3.
I have followed and analyzed the Apple ExtAudioFileConvertTest sample and cannot get that to work either. The failure point is when I set the client format for the output file (set to a canonical PCM type). This fails with error "fmt?" if the output target type is set to mp3.
Is it possible to do mp3 -> PCM -> mp3 on the iPhone? If I remove the failing line, setting the kExtAudioFileProperty_ClientDataFormat for the output file, the code fails with "pkd?" when I try to write to the output file later. So basically I have 2 errors:
1) "fmt?" when trying to set kExtAudioFileProperty_ClientDataFormat for the output file
2) "pkd?" when trying to write to the output file
Here is the code to set up the files:
NSURL *fileUrl = [NSURL fileURLWithPath:sourceFilePath];
OSStatus error = noErr;
//
// Open the file
//
error = ExtAudioFileOpenURL((CFURLRef)fileUrl, &sourceFile);
if(error){
NSLog(@"AudioClip: Error opening file at %@. Error code %d", sourceFilePath, error);
return NO;
}
//
// Store the number of frames in the file
//
SInt64 numberOfFrames = 0;
UInt32 propSize = sizeof(SInt64);
error = ExtAudioFileGetProperty(sourceFile, kExtAudioFileProperty_FileLengthFrames, &propSize, &numberOfFrames);
if(error){
NSLog(@"AudioClip: Error retrieving number of frames: %d", error);
[self closeAudioFile];
return NO;
}
frameCount = numberOfFrames;
//
// Get the source file format info
//
propSize = sizeof(sourceFileFormat);
memset(&sourceFileFormat, 0, sizeof(AudioStreamBasicDescription));
error = ExtAudioFileGetProperty(sourceFile, kExtAudioFileProperty_FileDataFormat, &propSize, &sourceFileFormat);
if(error){
NSLog(@"AudioClip: Error getting source audio file properties: %d", error);
[self closeAudioFile];
return NO;
}
//
// Set the format for our read. We read in PCM, clip, then write out mp3
//
memset(&readFileFormat, 0, sizeof(AudioStreamBasicDescription));
readFileFormat.mFormatID = kAudioFormatLinearPCM;
readFileFormat.mSampleRate = 44100;
readFileFormat.mFormatFlags = kAudioFormatFlagsCanonical | kAudioFormatFlagIsNonInterleaved;
readFileFormat.mChannelsPerFrame = 1;
readFileFormat.mBitsPerChannel = 8 * sizeof(AudioSampleType);
readFileFormat.mFramesPerPacket = 1;
readFileFormat.mBytesPerFrame = sizeof(AudioSampleType);
readFileFormat.mBytesPerPacket = sizeof(AudioSampleType);
readFileFormat.mReserved = 0;
propSize = sizeof(readFileFormat);
error = ExtAudioFileSetProperty(sourceFile, kExtAudioFileProperty_ClientDataFormat, propSize, &readFileFormat);
if(error){
NSLog(@"AudioClip: Error setting read format: %d", error);
[self closeAudioFile];
return NO;
}
//
// Set the format for the output file that we will write
//
propSize = sizeof(targetFileFormat);
memset(&targetFileFormat, 0, sizeof(AudioStreamBasicDescription));
targetFileFormat.mFormatID = kAudioFormatMPEGLayer3;
targetFileFormat.mChannelsPerFrame = 1;
//
// Let the API fill in the rest
//
error = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &propSize, &targetFileFormat);
if(error){
NSLog(@"AudioClip: Error getting target file format info: %d", error);
[self closeAudioFile];
return NO;
}
//
// Create our target file
//
NSURL *writeURL = [NSURL fileURLWithPath:targetFilePath];
error = ExtAudioFileCreateWithURL( (CFURLRef)writeURL, kAudioFileMP3Type,
&targetFileFormat, NULL,
kAudioFileFlags_EraseFile,
&targetFile);
if(error){
NSLog(@"AudioClip: Error opening target file for writing: %d", error);
[self closeAudioFile];
return NO;
}
//
// Set the client format for the output file the same as our client format for the input file
//
propSize = sizeof(readFileFormat);
error = ExtAudioFileSetProperty(targetFile, kExtAudioFileProperty_ClientDataFormat, propSize, &readFileFormat);
if(error){
NSLog(@"AudioClip: Error, cannot set client format for output file: %d", error);
[self closeAudioFile];
return NO;
}
And the code to read/write:
NSInteger framesToRead = finalFrameNumber - startFrameNumber;
while(framesToRead > 0){
//
// Read frames into our data
//
short *data = (short *)malloc(framesToRead * sizeof(short));
if(!data){
NSLog(@"AudioPlayer: Cannot init memory for read buffer");
[self notifyDelegateFailure];
[self closeAudioFile];
return;
}
AudioBufferList bufferList;
OSStatus error = noErr;
UInt32 loadedPackets = framesToRead;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mNumberChannels = 1;
bufferList.mBuffers[0].mData = data;
bufferList.mBuffers[0].mDataByteSize = (framesToRead * sizeof(short));
NSLog(@"AudioClip: Before read mNumberBuffers = %d, mNumberChannels = %d, mData = %p, mDataByteSize = %d",
bufferList.mNumberBuffers, bufferList.mBuffers[0].mNumberChannels, bufferList.mBuffers[0].mData,
bufferList.mBuffers[0].mDataByteSize);
error = ExtAudioFileRead(sourceFile, &loadedPackets, &bufferList);
if(error){
NSLog(@"AudioClip: Error %d from ExtAudioFileRead", error);
[self notifyDelegateFailure];
[self closeAudioFile];
return;
}
//
// Now write the data to our file which will convert it into a mp3 file
//
NSLog(@"AudioClip: After read mNumberBuffers = %d, mNumberChannels = %d, mData = %p, mDataByteSize = %d",
bufferList.mNumberBuffers, bufferList.mBuffers[0].mNumberChannels, bufferList.mBuffers[0].mData,
bufferList.mBuffers[0].mDataByteSize);
error = ExtAudioFileWrite(targetFile, loadedPackets, &bufferList);
if(error){
NSLog(@"AudioClip: Error %d from ExtAudioFileWrite", error);
[self notifyDelegateFailure];
[self closeAudioFile];
return;
}
framesToRead -= loadedPackets;
}
Apple doesn't supply an MP3 encoder, only a decoder. The source document is a bit outdated, but AFAIK it is still current: http://developer.apple.com/library/ios/#documentation/MusicAudio/Conceptual/CoreAudioOverview/SupportedAudioFormatsMacOSX/SupportedAudioFormatsMacOSX.html%23//apple_ref/doc/uid/TP40003577-CH7-SW1
I think your best bet might be to use AAC.
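Concretely, that means changing only the target format and the container in the setup code above (a sketch; AudioFormatGetProperty fills in the remaining AAC fields just as it did for mp3):
memset(&targetFileFormat, 0, sizeof(AudioStreamBasicDescription));
targetFileFormat.mFormatID = kAudioFormatMPEG4AAC; // instead of kAudioFormatMPEGLayer3
targetFileFormat.mChannelsPerFrame = 1;
targetFileFormat.mSampleRate = 44100;
UInt32 propSize = sizeof(targetFileFormat);
AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL,
                       &propSize, &targetFileFormat);
// write into an .m4a container instead of .mp3
ExtAudioFileCreateWithURL((CFURLRef)writeURL, kAudioFileM4AType,
                          &targetFileFormat, NULL,
                          kAudioFileFlags_EraseFile, &targetFile);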