Video from set of images has RGB problem - iPhone

Hi guys,
I used FFmpeg to create a video from a sequence of images. Following is my code.
-(void)imageToMov:(NSString*)videoName imageNumber:(int)imageNumber{
[[NSFileManager defaultManager]createDirectoryAtPath:[Utilities documentsPath:[NSString stringWithFormat:@"CoachedFiles/"]] attributes:nil];
[[NSFileManager defaultManager]createDirectoryAtPath:[Utilities documentsPath:[NSString stringWithFormat:@"CoachedFiles/%@/",videoName]] attributes:nil];
// create the output file
[[NSFileManager defaultManager]createFileAtPath:[Utilities documentsPath:[NSString stringWithFormat:@"CoachedFiles/%@/%@.mov",videoName,videoName]] contents:nil attributes:nil];
//[[NSFileManager defaultManager]createFileAtPath:[Utilities documentsPath:[NSString stringWithFormat:@"temp/temp.mov"]] contents:nil attributes:nil];
const char *outfilename = [[Utilities documentsPath:[NSString stringWithFormat:@"CoachedFiles/%@/%@.mov",videoName,videoName]]UTF8String];
UIImage * tempImage = [UIImage imageWithContentsOfFile:[Utilities documentsPath:[NSString stringWithFormat:@"temp/temp0000.jpeg"]]];
AVFormatContext *pFormatCtxEnc;
AVCodecContext *pCodecCtxEnc;
AVCodec *pCodecEnc;
AVFrame *pFrameEnc;
AVOutputFormat *pOutputFormat;
AVStream *video_st;
int i;
int outbuf_size;
uint8_t *outbuf;
int out_size;
// Register all formats and codecs
av_register_all();
// auto detect the output format from the name. default is mpeg.
pOutputFormat = av_guess_format(NULL, outfilename, NULL);
if (pOutputFormat == NULL)
return;
// allocate the output media context
pFormatCtxEnc = avformat_alloc_context();
if (pFormatCtxEnc == NULL)
return;
pFormatCtxEnc->oformat = pOutputFormat;
sprintf(pFormatCtxEnc->filename, "%s", outfilename);
video_st = av_new_stream(pFormatCtxEnc, 0); // 0 for video
pCodecCtxEnc = video_st->codec;
pCodecCtxEnc->codec_id = pOutputFormat->video_codec;
pCodecCtxEnc->codec_type = CODEC_TYPE_VIDEO;
// put sample parameters
pCodecCtxEnc->bit_rate = 500000;
// resolution must be a multiple of two
pCodecCtxEnc->width = tempImage.size.width;
pCodecCtxEnc->height = tempImage.size.height;
// frames per second
pCodecCtxEnc->time_base.den = 1;
pCodecCtxEnc->time_base.num = 1;
pCodecCtxEnc->pix_fmt = PIX_FMT_YUV420P;
pCodecCtxEnc->gop_size = 12; /* emit one intra frame every ten frames */
if (pCodecCtxEnc->codec_id == CODEC_ID_MPEG1VIDEO){
/* needed to avoid using macroblocks in which some coeffs overflow
this doesn't happen with normal video, it just happens here as the
motion of the chroma plane doesn't match the luma plane */
pCodecCtxEnc->mb_decision=2;
}
// some formats want stream headers to be separate
if(!strcmp(pFormatCtxEnc->oformat->name, "mp4") || !strcmp(pFormatCtxEnc->oformat->name, "mov") || !strcmp(pFormatCtxEnc->oformat->name, "3gp"))
pCodecCtxEnc->flags |= CODEC_FLAG_GLOBAL_HEADER;
// set the output parameters (must be done even if no parameters).
if (av_set_parameters(pFormatCtxEnc, NULL) < 0) {
return;
}
// find the video encoder
pCodecEnc = avcodec_find_encoder(pCodecCtxEnc->codec_id);
if (pCodecEnc == NULL)
return;
// open it
if (avcodec_open(pCodecCtxEnc, pCodecEnc) < 0) {
return;
}
if (!(pFormatCtxEnc->oformat->flags & AVFMT_RAWPICTURE)) {
/* allocate output buffer */
/* XXX: API change will be done */
outbuf_size = 500000;
outbuf = av_malloc(outbuf_size);
}
pFrameEnc= avcodec_alloc_frame();
// open the output file, if needed
if (!(pOutputFormat->flags & AVFMT_NOFILE)) {
if (url_fopen(&pFormatCtxEnc->pb, outfilename, URL_WRONLY) < 0) {
//fprintf(stderr, "Could not open '%s'\n", filename);
return;
}
}
// write the stream header, if any
av_write_header(pFormatCtxEnc);
// Read frames and save frames to disk
int size = pCodecCtxEnc->width * pCodecCtxEnc->height;
uint8_t * picture_buf;
picture_buf = malloc((size * 3) / 2);
pFrameEnc->data[0] = picture_buf;
pFrameEnc->data[1] = pFrameEnc->data[0] + size;
pFrameEnc->data[2] = pFrameEnc->data[1] + size / 4;
pFrameEnc->linesize[0] = pCodecCtxEnc->width;
pFrameEnc->linesize[1] = pCodecCtxEnc->width / 2;
pFrameEnc->linesize[2] = pCodecCtxEnc->width / 2;
for (i=0;i<imageNumber;i++){
NSString *imgName = [NSString stringWithFormat:@"temp/temp%04d.jpeg",i];
NSLog(@"%@",imgName);
UIImage * image = [UIImage imageWithContentsOfFile:[Utilities documentsPath:imgName]];
[imgName release];
// create an AVPicture
AVPicture pict;
// keep the format as BGRA; the last two arguments are the input image's width and height
avpicture_alloc(&pict, PIX_FMT_BGRA, image.size.width, image.size.height);
// read the image data
CGImageRef cgimage = [image CGImage];
CGDataProviderRef dataProvider = CGImageGetDataProvider(cgimage);
CFDataRef data = CGDataProviderCopyData(dataProvider);
const uint8_t * imagedata = CFDataGetBytePtr(data);
// fill the AVPicture with the image data
avpicture_fill(&pict, imagedata, PIX_FMT_BGRA, image.size.width, image.size.height);
// set up the conversion from BGRA to YUV420
static int sws_flags = SWS_FAST_BILINEAR;
struct SwsContext * img_convert_ctx = sws_getContext(image.size.width,
image.size.height,
PIX_FMT_BGRA,
image.size.width,
image.size.height,
PIX_FMT_YUV420P,
sws_flags, NULL, NULL, NULL);
// convert the image data
sws_scale (img_convert_ctx, pict.data, pict.linesize,
0, image.size.height,
pFrameEnc->data, pFrameEnc->linesize);
if (pFormatCtxEnc->oformat->flags & AVFMT_RAWPICTURE) {
/* raw video case. The API will change slightly in the near
future for that */
AVPacket pkt;
av_init_packet(&pkt);
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= video_st->index;
pkt.data= (uint8_t *)pFrameEnc;
pkt.size= sizeof(AVPicture);
av_write_frame(pFormatCtxEnc, &pkt);
} else {
// encode the image
out_size = avcodec_encode_video(pCodecCtxEnc, outbuf, outbuf_size, pFrameEnc);
// if zero size, it means the image was buffered
if (out_size != 0) {
AVPacket pkt;
av_init_packet(&pkt);
pkt.pts= pCodecCtxEnc->coded_frame->pts;
if(pCodecCtxEnc->coded_frame->key_frame)
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= video_st->index;
pkt.data= outbuf;
pkt.size= out_size;
// write the compressed frame in the media file
av_write_frame(pFormatCtxEnc, &pkt);
}
}
}
// get the delayed frames
for(; out_size; i++) {
out_size = avcodec_encode_video(pCodecCtxEnc, outbuf, outbuf_size, NULL);
if (out_size != 0) {
AVPacket pkt;
av_init_packet(&pkt);
pkt.pts= pCodecCtxEnc->coded_frame->pts;
if(pCodecCtxEnc->coded_frame->key_frame)
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= video_st->index;
pkt.data= outbuf;
pkt.size= out_size;
// write the compressed frame in the media file
av_write_frame(pFormatCtxEnc, &pkt);
}
}
// Close the codec
//avcodec_close(pCodecCtxDec);
avcodec_close(pCodecCtxEnc);
// Free the YUV frame
//av_free(pFrameDec);
av_free(pFrameEnc);
av_free(outbuf);
// write the trailer, if any
av_write_trailer(pFormatCtxEnc);
// free the streams
for(i = 0; i < pFormatCtxEnc->nb_streams; i++) {
av_freep(&pFormatCtxEnc->streams[i]->codec);
av_freep(&pFormatCtxEnc->streams[i]);
}
if (!(pOutputFormat->flags & AVFMT_NOFILE)) {
/* close the output file */
//comment out this code to fix the record video issue. Kevin 2010-07-11
//url_fclose(&pFormatCtxEnc->pb);
}
/* free the stream */
av_free(pFormatCtxEnc);
if([[NSFileManager defaultManager]fileExistsAtPath:[Utilities documentsPath:[NSString stringWithFormat:@"temp/"]] isDirectory:NULL]){
[[NSFileManager defaultManager] removeItemAtPath:[Utilities documentsPath:[NSString stringWithFormat:@"temp/"]] error:nil];
}
//[self MergeVideoFileWithVideoName:videoName];
[self SaveFileDetails:videoName];
[alertView dismissWithClickedButtonIndex:0 animated:YES];
}
Now the problem: the video is created successfully, but the colors come out greenish. Please point out my mistake in this code.

I am not an expert on colors, but I think your problem might be in the pixel format you use for your img_convert_ctx. You are using PIX_FMT_BGRA. That's a different byte order than RGB: it's BGR, with the color channels reversed. Try using PIX_FMT_RGBA instead.
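If you don't want to guess, you can ask Core Graphics which byte order the CGImage actually uses and pick the matching libav pixel format before creating the scaler. A rough sketch, assuming the old PIX_FMT_* names used above; the helper name pixelFormatForCGImage is my own, and only the common 32-bit layouts are handled:
// Hypothetical helper: map a CGImage's bitmap layout to a libav pixel format.
// Anything unrecognised falls back to PIX_FMT_BGRA, the usual iOS layout.
static enum PixelFormat pixelFormatForCGImage(CGImageRef image) {
    CGBitmapInfo info = CGImageGetBitmapInfo(image);
    CGImageAlphaInfo alpha = CGImageGetAlphaInfo(image);
    if ((info & kCGBitmapByteOrderMask) == kCGBitmapByteOrder32Little)
        return PIX_FMT_BGRA; // alpha-first, little endian: B G R A in memory
    if (alpha == kCGImageAlphaPremultipliedLast || alpha == kCGImageAlphaLast || alpha == kCGImageAlphaNoneSkipLast)
        return PIX_FMT_RGBA; // R G B A in memory
    if (alpha == kCGImageAlphaPremultipliedFirst || alpha == kCGImageAlphaFirst || alpha == kCGImageAlphaNoneSkipFirst)
        return PIX_FMT_ARGB; // A R G B in memory
    return PIX_FMT_BGRA;
}
You would then pass the returned format to both avpicture_fill and sws_getContext in place of the hard-coded PIX_FMT_BGRA.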

Related

How to generate audio wave form programmatically while recording Voice in iOS?
I'm working on voice modulation / audio frequency in iOS... everything is working fine... I just need a simple way to generate an audio waveform while detecting noise...
Please don't refer me to the SpeakHere and aurioTouch code tutorials... I need some good suggestions from native app developers.
I have recorded the audio and made it play back after recording. I have created a waveform and attached a screenshot. But it has to be drawn in the view while audio recording is in progress.
-(UIImage *) audioImageGraph:(SInt16 *) samples
normalizeMax:(SInt16) normalizeMax
sampleCount:(NSInteger) sampleCount
channelCount:(NSInteger) channelCount
imageHeight:(float) imageHeight {
CGSize imageSize = CGSizeMake(sampleCount, imageHeight);
UIGraphicsBeginImageContext(imageSize);
CGContextRef context = UIGraphicsGetCurrentContext();
CGContextSetFillColorWithColor(context, [UIColor blackColor].CGColor);
CGContextSetAlpha(context,1.0);
CGRect rect;
rect.size = imageSize;
rect.origin.x = 0;
rect.origin.y = 0;
CGColorRef leftcolor = [[UIColor whiteColor] CGColor];
CGColorRef rightcolor = [[UIColor redColor] CGColor];
CGContextFillRect(context, rect);
CGContextSetLineWidth(context, 1.0);
float halfGraphHeight = (imageHeight / 2) / (float) channelCount ;
float centerLeft = halfGraphHeight;
float centerRight = (halfGraphHeight*3) ;
float sampleAdjustmentFactor = (imageHeight/ (float) channelCount) / (float) normalizeMax;
for (NSInteger intSample = 0 ; intSample < sampleCount ; intSample ++ ) {
SInt16 left = *samples++;
float pixels = (float) left;
pixels *= sampleAdjustmentFactor;
CGContextMoveToPoint(context, intSample, centerLeft-pixels);
CGContextAddLineToPoint(context, intSample, centerLeft+pixels);
CGContextSetStrokeColorWithColor(context, leftcolor);
CGContextStrokePath(context);
if (channelCount==2) {
SInt16 right = *samples++;
float pixels = (float) right;
pixels *= sampleAdjustmentFactor;
CGContextMoveToPoint(context, intSample, centerRight - pixels);
CGContextAddLineToPoint(context, intSample, centerRight + pixels);
CGContextSetStrokeColorWithColor(context, rightcolor);
CGContextStrokePath(context);
}
}
// Create new image
UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
// Tidy up
UIGraphicsEndImageContext();
return newImage;
}
Next, a method that takes an AVURLAsset and returns PNG data:
- (NSData *) renderPNGAudioPictogramForAssett:(AVURLAsset *)songAsset {
NSError * error = nil;
AVAssetReader * reader = [[AVAssetReader alloc] initWithAsset:songAsset error:&error];
AVAssetTrack * songTrack = [songAsset.tracks objectAtIndex:0];
NSDictionary* outputSettingsDict = [[NSDictionary alloc] initWithObjectsAndKeys:
[NSNumber numberWithInt:kAudioFormatLinearPCM],AVFormatIDKey,
// [NSNumber numberWithInt:44100.0],AVSampleRateKey, /*Not Supported*/
// [NSNumber numberWithInt: 2],AVNumberOfChannelsKey, /*Not Supported*/
[NSNumber numberWithInt:16],AVLinearPCMBitDepthKey,
[NSNumber numberWithBool:NO],AVLinearPCMIsBigEndianKey,
[NSNumber numberWithBool:NO],AVLinearPCMIsFloatKey,
[NSNumber numberWithBool:NO],AVLinearPCMIsNonInterleaved,
nil];
AVAssetReaderTrackOutput* output = [[AVAssetReaderTrackOutput alloc] initWithTrack:songTrack outputSettings:outputSettingsDict];
[reader addOutput:output];
[output release];
UInt32 sampleRate,channelCount;
NSArray* formatDesc = songTrack.formatDescriptions;
for(unsigned int i = 0; i < [formatDesc count]; ++i) {
CMAudioFormatDescriptionRef item = (CMAudioFormatDescriptionRef)[formatDesc objectAtIndex:i];
const AudioStreamBasicDescription* fmtDesc = CMAudioFormatDescriptionGetStreamBasicDescription (item);
if(fmtDesc ) {
sampleRate = fmtDesc->mSampleRate;
channelCount = fmtDesc->mChannelsPerFrame;
// NSLog(@"channels:%u, bytes/packet: %u, sampleRate %f",fmtDesc->mChannelsPerFrame, fmtDesc->mBytesPerPacket,fmtDesc->mSampleRate);
}
}
UInt32 bytesPerSample = 2 * channelCount;
SInt16 normalizeMax = 0;
NSMutableData * fullSongData = [[NSMutableData alloc] init];
[reader startReading];
UInt64 totalBytes = 0;
SInt64 totalLeft = 0;
SInt64 totalRight = 0;
NSInteger sampleTally = 0;
NSInteger samplesPerPixel = sampleRate / 50;
while (reader.status == AVAssetReaderStatusReading){
AVAssetReaderTrackOutput * trackOutput = (AVAssetReaderTrackOutput *)[reader.outputs objectAtIndex:0];
CMSampleBufferRef sampleBufferRef = [trackOutput copyNextSampleBuffer];
if (sampleBufferRef){
CMBlockBufferRef blockBufferRef = CMSampleBufferGetDataBuffer(sampleBufferRef);
size_t length = CMBlockBufferGetDataLength(blockBufferRef);
totalBytes += length;
NSAutoreleasePool *wader = [[NSAutoreleasePool alloc] init];
NSMutableData * data = [NSMutableData dataWithLength:length];
CMBlockBufferCopyDataBytes(blockBufferRef, 0, length, data.mutableBytes);
SInt16 * samples = (SInt16 *) data.mutableBytes;
int sampleCount = length / bytesPerSample;
for (int i = 0; i < sampleCount ; i ++) {
SInt16 left = *samples++;
totalLeft += left;
SInt16 right;
if (channelCount==2) {
right = *samples++;
totalRight += right;
}
sampleTally++;
if (sampleTally > samplesPerPixel) {
left = totalLeft / sampleTally;
SInt16 fix = abs(left);
if (fix > normalizeMax) {
normalizeMax = fix;
}
[fullSongData appendBytes:&left length:sizeof(left)];
if (channelCount==2) {
right = totalRight / sampleTally;
SInt16 fix = abs(right);
if (fix > normalizeMax) {
normalizeMax = fix;
}
[fullSongData appendBytes:&right length:sizeof(right)];
}
totalLeft = 0;
totalRight = 0;
sampleTally = 0;
}
}
[wader drain];
CMSampleBufferInvalidate(sampleBufferRef);
CFRelease(sampleBufferRef);
}
}
NSData * finalData = nil;
if (reader.status == AVAssetReaderStatusFailed || reader.status == AVAssetReaderStatusUnknown){
// Something went wrong. return nil
return nil;
}
if (reader.status == AVAssetReaderStatusCompleted){
NSLog(#"rendering output graphics using normalizeMax %d",normalizeMax);
UIImage *test = [self audioImageGraph:(SInt16 *)
fullSongData.bytes
normalizeMax:normalizeMax
sampleCount:fullSongData.length / 4
channelCount:2
imageHeight:100];
finalData = imageToData(test);
}
[fullSongData release];
[reader release];
return finalData;
}
If you want real-time graphics derived from mic input, then use the RemoteIO Audio Unit, which is what most native iOS app developers use for low-latency audio, and Metal or OpenGL for drawing waveforms, which will give you the highest frame rates. You will need completely different code from that provided in your question to do so, as AVAssetReader-based file reading, Core Graphics line drawing and PNG rendering are far, far too slow to use.
Update: with iOS 8 and newer, the Metal API may be able to render graphic visualizations with even greater performance than OpenGL.
Update 2: Here are some code snippets for recording live audio using Audio Units and drawing bit maps using Metal in Swift 3: https://gist.github.com/hotpaw2/f108a3c785c7287293d7e1e81390c20b
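For reference, the bare-bones RemoteIO setup looks roughly like the sketch below (Objective-C/C rather than Swift, error handling omitted, and an already-configured record-capable AVAudioSession is assumed; this is my own sketch, not taken from the gist above):
#import <AudioToolbox/AudioToolbox.h>

static AudioUnit sRemoteIO;

// Called on the audio thread for every captured buffer.
static OSStatus micCallback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
                            const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber,
                            UInt32 inNumberFrames, AudioBufferList *ioData) {
    static Float32 samples[4096];               // enough for typical I/O buffer sizes
    AudioBufferList list;
    list.mNumberBuffers = 1;
    list.mBuffers[0].mNumberChannels = 1;
    list.mBuffers[0].mDataByteSize = inNumberFrames * sizeof(Float32);
    list.mBuffers[0].mData = samples;
    OSStatus err = AudioUnitRender(sRemoteIO, ioActionFlags, inTimeStamp,
                                   inBusNumber, inNumberFrames, &list);
    // samples now holds inNumberFrames mono floats; hand them to the drawing code
    // (dispatch to the main queue before touching any UI).
    return err;
}

static void startMicCapture(void) {
    AudioComponentDescription desc = { kAudioUnitType_Output, kAudioUnitSubType_RemoteIO,
                                       kAudioUnitManufacturer_Apple, 0, 0 };
    AudioComponentInstanceNew(AudioComponentFindNext(NULL, &desc), &sRemoteIO);

    UInt32 one = 1;                             // enable input on element 1 (the mic side)
    AudioUnitSetProperty(sRemoteIO, kAudioOutputUnitProperty_EnableIO,
                         kAudioUnitScope_Input, 1, &one, sizeof(one));

    AudioStreamBasicDescription fmt = {0};      // ask for mono 32-bit float PCM
    fmt.mSampleRate = 44100;
    fmt.mFormatID = kAudioFormatLinearPCM;
    fmt.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked;
    fmt.mChannelsPerFrame = 1;
    fmt.mBitsPerChannel = 32;
    fmt.mBytesPerFrame = fmt.mBytesPerPacket = 4;
    fmt.mFramesPerPacket = 1;
    AudioUnitSetProperty(sRemoteIO, kAudioUnitProperty_StreamFormat,
                         kAudioUnitScope_Output, 1, &fmt, sizeof(fmt));

    AURenderCallbackStruct cb = { micCallback, NULL };
    AudioUnitSetProperty(sRemoteIO, kAudioOutputUnitProperty_SetInputCallback,
                         kAudioUnitScope_Global, 1, &cb, sizeof(cb));

    AudioUnitInitialize(sRemoteIO);
    AudioOutputUnitStart(sRemoteIO);
}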
You should check out EZAudio (https://github.com/syedhali/EZAudio), specifically the EZRecorder and the EZAudioPlot (or GPU-accelerated EZAudioPlotGL).
There is also an example project that does exactly what you want, https://github.com/syedhali/EZAudio/tree/master/EZAudioExamples/iOS/EZAudioRecordExample
EDIT: Here's the code inline
/// In your interface
/**
Use a OpenGL based plot to visualize the data coming in
*/
@property (nonatomic,weak) IBOutlet EZAudioPlotGL *audioPlot;
/**
The microphone component
*/
@property (nonatomic,strong) EZMicrophone *microphone;
/**
The recorder component
*/
@property (nonatomic,strong) EZRecorder *recorder;
...
/// In your implementation
// Create an instance of the microphone and tell it to use this view controller instance as the delegate
-(void)viewDidLoad {
self.microphone = [EZMicrophone microphoneWithDelegate:self startsImmediately:YES];
}
// EZMicrophoneDelegate will provide these callbacks
-(void)microphone:(EZMicrophone *)microphone
hasAudioReceived:(float **)buffer
withBufferSize:(UInt32)bufferSize
withNumberOfChannels:(UInt32)numberOfChannels {
dispatch_async(dispatch_get_main_queue(),^{
// Updates the audio plot with the waveform data
[self.audioPlot updateBuffer:buffer[0] withBufferSize:bufferSize];
});
}
-(void)microphone:(EZMicrophone *)microphone hasAudioStreamBasicDescription:(AudioStreamBasicDescription)audioStreamBasicDescription {
// The AudioStreamBasicDescription of the microphone stream. This is useful when configuring the EZRecorder or telling another component what audio format type to expect.
// We can initialize the recorder with this ASBD
self.recorder = [EZRecorder recorderWithDestinationURL:[self testFilePathURL]
andSourceFormat:audioStreamBasicDescription];
}
-(void)microphone:(EZMicrophone *)microphone
hasBufferList:(AudioBufferList *)bufferList
withBufferSize:(UInt32)bufferSize
withNumberOfChannels:(UInt32)numberOfChannels {
// Getting audio data as a buffer list that can be directly fed into the EZRecorder. This is happening on the audio thread - any UI updating needs a GCD main queue block. This will keep appending data to the tail of the audio file.
if( self.isRecording ){
[self.recorder appendDataFromBufferList:bufferList
withBufferSize:bufferSize];
}
}
I was searching for the same thing (making a waveform from the audio recorder's data). I found some libraries that might be helpful; it's worth checking their code to understand the logic behind it.
The calculation is all based on sine curves and simple math. It is much easier to follow if you take a look at the code (a toy sketch of the idea follows the links below)!
https://github.com/stefanceriu/SCSiriWaveformView
or
https://github.com/raffael/SISinusWaveView
These are only a few of the examples you can find on the web.
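To give an idea of what those libraries do internally, here is a toy Core Graphics sketch (my own, not taken from either project) that draws one cycle of a sine wave scaled by the latest input level:
// level is the most recent mic amplitude, normalized to 0..1.
- (void)drawWaveInContext:(CGContextRef)ctx bounds:(CGRect)bounds level:(CGFloat)level {
    CGFloat midY = CGRectGetMidY(bounds);
    CGFloat amplitude = level * midY;              // louder input -> taller wave
    CGContextBeginPath(ctx);
    CGContextMoveToPoint(ctx, 0, midY);
    for (CGFloat x = 1; x <= bounds.size.width; x += 1.0) {
        CGFloat progress = x / bounds.size.width;  // 0..1 across the view
        CGFloat y = midY + amplitude * sinf(progress * 2.0 * M_PI);
        CGContextAddLineToPoint(ctx, x, y);
    }
    CGContextSetStrokeColorWithColor(ctx, [UIColor whiteColor].CGColor);
    CGContextSetLineWidth(ctx, 2.0);
    CGContextStrokePath(ctx);
}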

How to check results of ExtAudioFileRead?

I'm using ExtAudioFileRead to read a WAV file into memory as a float * buffer. However, I'm not quite sure about the results: when I print them out, I get values from -1 to +1 (which should be theoretically correct), but how can I be sure that they are correct?
- (float *) readTestFileAndSize: (int *) size
{
CFStringRef str = CFStringCreateWithCString(
NULL,
[[[NSBundle mainBundle] pathForResource: @"25" ofType:@"wav"] UTF8String],
kCFStringEncodingMacRoman
);
CFURLRef inputFileURL = CFURLCreateWithFileSystemPath(
kCFAllocatorDefault,
str,
kCFURLPOSIXPathStyle,
false
);
ExtAudioFileRef fileRef;
ExtAudioFileOpenURL(inputFileURL, &fileRef);
SInt64 theFileLengthInFrames = 0;
// Get the total frame count
UInt32 thePropertySize = sizeof(theFileLengthInFrames);
ExtAudioFileGetProperty(fileRef, kExtAudioFileProperty_FileLengthFrames, &thePropertySize, &theFileLengthInFrames);
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat;
audioFormat.mBitsPerChannel = sizeof(Float32) * 8;
audioFormat.mChannelsPerFrame = 1; // Mono
audioFormat.mBytesPerFrame = audioFormat.mChannelsPerFrame * sizeof(Float32); // == sizeof(Float32)
audioFormat.mFramesPerPacket = 1;
audioFormat.mBytesPerPacket = audioFormat.mFramesPerPacket * audioFormat.mBytesPerFrame; // = sizeof(Float32)
// 3) Apply audio format to the Extended Audio File
ExtAudioFileSetProperty(
fileRef,
kExtAudioFileProperty_ClientDataFormat,
sizeof (AudioStreamBasicDescription), //= audioFormat
&audioFormat);
int numSamples = 1024; //How many samples to read in at a time
UInt32 sizePerPacket = audioFormat.mBytesPerPacket; // = sizeof(Float32) = 32bytes
UInt32 packetsPerBuffer = numSamples;
UInt32 outputBufferSize = packetsPerBuffer * sizePerPacket;
// So the lvalue of outputBuffer is the memory location where we have reserved space
UInt8 *outputBuffer = (UInt8 *)malloc(sizeof(UInt8 *) * outputBufferSize);
NSLog(#"outputBufferSize - %llu", theFileLengthInFrames);
float* total = malloc(theFileLengthInFrames * sizeof(float));
*size = theFileLengthInFrames;
AudioBufferList convertedData;
convertedData.mNumberBuffers = 1; // Set this to 1 for mono
convertedData.mBuffers[0].mNumberChannels = audioFormat.mChannelsPerFrame; //also = 1
convertedData.mBuffers[0].mDataByteSize = outputBufferSize;
convertedData.mBuffers[0].mData = outputBuffer; //
int totalBytes = 0;
UInt32 frameCount = numSamples;
while (frameCount > 0) {
ExtAudioFileRead(fileRef, &frameCount, &convertedData);
if (frameCount > 0) {
AudioBuffer audioBuffer = convertedData.mBuffers[0];
float *samplesAsCArray = (float *)audioBuffer.mData;
memcpy(total + totalBytes, samplesAsCArray, frameCount * sizeof(float));
totalBytes += frameCount;
}
}
return total;
}
There are only a few ways to test that I can think of:
Compare the data you've loaded to data loaded by something you know works
Play the audio data back out somehow (probably using an AudioQueue)
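As a rough complement to those two, a purely numerical sanity check over the returned buffer will catch the obvious failure modes (all zeros, clipping, garbage). A small sketch using the method from the question:
// Real audio should give min/max inside [-1, 1] and a non-zero RMS.
int size = 0;
float *samples = [self readTestFileAndSize:&size];
float minV = 0.0f, maxV = 0.0f;
double sumSquares = 0.0;
for (int i = 0; i < size; i++) {
    float s = samples[i];
    if (s < minV) minV = s;
    if (s > maxV) maxV = s;
    sumSquares += (double)s * s;
}
double rms = size > 0 ? sqrt(sumSquares / size) : 0.0;
NSLog(@"min %f  max %f  rms %f over %d samples", minV, maxV, rms, size);
free(samples);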

OpenAL alSourceQueueBuffers & alSourceUnqueueBuffers

Everyone, I have a problem with the alSourceUnqueueBuffers API when I use the OpenAL library.
My problem is as follows:
1. I play PCM music through a streaming mechanism.
2. The application can queue up one or multiple buffer names using alSourceQueueBuffers.
When a buffer has been processed, I want to fill it with new audio data in my function getSourceState, but when I use the OpenAL API alSourceUnqueueBuffers, it returns an error: AL_INVALID_OPERATION. I am doing this as described in the OpenAL documentation.
So I tried a workaround: I call alSourceStop(source) before alSourceUnqueueBuffers, and alSourcePlay(source) after I have filled in new data through alBufferData & alSourceQueueBuffers. But that is bad, because it breaks up the music.
Who can help me find this problem?
And where can I find more information and methods about OpenAL?
I am waiting for your help. Thanks, everyone.
So my code is as follows:
.h:
@interface myPlayback : NSObject
{
ALuint source;
ALuint * buffers;
ALCcontext* context;
ALCdevice* device;
unsigned long long offset;
ALenum m_format;
ALsizei m_freq;
void* data;
}
@end
.m
- (void)initOpenAL
{
ALenum error;
// Create a new OpenAL Device
// Pass NULL to specify the system’s default output device
device = alcOpenDevice(NULL);
if (device != NULL)
{
// Create a new OpenAL Context
// The new context will render to the OpenAL Device just created
context = alcCreateContext(device, 0);
if (context != NULL)
{
// Make the new context the Current OpenAL Context
alcMakeContextCurrent(context);
// Create some OpenAL Buffer Objects
buffers = (ALuint*)malloc(sizeof(ALuint) * 5);
alGenBuffers(5, buffers);
if((error = alGetError()) != AL_NO_ERROR) {
NSLog(#"Error Generating Buffers: %x", error);
exit(1);
}
// Create some OpenAL Source Objects
alGenSources(1, &source);
if(alGetError() != AL_NO_ERROR)
{
NSLog(#"Error generating sources! %x\n", error);
exit(1);
}
}
}
// clear any errors
alGetError();
[self initBuffer];
[self initSource];
}
- (void) initBuffer
{
ALenum error = AL_NO_ERROR;
ALenum format;
ALsizei size;
ALsizei freq;
NSBundle* bundle = [NSBundle mainBundle];
// get some audio data from a wave file
CFURLRef fileURL = (CFURLRef)[[NSURL fileURLWithPath:[bundle pathForResource:@"4" ofType:@"caf"]] retain];
if (fileURL)
{
data = MyGetOpenALAudioData(fileURL, &size, &format, &freq);
CFRelease(fileURL);
m_freq = freq;
m_format = format;
if((error = alGetError()) != AL_NO_ERROR) {
NSLog(@"error loading sound: %x\n", error);
exit(1);
}
alBufferData(buffers[0], format, data, READ_SIZE , freq);
offset += READ_SIZE;
alBufferData(buffers[1], format, data + offset, READ_SIZE, freq);
offset += READ_SIZE;
alBufferData(buffers[2], format, data + offset, READ_SIZE, freq);
offset += READ_SIZE;
alBufferData(buffers[3], format, data + offset, READ_SIZE, freq);
offset += READ_SIZE;
alBufferData(buffers[4], format, data + offset, READ_SIZE, freq);
offset += READ_SIZE;
if((error = alGetError()) != AL_NO_ERROR) {
NSLog(#"error attaching audio to buffer: %x\n", error);
}
}
else
NSLog(#"Could not find file!\n");
}
- (void) initSource
{
ALenum error = AL_NO_ERROR;
alGetError(); // Clear the error
// Turn Looping ON
alSourcei(source, AL_LOOPING, AL_TRUE);
// Set Source Position
float sourcePosAL[] = {sourcePos.x, kDefaultDistance, sourcePos.y};
alSourcefv(source, AL_POSITION, sourcePosAL);
// Set Source Reference Distance
alSourcef(source, AL_REFERENCE_DISTANCE, 50.0f);
alSourceQueueBuffers(source, 5, buffers);
if((error = alGetError()) != AL_NO_ERROR) {
NSLog(#"Error attaching buffer to source: %x\n", error);
exit(1);
}
}
- (void)startSound
{
ALenum error;
NSLog(#"Start!\n");
// Begin playing our source file
alSourcePlay(source);
if((error = alGetError()) != AL_NO_ERROR) {
NSLog(#"error starting source: %x\n", error);
} else {
// Mark our state as playing (the view looks at this)
self.isPlaying = YES;
}
while (1) {
[self getSourceState];
}
}
-(void)getSourceState
{
int queued;
int processed;
int state;
alGetSourcei(source, AL_BUFFERS_QUEUED, &queued);
alGetSourcei(source, AL_BUFFERS_PROCESSED, &processed);
alGetSourcei(source, AL_SOURCE_STATE, &state);
NSLog(#"%d", queued);
NSLog(#"%d", processed);
NSLog(#"===================================");
while (processed > 0) {
for (int i = 0; i < processed; ++i) {
ALuint buf;
alGetError();
// alSourceStop(source);
ALenum y = alGetError();
NSLog(#"%d", y);
alSourceUnqueueBuffers(source, 1, &buf);
ALenum i = alGetError();
NSLog(#"%d", i);
processed --;
alBufferData(buf, m_format, data + offset, READ_SIZE, m_freq);
ALenum j = alGetError();
NSLog(#"%d", j);
alSourceQueueBuffers(source, 1, &buf);
ALenum k = alGetError();
NSLog(#"%d", k);
offset += READ_SIZE;
// alSourcePlay(source);
}
}
// [self getSourceState];
}
I found the reason for the problem.
The reason is that I turned looping on: alSourcei(source, AL_LOOPING, AL_TRUE);
If you set this, then when the source has processed a buffer and you want to fill it with new data or remove it from the source, you will get the error.
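In other words, for a streaming source leave AL_LOOPING at AL_FALSE and keep the queue topped up yourself. A hedged sketch of the refill loop, reusing the data/offset/READ_SIZE variables from the question:
alSourcei(source, AL_LOOPING, AL_FALSE);              // streaming sources must not loop
ALint processed = 0;
alGetSourcei(source, AL_BUFFERS_PROCESSED, &processed);
while (processed-- > 0) {
    ALuint buf;
    alSourceUnqueueBuffers(source, 1, &buf);          // legal now that looping is off
    alBufferData(buf, m_format, (char *)data + offset, READ_SIZE, m_freq);
    alSourceQueueBuffers(source, 1, &buf);
    offset += READ_SIZE;
}
ALint state = 0;                                      // restart if the queue ever ran dry
alGetSourcei(source, AL_SOURCE_STATE, &state);
if (state != AL_PLAYING)
    alSourcePlay(source);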

Using AVMutableAudioMix to adjust volumes for tracks within asset

I'm applying an AVMutableAudioMix to an asset I've created; the asset generally consists of 3-5 audio tracks (no video). The goal is to add several volume commands throughout the play time, i.e. I'd like to set the volume to 0.1 at 1 second, 0.5 at 2 seconds, then 0.1 or whatever at 3 seconds. I'm just now trying to do this with an AVPlayer, but will also later use it when exporting the session to a file. The problem is that it only seems to care about the first volume command and seems to ignore all later volume commands. If the first command sets the volume to 0.1, that will be the permanent volume for this track for the rest of the asset. This is despite the fact that it really looks like you should be able to add any number of these commands, seeing as the "inputParameters" member of AVMutableAudioMix is really an NSArray holding a series of AVMutableAudioMixInputParameters. Has anyone figured this out?
Edit: I have partly figured this out. I'm able to add several volume changes throughout a certain track, but the timings appear way off and I'm not sure how to fix that. For example, setting the volume to 0.0 at 5 seconds, then 1.0 at 10 seconds and then back to 0.0 at 15 seconds should, you would assume, switch the volume on and off promptly at those times, but the results are always very unpredictable: sounds ramp up and down, and it only sometimes works (with sudden volume changes, as expected from setVolume). If anyone has gotten the AudioMix to work, please provide an example.
The code I use to change the track volume is:
AVURLAsset *soundTrackAsset = [[AVURLAsset alloc]initWithURL:trackUrl options:nil];
AVMutableAudioMixInputParameters *audioInputParams = [AVMutableAudioMixInputParameters audioMixInputParameters];
[audioInputParams setVolume:0.5 atTime:kCMTimeZero];
[audioInputParams setTrackID:[[[soundTrackAsset tracksWithMediaType:AVMediaTypeAudio] objectAtIndex:0] trackID]];
audioMix = [AVMutableAudioMix audioMix];
audioMix.inputParameters = [NSArray arrayWithObject:audioInputParams];
Don't forget to add the audiomix to your AVAssetExportSession
exportSession.audioMix = audioMix;
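If you want smooth, predictable transitions rather than instant jumps (the unpredictable ramping described in the question's edit), AVMutableAudioMixInputParameters also has an explicit ramp API. A short sketch with made-up times:
// Fade this track out between 5s and 7s, then back in between 13s and 15s.
CMTimeRange fadeOut = CMTimeRangeMake(CMTimeMakeWithSeconds(5, 600), CMTimeMakeWithSeconds(2, 600));
CMTimeRange fadeIn  = CMTimeRangeMake(CMTimeMakeWithSeconds(13, 600), CMTimeMakeWithSeconds(2, 600));
[audioInputParams setVolumeRampFromStartVolume:1.0 toEndVolume:0.0 timeRange:fadeOut];
[audioInputParams setVolumeRampFromStartVolume:0.0 toEndVolume:1.0 timeRange:fadeIn];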
However, I notice it does not work with all formats, so you can use the following function to change the volume level of a stored file if you keep having issues with AVFoundation. Note, though, that this function can be quite slow.
-(void) ScaleAudioFileAmplitude:(NSURL *)theURL: (float) ampScale {
OSStatus err = noErr;
ExtAudioFileRef audiofile;
ExtAudioFileOpenURL((CFURLRef)theURL, &audiofile);
assert(audiofile);
// get some info about the file's format.
AudioStreamBasicDescription fileFormat;
UInt32 size = sizeof(fileFormat);
err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_FileDataFormat, &size, &fileFormat);
// we'll need to know what type of file it is later when we write
AudioFileID aFile;
size = sizeof(aFile);
err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_AudioFile, &size, &aFile);
AudioFileTypeID fileType;
size = sizeof(fileType);
err = AudioFileGetProperty(aFile, kAudioFilePropertyFileFormat, &size, &fileType);
// tell the ExtAudioFile API what format we want samples back in
AudioStreamBasicDescription clientFormat;
bzero(&clientFormat, sizeof(clientFormat));
clientFormat.mChannelsPerFrame = fileFormat.mChannelsPerFrame;
clientFormat.mBytesPerFrame = 4;
clientFormat.mBytesPerPacket = clientFormat.mBytesPerFrame;
clientFormat.mFramesPerPacket = 1;
clientFormat.mBitsPerChannel = 32;
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mSampleRate = fileFormat.mSampleRate;
clientFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved;
err = ExtAudioFileSetProperty(audiofile, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);
// find out how many frames we need to read
SInt64 numFrames = 0;
size = sizeof(numFrames);
err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_FileLengthFrames, &size, &numFrames);
// create the buffers for reading in data
AudioBufferList *bufferList = malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer) * (clientFormat.mChannelsPerFrame - 1));
bufferList->mNumberBuffers = clientFormat.mChannelsPerFrame;
for (int ii=0; ii < bufferList->mNumberBuffers; ++ii) {
bufferList->mBuffers[ii].mDataByteSize = sizeof(float) * numFrames;
bufferList->mBuffers[ii].mNumberChannels = 1;
bufferList->mBuffers[ii].mData = malloc(bufferList->mBuffers[ii].mDataByteSize);
}
// read in the data
UInt32 rFrames = (UInt32)numFrames;
err = ExtAudioFileRead(audiofile, &rFrames, bufferList);
// close the file
err = ExtAudioFileDispose(audiofile);
// process the audio
for (int ii=0; ii < bufferList->mNumberBuffers; ++ii) {
float *fBuf = (float *)bufferList->mBuffers[ii].mData;
for (int jj=0; jj < rFrames; ++jj) {
*fBuf = *fBuf * ampScale;
fBuf++;
}
}
// open the file for writing
err = ExtAudioFileCreateWithURL((CFURLRef)theURL, fileType, &fileFormat, NULL, kAudioFileFlags_EraseFile, &audiofile);
// tell the ExtAudioFile API what format we'll be sending samples in
err = ExtAudioFileSetProperty(audiofile, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);
// write the data
err = ExtAudioFileWrite(audiofile, rFrames, bufferList);
// close the file
ExtAudioFileDispose(audiofile);
// destroy the buffers
for (int ii=0; ii < bufferList->mNumberBuffers; ++ii) {
free(bufferList->mBuffers[ii].mData);
}
free(bufferList);
bufferList = NULL;
}
Please also note that you may need to fine-tune the ampScale you want depending on where your volume value is coming from. The system volume goes from 0 to 1 and can be obtained by calling AudioSessionGetProperty:
Float32 volume;
UInt32 dataSize = sizeof(Float32);
AudioSessionGetProperty (
kAudioSessionProperty_CurrentHardwareOutputVolume,
&dataSize,
&volume
);
The Audio Extension Toolbox function above doesn't quite work anymore as-is due to API changes. It now requires you to set up an audio session category. When setting the export properties I was getting an error code of '?cat' (which the NSError will print out in decimal).
Here is the code that works now in iOS 5.1. It is also incredibly slow; just by looking I'd say several times slower. And it is memory intensive, since it appears to load the file into memory, which generates memory warnings for 10 MB mp3 files.
-(void) scaleAudioFileAmplitude:(NSURL *)theURL withAmpScale:(float) ampScale
{
OSStatus err = noErr;
ExtAudioFileRef audiofile;
ExtAudioFileOpenURL((CFURLRef)theURL, &audiofile);
assert(audiofile);
// get some info about the file's format.
AudioStreamBasicDescription fileFormat;
UInt32 size = sizeof(fileFormat);
err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_FileDataFormat, &size, &fileFormat);
// we'll need to know what type of file it is later when we write
AudioFileID aFile;
size = sizeof(aFile);
err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_AudioFile, &size, &aFile);
AudioFileTypeID fileType;
size = sizeof(fileType);
err = AudioFileGetProperty(aFile, kAudioFilePropertyFileFormat, &size, &fileType);
// tell the ExtAudioFile API what format we want samples back in
AudioStreamBasicDescription clientFormat;
bzero(&clientFormat, sizeof(clientFormat));
clientFormat.mChannelsPerFrame = fileFormat.mChannelsPerFrame;
clientFormat.mBytesPerFrame = 4;
clientFormat.mBytesPerPacket = clientFormat.mBytesPerFrame;
clientFormat.mFramesPerPacket = 1;
clientFormat.mBitsPerChannel = 32;
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mSampleRate = fileFormat.mSampleRate;
clientFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved;
err = ExtAudioFileSetProperty(audiofile, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);
// find out how many frames we need to read
SInt64 numFrames = 0;
size = sizeof(numFrames);
err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_FileLengthFrames, &size, &numFrames);
// create the buffers for reading in data
AudioBufferList *bufferList = malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer) * (clientFormat.mChannelsPerFrame - 1));
bufferList->mNumberBuffers = clientFormat.mChannelsPerFrame;
//printf("bufferList->mNumberBuffers = %lu \n\n", bufferList->mNumberBuffers);
for (int ii=0; ii < bufferList->mNumberBuffers; ++ii) {
bufferList->mBuffers[ii].mDataByteSize = sizeof(float) * numFrames;
bufferList->mBuffers[ii].mNumberChannels = 1;
bufferList->mBuffers[ii].mData = malloc(bufferList->mBuffers[ii].mDataByteSize);
}
// read in the data
UInt32 rFrames = (UInt32)numFrames;
err = ExtAudioFileRead(audiofile, &rFrames, bufferList);
// close the file
err = ExtAudioFileDispose(audiofile);
// process the audio
for (int ii=0; ii < bufferList->mNumberBuffers; ++ii) {
float *fBuf = (float *)bufferList->mBuffers[ii].mData;
for (int jj=0; jj < rFrames; ++jj) {
*fBuf = *fBuf * ampScale;
fBuf++;
}
}
// open the file for writing
err = ExtAudioFileCreateWithURL((CFURLRef)theURL, fileType, &fileFormat, NULL, kAudioFileFlags_EraseFile, &audiofile);
NSError *error = NULL;
/*************************** You Need This Now ****************************/
AVAudioSession *session = [AVAudioSession sharedInstance];
[session setCategory:AVAudioSessionCategoryAudioProcessing error:&error];
/************************* End You Need This Now **************************/
// tell the ExtAudioFile API what format we'll be sending samples in
err = ExtAudioFileSetProperty(audiofile, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);
error = [NSError errorWithDomain:NSOSStatusErrorDomain
code:err
userInfo:nil];
NSLog(#"Error: %#", [error description]);
// write the data
err = ExtAudioFileWrite(audiofile, rFrames, bufferList);
// close the file
ExtAudioFileDispose(audiofile);
// destroy the buffers
for (int ii=0; ii < bufferList->mNumberBuffers; ++ii) {
free(bufferList->mBuffers[ii].mData);
}
free(bufferList);
bufferList = NULL;
}
Thanks for the help provided in this post.
I just would like to add one thing: you should restore the AVAudioSession back to what it was, or you'll end up not being able to play anything afterwards.
AVAudioSession *session = [AVAudioSession sharedInstance];
NSString *originalSessionCategory = [session category];
[session setCategory:AVAudioSessionCategoryAudioProcessing error:&error];
...
...
// restore category
[session setCategory:originalSessionCategory error:&error];
if(error)
NSLog(#"%#",[error localizedDescription]);
Cheers
For setting different volumes on the mutable tracks you can use the code below:
self.audioMix = [AVMutableAudioMix audioMix];
AVMutableAudioMixInputParameters *audioInputParams = [AVMutableAudioMixInputParameters audioMixInputParameters];
[audioInputParams setVolume:0.1 atTime:kCMTimeZero];
[audioInputParams setVolume:0.1 atTime:kCMTimeZero];
audioInputParams.trackID = compositionAudioTrack2.trackID;
AVMutableAudioMixInputParameters *audioInputParams1 = [AVMutableAudioMixInputParameters audioMixInputParameters];
[audioInputParams1 setVolume:0.9 atTime:kCMTimeZero];
audioInputParams1.trackID = compositionAudioTrack1.trackID;
AVMutableAudioMixInputParameters *audioInputParams2 = [AVMutableAudioMixInputParameters audioMixInputParameters];
[audioInputParams2 setVolume:0.3 atTime:kCMTimeZero];
audioInputParams2.trackID = compositionAudioTrack.trackID;
self.audioMix.inputParameters =[NSArray arrayWithObjects:audioInputParams,audioInputParams1,audioInputParams2, nil];

How can I get AAC encoding with ExtAudioFile on iOS to work?

I need to convert a WAVE file into an AAC encoded M4A file on iOS. I'm aware that AAC encoding is not supported on older devices or in the simulator. I'm testing that before I run the code. But I still can't get it to work.
I looked into Apple's very own iPhoneExtAudioFileConvertTest example and I thought I followed it exactly, but still no luck!
Currently, I get a -50 (= error in user parameter list) while trying to set the client format on the destination file. On the source file, it works.
Below is my code. Any help is very much appreciated, thanks!
UInt32 size;
// Open a source audio file.
ExtAudioFileRef sourceAudioFile;
ExtAudioFileOpenURL( (CFURLRef)sourceURL, &sourceAudioFile );
// Get the source data format
AudioStreamBasicDescription sourceFormat;
size = sizeof( sourceFormat );
result = ExtAudioFileGetProperty( sourceAudioFile, kExtAudioFileProperty_FileDataFormat, &size, &sourceFormat );
// Define the output format (AAC).
AudioStreamBasicDescription outputFormat;
outputFormat.mFormatID = kAudioFormatMPEG4AAC;
outputFormat.mSampleRate = 44100;
outputFormat.mChannelsPerFrame = 2;
// Use AudioFormat API to fill out the rest of the description.
size = sizeof( outputFormat );
AudioFormatGetProperty( kAudioFormatProperty_FormatInfo, 0, NULL, &size, &outputFormat);
// Make a destination audio file with this output format.
ExtAudioFileRef destAudioFile;
ExtAudioFileCreateWithURL( (CFURLRef)destURL, kAudioFileM4AType, &outputFormat, NULL, kAudioFileFlags_EraseFile, &destAudioFile );
// Create canonical PCM client format.
AudioStreamBasicDescription clientFormat;
clientFormat.mSampleRate = sourceFormat.mSampleRate;
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mFormatFlags = kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
clientFormat.mChannelsPerFrame = 2;
clientFormat.mBitsPerChannel = 16;
clientFormat.mBytesPerFrame = 4;
clientFormat.mBytesPerPacket = 4;
clientFormat.mFramesPerPacket = 1;
// Set the client format in source and destination file.
size = sizeof( clientFormat );
ExtAudioFileSetProperty( sourceAudioFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat );
size = sizeof( clientFormat );
ExtAudioFileSetProperty( destAudioFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat );
// Make a buffer
int bufferSizeInFrames = 8000;
int bufferSize = ( bufferSizeInFrames * sourceFormat.mBytesPerFrame );
UInt8 * buffer = (UInt8 *)malloc( bufferSize );
AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mNumberChannels = clientFormat.mChannelsPerFrame;
bufferList.mBuffers[0].mData = buffer;
bufferList.mBuffers[0].mDataByteSize = ( bufferSize );
while( TRUE )
{
// Try to fill the buffer to capacity.
UInt32 framesRead = bufferSizeInFrames;
ExtAudioFileRead( sourceAudioFile, &framesRead, &bufferList );
// 0 frames read means EOF.
if( framesRead == 0 )
break;
// Write.
ExtAudioFileWrite( destAudioFile, framesRead, &bufferList );
}
free( buffer );
// Close the files.
ExtAudioFileDispose( sourceAudioFile );
ExtAudioFileDispose( destAudioFile );
Answered my own question: I had to pass this problem to my colleague and he got it to work! I never had the chance to analyze my original problem, but I thought I'd post it here for the sake of completeness. The following method is called from within an NSThread. Parameters are set via the threadDictionary, and he created a custom delegate to transmit progress feedback (the following is supposed to be one block of method implementation):
- (void)encodeToAAC
{
RXAudioEncoderStatusType encoderStatus;
OSStatus result = noErr;
BOOL success = NO;
BOOL cancelled = NO;
UInt32 size;
ExtAudioFileRef sourceAudioFile,destAudioFile;
AudioStreamBasicDescription sourceFormat,outputFormat, clientFormat;
SInt64 totalFrames;
unsigned long long encodedBytes, totalBytes;
int bufferSizeInFrames, bufferSize;
UInt8 * buffer;
AudioBufferList bufferList;
NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];
NSFileManager * fileManager = [[[NSFileManager alloc] init] autorelease];
NSMutableDictionary * threadDict = [[NSThread currentThread] threadDictionary];
NSObject<RXAudioEncodingDelegate> * delegate = (NSObject<RXAudioEncodingDelegate> *)[threadDict objectForKey:@"Delegate"];
NSString *sourcePath = (NSString *)[threadDict objectForKey:@"SourcePath"];
NSString *destPath = (NSString *)[threadDict objectForKey:@"DestinationPath"];
NSURL * sourceURL = [NSURL fileURLWithPath:sourcePath];
NSURL * destURL = [NSURL fileURLWithPath:destPath];
// Open a source audio file.
result = ExtAudioFileOpenURL( (CFURLRef)sourceURL, &sourceAudioFile );
if( result != noErr )
{
DLog( #"Error in ExtAudioFileOpenURL: %ld", result );
goto bailout;
}
// Get the source data format
size = sizeof( sourceFormat );
result = ExtAudioFileGetProperty( sourceAudioFile, kExtAudioFileProperty_FileDataFormat, &size, &sourceFormat );
if( result != noErr )
{
DLog( #"Error in ExtAudioFileGetProperty: %ld", result );
goto bailout;
}
// Define the output format (AAC).
memset(&outputFormat, 0, sizeof(outputFormat));
outputFormat.mFormatID = kAudioFormatMPEG4AAC;
outputFormat.mSampleRate = 44100;
outputFormat.mFormatFlags = kMPEG4Object_AAC_Main;
outputFormat.mChannelsPerFrame = 2;
outputFormat.mBitsPerChannel = 0;
outputFormat.mBytesPerFrame = 0;
outputFormat.mBytesPerPacket = 0;
outputFormat.mFramesPerPacket = 1024;
// Use AudioFormat API to fill out the rest of the description.
//size = sizeof( outputFormat );
//AudioFormatGetProperty( kAudioFormatProperty_FormatInfo, 0, NULL, &size, &outputFormat);
// Make a destination audio file with this output format.
result = ExtAudioFileCreateWithURL( (CFURLRef)destURL, kAudioFileM4AType, &outputFormat, NULL, kAudioFileFlags_EraseFile, &destAudioFile );
if( result != noErr )
{
DLog( #"Error creating destination file: %ld", result );
goto bailout;
}
// Create the canonical PCM client format.
memset(&clientFormat, 0, sizeof(clientFormat));
clientFormat.mSampleRate = sourceFormat.mSampleRate;
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; //kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
clientFormat.mChannelsPerFrame = 2;
clientFormat.mBitsPerChannel = 16;
clientFormat.mBytesPerFrame = 4;
clientFormat.mBytesPerPacket = 4;
clientFormat.mFramesPerPacket = 1;
// Set the client format in source and destination file.
size = sizeof( clientFormat );
result = ExtAudioFileSetProperty( sourceAudioFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat );
if( result != noErr )
{
DLog( #"Error while setting client format in source file: %ld", result );
goto bailout;
}
size = sizeof( clientFormat );
result = ExtAudioFileSetProperty( destAudioFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat );
if( result != noErr )
{
DLog( #"Error while setting client format in destination file: %ld", result );
goto bailout;
}
// Make a buffer
bufferSizeInFrames = 8000;
bufferSize = ( bufferSizeInFrames * sourceFormat.mBytesPerFrame );
buffer = (UInt8 *)malloc( bufferSize );
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mNumberChannels = clientFormat.mChannelsPerFrame;
bufferList.mBuffers[0].mData = buffer;
bufferList.mBuffers[0].mDataByteSize = ( bufferSize );
// Obtain total number of audio frames to encode
size = sizeof( totalFrames );
result = ExtAudioFileGetProperty( sourceAudioFile, kExtAudioFileProperty_FileLengthFrames, &size, &totalFrames );
if( result != noErr )
{
DLog( #"Error in ExtAudioFileGetProperty, could not get kExtAudioFileProperty_FileLengthFrames from sourceFile: %ld", result );
goto bailout;
}
encodedBytes = 0;
totalBytes = totalFrames * sourceFormat.mBytesPerFrame;
[threadDict setValue:[NSValue value:&totalBytes withObjCType:#encode(unsigned long long)] forKey:#"TotalBytes"];
if (delegate != nil)
[self performSelectorOnMainThread:#selector(didStartEncoding) withObject:nil waitUntilDone:NO];
while( TRUE )
{
// Try to fill the buffer to capacity.
UInt32 framesRead = bufferSizeInFrames;
result = ExtAudioFileRead( sourceAudioFile, &framesRead, &bufferList );
if( result != noErr )
{
DLog( #"Error in ExtAudioFileRead: %ld", result );
success = NO;
break;
}
// 0 frames read means EOF.
if( framesRead == 0 ) {
success = YES;
break;
}
// Write.
result = ExtAudioFileWrite( destAudioFile, framesRead, &bufferList );
if( result != noErr )
{
DLog( #"Error in ExtAudioFileWrite: %ld", result );
success = NO;
break;
}
encodedBytes += framesRead * sourceFormat.mBytesPerFrame;
if (delegate != nil)
[self performSelectorOnMainThread:#selector(didEncodeBytes:) withObject:[NSValue value:&encodedBytes withObjCType:#encode(unsigned long long)] waitUntilDone:NO];
if ([[NSThread currentThread] isCancelled]) {
cancelled = YES;
DLog( #"Encoding was cancelled." );
success = NO;
break;
}
}
free( buffer );
// Close the files.
ExtAudioFileDispose( sourceAudioFile );
ExtAudioFileDispose( destAudioFile );
bailout:
encoderStatus.result = result;
[threadDict setValue:[NSValue value:&encoderStatus withObjCType:@encode(RXAudioEncoderStatusType)] forKey:@"EncodingError"];
// Report to the delegate if one exists
if (delegate != nil)
if (success)
[self performSelectorOnMainThread:@selector(didEncodeFile) withObject:nil waitUntilDone:YES];
else if (cancelled)
[self performSelectorOnMainThread:@selector(encodingCancelled) withObject:nil waitUntilDone:YES];
else
[self performSelectorOnMainThread:@selector(failedToEncodeFile) withObject:nil waitUntilDone:YES];
// Clear the partially encoded file if encoding failed or is cancelled midway
if ((cancelled || !success) && [fileManager fileExistsAtPath:destPath])
[fileManager removeItemAtURL:destURL error:NULL];
[threadDict setValue:[NSNumber numberWithBool:NO] forKey:@"isEncoding"];
[pool release];
}
Are you sure the sample rates match? Can you print the values for clientFormat and outputFormat at the point you’re getting the error? Otherwise I think you might need an AudioConverter.
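A small helper for printing an AudioStreamBasicDescription makes that comparison easier; a sketch (the helper name is mine):
static void PrintASBD(const char *label, const AudioStreamBasicDescription *asbd) {
    char fourCC[5] = {0};
    UInt32 bigEndianID = CFSwapInt32HostToBig(asbd->mFormatID);   // make the four-char code readable
    memcpy(fourCC, &bigEndianID, 4);
    printf("%s: %.0f Hz '%s' %u ch, %u bits, %u bytes/frame, %u frames/packet, flags 0x%x\n",
           label, asbd->mSampleRate, fourCC,
           (unsigned)asbd->mChannelsPerFrame, (unsigned)asbd->mBitsPerChannel,
           (unsigned)asbd->mBytesPerFrame, (unsigned)asbd->mFramesPerPacket,
           (unsigned)asbd->mFormatFlags);
}
// e.g. PrintASBD("client", &clientFormat); PrintASBD("output", &outputFormat);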
I tried out the code in Sebastian's answer and while it worked for uncompressed files (aif, wav, caf), it didn't for a lossy compressed file (mp3). I also had an error code of -50, but in ExtAudioFileRead rather than ExtAudioFileSetProperty. From this question I learned that this error signifies a problem with the function parameters. Turns out the buffer for reading the audio file had a size of 0 bytes, a result of this line:
int bufferSize = ( bufferSizeInFrames * sourceFormat.mBytesPerFrame );
Switching it to use the bytes per frame from clientFormat instead (sourceFormat's value was 0) worked for me:
int bufferSize = ( bufferSizeInFrames * clientFormat.mBytesPerFrame );
This line was also in the question code, but I don't think that was the problem (but I had too much text for a comment).
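A defensive version of that allocation that handles both compressed and uncompressed sources might look like this (a sketch based on the two lines above):
// mBytesPerFrame is 0 for compressed source formats (mp3, AAC), so fall back
// to the uncompressed client format when sizing the read buffer.
UInt32 bytesPerFrame = sourceFormat.mBytesPerFrame ? sourceFormat.mBytesPerFrame
                                                   : clientFormat.mBytesPerFrame;
int bufferSize = bufferSizeInFrames * bytesPerFrame;
UInt8 *buffer = (UInt8 *)malloc(bufferSize);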