How can I get AAC encoding with ExtAudioFile on iOS to work?

I need to convert a WAVE file into an AAC-encoded M4A file on iOS. I'm aware that AAC encoding is not supported on older devices or in the simulator, and I test for that before I run the code. But I still can't get it to work.
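A check along these lines (a sketch using the AudioFormat API, not the exact code from the post) can verify that an AAC encoder is actually present on the device:
#include <AudioToolbox/AudioToolbox.h>
static BOOL aacEncoderAvailable( void )
{
    UInt32 formatID = kAudioFormatMPEG4AAC;
    UInt32 size = 0;
    // Ask how much data kAudioFormatProperty_Encoders would return for AAC;
    // a non-empty answer means at least one encoder is installed.
    OSStatus err = AudioFormatGetPropertyInfo( kAudioFormatProperty_Encoders,
                                               sizeof( formatID ), &formatID, &size );
    return ( err == noErr && size >= sizeof( AudioClassDescription ) );
}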
I looked into Apple's very own iPhoneExtAudioFileConvertTest example and I thought I followed it exactly, but still no luck!
Currently, I get a -50 (= error in user parameter list) while trying to set the client format on the destination file. On the source file, it works.
Below is my code. Any help is very much appreciated, thanks!
UInt32 size;
OSStatus result = noErr;
// Open a source audio file.
ExtAudioFileRef sourceAudioFile;
ExtAudioFileOpenURL( (CFURLRef)sourceURL, &sourceAudioFile );
// Get the source data format
AudioStreamBasicDescription sourceFormat;
size = sizeof( sourceFormat );
result = ExtAudioFileGetProperty( sourceAudioFile, kExtAudioFileProperty_FileDataFormat, &size, &sourceFormat );
// Define the output format (AAC).
AudioStreamBasicDescription outputFormat;
outputFormat.mFormatID = kAudioFormatMPEG4AAC;
outputFormat.mSampleRate = 44100;
outputFormat.mChannelsPerFrame = 2;
// Use AudioFormat API to fill out the rest of the description.
size = sizeof( outputFormat );
AudioFormatGetProperty( kAudioFormatProperty_FormatInfo, 0, NULL, &size, &outputFormat);
// Make a destination audio file with this output format.
ExtAudioFileRef destAudioFile;
ExtAudioFileCreateWithURL( (CFURLRef)destURL, kAudioFileM4AType, &outputFormat, NULL, kAudioFileFlags_EraseFile, &destAudioFile );
// Create canonical PCM client format.
AudioStreamBasicDescription clientFormat;
clientFormat.mSampleRate = sourceFormat.mSampleRate;
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mFormatFlags = kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
clientFormat.mChannelsPerFrame = 2;
clientFormat.mBitsPerChannel = 16;
clientFormat.mBytesPerFrame = 4;
clientFormat.mBytesPerPacket = 4;
clientFormat.mFramesPerPacket = 1;
// Set the client format in source and destination file.
size = sizeof( clientFormat );
ExtAudioFileSetProperty( sourceAudioFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat );
size = sizeof( clientFormat );
ExtAudioFileSetProperty( destAudioFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat );
// Make a buffer
int bufferSizeInFrames = 8000;
int bufferSize = ( bufferSizeInFrames * sourceFormat.mBytesPerFrame );
UInt8 * buffer = (UInt8 *)malloc( bufferSize );
AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mNumberChannels = clientFormat.mChannelsPerFrame;
bufferList.mBuffers[0].mData = buffer;
bufferList.mBuffers[0].mDataByteSize = ( bufferSize );
while( TRUE )
{
// Try to fill the buffer to capacity.
UInt32 framesRead = bufferSizeInFrames;
ExtAudioFileRead( sourceAudioFile, &framesRead, &bufferList );
// 0 frames read means EOF.
if( framesRead == 0 )
break;
// Write.
ExtAudioFileWrite( destAudioFile, framesRead, &bufferList );
}
free( buffer );
// Close the files.
ExtAudioFileDispose( sourceAudioFile );
ExtAudioFileDispose( destAudioFile );

Answered my own question: I had to pass this problem to my colleague and he got it to work! I never had the chance to analyze my original problem, but I'm posting his solution here for the sake of completeness. The following method is called from within an NSThread. Parameters are set via the threadDictionary, and he created a custom delegate to report progress feedback:
- (void)encodeToAAC
{
RXAudioEncoderStatusType encoderStatus;
OSStatus result = noErr;
BOOL success = NO;
BOOL cancelled = NO;
UInt32 size;
ExtAudioFileRef sourceAudioFile,destAudioFile;
AudioStreamBasicDescription sourceFormat,outputFormat, clientFormat;
SInt64 totalFrames;
unsigned long long encodedBytes, totalBytes;
int bufferSizeInFrames, bufferSize;
UInt8 * buffer;
AudioBufferList bufferList;
NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];
NSFileManager * fileManager = [[[NSFileManager alloc] init] autorelease];
NSMutableDictionary * threadDict = [[NSThread currentThread] threadDictionary];
NSObject<RXAudioEncodingDelegate> * delegate = (NSObject<RXAudioEncodingDelegate> *)[threadDict objectForKey:@"Delegate"];
NSString *sourcePath = (NSString *)[threadDict objectForKey:@"SourcePath"];
NSString *destPath = (NSString *)[threadDict objectForKey:@"DestinationPath"];
NSURL * sourceURL = [NSURL fileURLWithPath:sourcePath];
NSURL * destURL = [NSURL fileURLWithPath:destPath];
// Open a source audio file.
result = ExtAudioFileOpenURL( (CFURLRef)sourceURL, &sourceAudioFile );
if( result != noErr )
{
DLog( @"Error in ExtAudioFileOpenURL: %ld", result );
goto bailout;
}
// Get the source data format
size = sizeof( sourceFormat );
result = ExtAudioFileGetProperty( sourceAudioFile, kExtAudioFileProperty_FileDataFormat, &size, &sourceFormat );
if( result != noErr )
{
DLog( @"Error in ExtAudioFileGetProperty: %ld", result );
goto bailout;
}
// Define the output format (AAC).
memset(&outputFormat, 0, sizeof(outputFormat));
outputFormat.mFormatID = kAudioFormatMPEG4AAC;
outputFormat.mSampleRate = 44100;
outputFormat.mFormatFlags = kMPEG4Object_AAC_Main;
outputFormat.mChannelsPerFrame = 2;
outputFormat.mBitsPerChannel = 0;
outputFormat.mBytesPerFrame = 0;
outputFormat.mBytesPerPacket = 0;
outputFormat.mFramesPerPacket = 1024;
// Use AudioFormat API to fill out the rest of the description.
//size = sizeof( outputFormat );
//AudioFormatGetProperty( kAudioFormatProperty_FormatInfo, 0, NULL, &size, &outputFormat);
// Make a destination audio file with this output format.
result = ExtAudioFileCreateWithURL( (CFURLRef)destURL, kAudioFileM4AType, &outputFormat, NULL, kAudioFileFlags_EraseFile, &destAudioFile );
if( result != noErr )
{
DLog( @"Error creating destination file: %ld", result );
goto bailout;
}
// Create the canonical PCM client format.
memset(&clientFormat, 0, sizeof(clientFormat));
clientFormat.mSampleRate = sourceFormat.mSampleRate;
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; //kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
clientFormat.mChannelsPerFrame = 2;
clientFormat.mBitsPerChannel = 16;
clientFormat.mBytesPerFrame = 4;
clientFormat.mBytesPerPacket = 4;
clientFormat.mFramesPerPacket = 1;
// Set the client format in source and destination file.
size = sizeof( clientFormat );
result = ExtAudioFileSetProperty( sourceAudioFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat );
if( result != noErr )
{
DLog( @"Error while setting client format in source file: %ld", result );
goto bailout;
}
size = sizeof( clientFormat );
result = ExtAudioFileSetProperty( destAudioFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat );
if( result != noErr )
{
DLog( @"Error while setting client format in destination file: %ld", result );
goto bailout;
}
// Make a buffer
bufferSizeInFrames = 8000;
bufferSize = ( bufferSizeInFrames * sourceFormat.mBytesPerFrame );
buffer = (UInt8 *)malloc( bufferSize );
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mNumberChannels = clientFormat.mChannelsPerFrame;
bufferList.mBuffers[0].mData = buffer;
bufferList.mBuffers[0].mDataByteSize = ( bufferSize );
// Obtain total number of audio frames to encode
size = sizeof( totalFrames );
result = ExtAudioFileGetProperty( sourceAudioFile, kExtAudioFileProperty_FileLengthFrames, &size, &totalFrames );
if( result != noErr )
{
DLog( @"Error in ExtAudioFileGetProperty, could not get kExtAudioFileProperty_FileLengthFrames from sourceFile: %ld", result );
goto bailout;
}
encodedBytes = 0;
totalBytes = totalFrames * sourceFormat.mBytesPerFrame;
[threadDict setValue:[NSValue value:&totalBytes withObjCType:@encode(unsigned long long)] forKey:@"TotalBytes"];
if (delegate != nil)
[self performSelectorOnMainThread:@selector(didStartEncoding) withObject:nil waitUntilDone:NO];
while( TRUE )
{
// Try to fill the buffer to capacity.
UInt32 framesRead = bufferSizeInFrames;
result = ExtAudioFileRead( sourceAudioFile, &framesRead, &bufferList );
if( result != noErr )
{
DLog( @"Error in ExtAudioFileRead: %ld", result );
success = NO;
break;
}
// 0 frames read means EOF.
if( framesRead == 0 ) {
success = YES;
break;
}
// Write.
result = ExtAudioFileWrite( destAudioFile, framesRead, &bufferList );
if( result != noErr )
{
DLog( @"Error in ExtAudioFileWrite: %ld", result );
success = NO;
break;
}
encodedBytes += framesRead * sourceFormat.mBytesPerFrame;
if (delegate != nil)
[self performSelectorOnMainThread:@selector(didEncodeBytes:) withObject:[NSValue value:&encodedBytes withObjCType:@encode(unsigned long long)] waitUntilDone:NO];
if ([[NSThread currentThread] isCancelled]) {
cancelled = YES;
DLog( @"Encoding was cancelled." );
success = NO;
break;
}
}
free( buffer );
// Close the files.
ExtAudioFileDispose( sourceAudioFile );
ExtAudioFileDispose( destAudioFile );
bailout:
encoderStatus.result = result;
[threadDict setValue:[NSValue value:&encoderStatus withObjCType:@encode(RXAudioEncoderStatusType)] forKey:@"EncodingError"];
// Report to the delegate if one exists
if (delegate != nil)
if (success)
[self performSelectorOnMainThread:@selector(didEncodeFile) withObject:nil waitUntilDone:YES];
else if (cancelled)
[self performSelectorOnMainThread:@selector(encodingCancelled) withObject:nil waitUntilDone:YES];
else
[self performSelectorOnMainThread:@selector(failedToEncodeFile) withObject:nil waitUntilDone:YES];
// Clear the partially encoded file if encoding failed or is cancelled midway
if ((cancelled || !success) && [fileManager fileExistsAtPath:destPath])
[fileManager removeItemAtURL:destURL error:NULL];
[threadDict setValue:[NSNumber numberWithBool:NO] forKey:@"isEncoding"];
[pool release];
}

Are you sure the sample rates match? Can you print the values for clientFormat and outputFormat at the point you’re getting the error? Otherwise I think you might need an AudioConverter.
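For comparing the two descriptions field by field, a small dump helper (a hypothetical addition, not from the original comment) can be handy:
static void PrintASBD( const char *label, const AudioStreamBasicDescription *asbd )
{
    // mFormatID is a four-character code; byte-swap it so it prints readably.
    UInt32 fmt = CFSwapInt32HostToBig( asbd->mFormatID );
    char formatID[5] = { 0 };
    memcpy( formatID, &fmt, 4 );
    printf( "%s: rate=%.0f id='%s' flags=0x%x bytes/packet=%u frames/packet=%u bytes/frame=%u channels=%u bits=%u\n",
            label, asbd->mSampleRate, formatID,
            (unsigned int)asbd->mFormatFlags, (unsigned int)asbd->mBytesPerPacket,
            (unsigned int)asbd->mFramesPerPacket, (unsigned int)asbd->mBytesPerFrame,
            (unsigned int)asbd->mChannelsPerFrame, (unsigned int)asbd->mBitsPerChannel );
}
// e.g. PrintASBD( "clientFormat", &clientFormat ); PrintASBD( "outputFormat", &outputFormat );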

I tried out the code in Sebastian's answer, and while it worked for uncompressed files (AIFF, WAV, CAF), it didn't for a lossy compressed file (MP3). I also got an error code of -50, but in ExtAudioFileRead rather than ExtAudioFileSetProperty. From this question I learned that this error signifies a problem with the function parameters. It turns out the buffer for reading the audio file had a size of 0 bytes, a result of this line:
int bufferSize = ( bufferSizeInFrames * sourceFormat.mBytesPerFrame );
Switching it to use the bytes per frame from clientFormat instead (sourceFormat's value was 0) worked for me:
int bufferSize = ( bufferSizeInFrames * clientFormat.mBytesPerFrame );
This line was also in the question's code, but I don't think it was the problem there (I just had too much text for a comment).
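For what it's worth, a defensive variant (my sketch, under the same assumptions as above) that can never produce a zero-byte buffer:
UInt32 bytesPerFrame = clientFormat.mBytesPerFrame;
if ( bytesPerFrame == 0 ) // compressed formats report 0 here
    bytesPerFrame = clientFormat.mChannelsPerFrame * ( clientFormat.mBitsPerChannel / 8 );
int bufferSize = bufferSizeInFrames * bytesPerFrame;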

Related

How to check results of ExtAudioFileRead?

I'm using ExtAudioFileRead to read a WAV file into memory as a float * buffer. However, I'm not quite sure about the results: when I print them out, I get values from -1 to +1 (which is theoretically correct), but how can I be sure that they are correct?
- (float *) readTestFileAndSize: (int *) size
{
CFStringRef str = CFStringCreateWithCString(
NULL,
[[[NSBundle mainBundle] pathForResource: @"25" ofType:@"wav"] UTF8String],
kCFStringEncodingMacRoman
);
CFURLRef inputFileURL = CFURLCreateWithFileSystemPath(
kCFAllocatorDefault,
str,
kCFURLPOSIXPathStyle,
false
);
ExtAudioFileRef fileRef;
ExtAudioFileOpenURL(inputFileURL, &fileRef);
SInt64 theFileLengthInFrames = 0;
// Get the total frame count
UInt32 thePropertySize = sizeof(theFileLengthInFrames);
ExtAudioFileGetProperty(fileRef, kExtAudioFileProperty_FileLengthFrames, &thePropertySize, &theFileLengthInFrames);
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat;
audioFormat.mBitsPerChannel = sizeof(Float32) * 8;
audioFormat.mChannelsPerFrame = 1; // Mono
audioFormat.mBytesPerFrame = audioFormat.mChannelsPerFrame * sizeof(Float32); // == sizeof(Float32)
audioFormat.mFramesPerPacket = 1;
audioFormat.mBytesPerPacket = audioFormat.mFramesPerPacket * audioFormat.mBytesPerFrame; // = sizeof(Float32)
// 3) Apply audio format to the Extended Audio File
ExtAudioFileSetProperty(
fileRef,
kExtAudioFileProperty_ClientDataFormat,
sizeof (AudioStreamBasicDescription), //= audioFormat
&audioFormat);
int numSamples = 1024; //How many samples to read in at a time
UInt32 sizePerPacket = audioFormat.mBytesPerPacket; // = sizeof(Float32) = 4 bytes
UInt32 packetsPerBuffer = numSamples;
UInt32 outputBufferSize = packetsPerBuffer * sizePerPacket;
// reserve space for one buffer's worth of samples
UInt8 *outputBuffer = (UInt8 *)malloc(outputBufferSize);
NSLog(@"theFileLengthInFrames = %lld", theFileLengthInFrames);
float* total = malloc(theFileLengthInFrames * sizeof(float));
*size = theFileLengthInFrames;
AudioBufferList convertedData;
convertedData.mNumberBuffers = 1; // Set this to 1 for mono
convertedData.mBuffers[0].mNumberChannels = audioFormat.mChannelsPerFrame; //also = 1
convertedData.mBuffers[0].mDataByteSize = outputBufferSize;
convertedData.mBuffers[0].mData = outputBuffer; //
int totalFramesRead = 0; // counts frames (float samples) copied so far
UInt32 frameCount = numSamples;
while (frameCount > 0) {
ExtAudioFileRead(fileRef, &frameCount, &convertedData);
if (frameCount > 0) {
AudioBuffer audioBuffer = convertedData.mBuffers[0];
float *samplesAsCArray = (float *)audioBuffer.mData;
memcpy(total + totalFramesRead, samplesAsCArray, frameCount * sizeof(float));
totalFramesRead += frameCount;
}
}
return total;
}
There are only a few ways to test this that I can think of:
Compare the data you've loaded to data loaded by something you know works
Play the audio data back out somehow (probably using an AudioQueue)
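As a cheap first pass on the comparison idea, you can at least scan the returned floats and confirm they stay inside [-1, 1] and are not all zero (a sketch, assuming the total array and size returned by the method in the question):
float peak = 0.0f;
BOOL inRange = YES;
for ( int i = 0; i < size; i++ ) {
    float s = fabsf( total[i] );
    if ( s > 1.0f ) { inRange = NO; break; }
    if ( s > peak ) peak = s;
}
NSLog( @"peak amplitude = %f, all samples in range = %d", peak, inRange );
// silence (peak == 0) or out-of-range values both suggest a format mismatch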

How to decrypt a file in Objective-C/iOS that was encrypted in PHP?

I have googled a lot for this but found nothing useful.
I am getting a file that is encrypted in PHP using the following code:
mcrypt_encrypt(MCRYPT_RIJNDAEL_128, $privateencryptkey, base64_encode(file), MCRYPT_MODE_CBC, $hardvector);
I am unable to decrypt it in iOS. I have tried many libraries, such as NSData+CommonCrypto, NSFileManager-AES, and NSData-AES, but I have not had any success in decrypting the file.
Following is the Objective-C code used:
- (NSData *)AESDecryptWithPassphrase:(NSString *)pass
{
NSMutableData *ret = [NSMutableData dataWithCapacity:[self length]];
unsigned long rk[RKLENGTH(KEYBITS)];
unsigned char key[KEYLENGTH(KEYBITS)];
const char *password = [pass UTF8String];
for (int i = 0; i < sizeof(key); i++)
key[i] = password != 0 ? *password++ : 0;
int nrounds = rijndaelSetupDecrypt(rk, key, KEYBITS);
unsigned char *srcBytes = (unsigned char *)[self bytes];
int index = 0;
while (index < [self length])
{
unsigned char plaintext[16];
unsigned char ciphertext[16];
int j;
for (j = 0; j < sizeof(ciphertext); j++)
{
if (index >= [self length])
break;
ciphertext[j] = srcBytes[index++];
}
rijndaelDecrypt(rk, nrounds, ciphertext, plaintext);
[ret appendBytes:plaintext length:sizeof(plaintext)];
}
return ret;
}
This code works well for text but cannot decrypt files. When I save the decrypted files, the system reports a file error; those decrypted files cannot be opened on any system, so I think the file format gets corrupted in the process.
I also tried the following code but no success:
- (NSData *) decryptedDataUsingAlgorithm: (CCAlgorithm) algorithm
key: (id) key // data or string
initializationVector: (id) iv // data or string
options: (CCOptions) options
error: (CCCryptorStatus *) error
{
CCCryptorRef cryptor = NULL;
CCCryptorStatus status = kCCSuccess;
NSParameterAssert([key isKindOfClass: [NSData class]] || [key isKindOfClass: [NSString class]]);
NSParameterAssert(iv == nil || [iv isKindOfClass: [NSData class]] || [iv isKindOfClass: [NSString class]]);
NSMutableData * keyData, * ivData;
if ( [key isKindOfClass: [NSData class]] )
keyData = (NSMutableData *) [key mutableCopy];
else
keyData = [[key dataUsingEncoding: NSUTF8StringEncoding] mutableCopy];
if ( [iv isKindOfClass: [NSString class]] )
ivData = [[iv dataUsingEncoding: NSUTF8StringEncoding] mutableCopy];
else
ivData = (NSMutableData *) [iv mutableCopy]; // data or nil
[keyData autorelease];
[ivData autorelease];
// ensure correct lengths for key and iv data, based on algorithms
FixKeyLengths( algorithm, keyData, ivData );
status = CCCryptorCreate( kCCDecrypt, algorithm, options,
[keyData bytes], [keyData length], [ivData bytes],
&cryptor );
if ( status != kCCSuccess )
{
if ( error != NULL )
*error = status;
return ( nil );
}
NSData * result = [self _runCryptor: cryptor result: &status];
if ( (result == nil) && (error != NULL) )
*error = status;
CCCryptorRelease( cryptor );
return ( result );
}
2nd function from above code:
- (NSData *) _runCryptor: (CCCryptorRef) cryptor result: (CCCryptorStatus *) status
{
size_t bufsize = CCCryptorGetOutputLength( cryptor, (size_t)[self length], true );
void * buf = malloc( bufsize );
size_t bufused = 0;
size_t bytesTotal = 0;
*status = CCCryptorUpdate( cryptor, [self bytes], (size_t)[self length],
buf, bufsize, &bufused );
if ( *status != kCCSuccess )
{
free( buf );
return ( nil );
}
bytesTotal += bufused;
// From Brent Royal-Gordon (Twitter: architechies):
// Need to update buf ptr past used bytes when calling CCCryptorFinal()
*status = CCCryptorFinal( cryptor, buf + bufused, bufsize - bufused, &bufused );
if ( *status != kCCSuccess )
{
free( buf );
return ( nil );
}
bytesTotal += bufused;
return ( [NSData dataWithBytesNoCopy: buf length: bytesTotal] );
}
I haven't been able to solve this for a week...
One thing to note is the file parameter to mcrypt_encrypt: the file is base64-encoded prior to encryption (not that this makes much sense), which implies you have to base64-decode after decrypting.
The other parameters are straightforward:
MCRYPT_RIJNDAEL_128 is AES (Rijndael with a 128-bit block size), here with a 128-bit key.
MCRYPT_MODE_CBC is CBC mode, the default for CommonCrypto.
The padding to the block size is with null characters, which is rather nonstandard, so recovering the unpadded length may be a problem.
Not that you need yet another AES method, but this is the one I use:
#import <CommonCrypto/CommonCryptor.h>
+ (NSData *)doCipher:(NSData *)dataIn
iv:(NSData *)iv
key:(NSData *)symmetricKey
context:(CCOperation)encryptOrDecrypt
{
CCCryptorStatus ccStatus = kCCSuccess;
size_t cryptBytes = 0; // Number of bytes moved to buffer.
NSMutableData *dataOut = [NSMutableData dataWithLength:dataIn.length + kCCBlockSizeAES128];
ccStatus = CCCrypt( encryptOrDecrypt,
kCCAlgorithmAES128,
kCCOptionPKCS7Padding,
symmetricKey.bytes,
kCCKeySizeAES128,
iv.bytes,
dataIn.bytes,
dataIn.length,
dataOut.mutableBytes,
dataOut.length,
&cryptBytes);
if (ccStatus != kCCSuccess) {
NSLog(@"CCCrypt status: %d", ccStatus);
}
dataOut.length = cryptBytes;
return dataOut;
}
// Also add Security.framework to your project.
Note that it expects NSData input and that the padding is specified as standard PKCS#7.
See CommonCryptor.h
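Putting the pieces together for this particular PHP setup, a decrypt sketch might look like this. The names cipherData, keyData, and ivData are assumptions standing in for your inputs; it passes 0 instead of kCCOptionPKCS7Padding (mcrypt's null padding is not PKCS#7), strips the trailing NULs by hand, and only then base64-decodes:
NSMutableData *plain = [NSMutableData dataWithLength:cipherData.length + kCCBlockSizeAES128];
size_t moved = 0;
CCCryptorStatus status = CCCrypt( kCCDecrypt, kCCAlgorithmAES128,
                                  0, // no PKCS#7: mcrypt pads with NULs
                                  keyData.bytes, kCCKeySizeAES128, ivData.bytes,
                                  cipherData.bytes, cipherData.length,
                                  plain.mutableBytes, plain.length, &moved );
if ( status == kCCSuccess ) {
    plain.length = moved;
    // strip mcrypt's trailing NUL padding
    const uint8_t *b = plain.bytes;
    NSUInteger len = plain.length;
    while ( len > 0 && b[len - 1] == 0 ) len--;
    NSString *b64 = [[[NSString alloc] initWithData:[plain subdataWithRange:NSMakeRange( 0, len )]
                                           encoding:NSASCIIStringEncoding] autorelease];
    // base64 decoding via the iOS 7 NSData API; on older systems substitute your own decoder
    NSData *fileData = [[[NSData alloc] initWithBase64EncodedString:b64 options:0] autorelease];
    // fileData should now contain the original file bytes
}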

Video from set of images has RGB problem

Hi guys,
I used ffmpeg to create a video from a sequence of images. Following is my code.
-(void)imageToMov:(NSString*)videoName imageNumber:(int)imageNumber{
[[NSFileManager defaultManager]createDirectoryAtPath:[Utilities documentsPath:[NSString stringWithFormat:@"CoachedFiles/"]] attributes:nil];
[[NSFileManager defaultManager]createDirectoryAtPath:[Utilities documentsPath:[NSString stringWithFormat:@"CoachedFiles/%@/",videoName]] attributes:nil];
// create the file
[[NSFileManager defaultManager]createFileAtPath:[Utilities documentsPath:[NSString stringWithFormat:@"CoachedFiles/%@/%@.mov",videoName,videoName]] contents:nil attributes:nil];
//[[NSFileManager defaultManager]createFileAtPath:[Utilities documentsPath:[NSString stringWithFormat:@"temp/temp.mov"]] contents:nil attributes:nil];
const char *outfilename = [[Utilities documentsPath:[NSString stringWithFormat:@"CoachedFiles/%@/%@.mov",videoName,videoName]]UTF8String];
UIImage * tempImage = [UIImage imageWithContentsOfFile:[Utilities documentsPath:[NSString stringWithFormat:@"temp/temp0000.jpeg"]]];
AVFormatContext *pFormatCtxEnc;
AVCodecContext *pCodecCtxEnc;
AVCodec *pCodecEnc;
AVFrame *pFrameEnc;
AVOutputFormat *pOutputFormat;
AVStream *video_st;
int i;
int outbuf_size;
uint8_t *outbuf;
int out_size;
// Register all formats and codecs
av_register_all();
// auto detect the output format from the name. default is mpeg.
pOutputFormat = av_guess_format(NULL, outfilename, NULL);
if (pOutputFormat == NULL)
return;
// allocate the output media context
pFormatCtxEnc = avformat_alloc_context();
if (pFormatCtxEnc == NULL)
return;
pFormatCtxEnc->oformat = pOutputFormat;
sprintf(pFormatCtxEnc->filename, "%s", outfilename);
video_st = av_new_stream(pFormatCtxEnc, 0); // 0 for video
pCodecCtxEnc = video_st->codec;
pCodecCtxEnc->codec_id = pOutputFormat->video_codec;
pCodecCtxEnc->codec_type = CODEC_TYPE_VIDEO;
// put sample parameters
pCodecCtxEnc->bit_rate = 500000;
// resolution must be a multiple of two
pCodecCtxEnc->width = tempImage.size.width;
pCodecCtxEnc->height = tempImage.size.height;
// frames per second
pCodecCtxEnc->time_base.den = 1;
pCodecCtxEnc->time_base.num = 1;
pCodecCtxEnc->pix_fmt = PIX_FMT_YUV420P;
pCodecCtxEnc->gop_size = 12; /* emit one intra frame every ten frames */
if (pCodecCtxEnc->codec_id == CODEC_ID_MPEG1VIDEO){
/* needed to avoid using macroblocks in which some coeffs overflow;
this doesn't happen with normal video, it just happens here as the
motion of the chroma plane doesn't match the luma plane */
pCodecCtxEnc->mb_decision=2;
}
// some formats want stream headers to be separate
if(!strcmp(pFormatCtxEnc->oformat->name, "mp4") || !strcmp(pFormatCtxEnc->oformat->name, "mov") || !strcmp(pFormatCtxEnc->oformat->name, "3gp"))
pCodecCtxEnc->flags |= CODEC_FLAG_GLOBAL_HEADER;
// set the output parameters (must be done even if no parameters).
if (av_set_parameters(pFormatCtxEnc, NULL) < 0) {
return;
}
// find the video encoder
pCodecEnc = avcodec_find_encoder(pCodecCtxEnc->codec_id);
if (pCodecEnc == NULL)
return;
// open it
if (avcodec_open(pCodecCtxEnc, pCodecEnc) < 0) {
return;
}
if (!(pFormatCtxEnc->oformat->flags & AVFMT_RAWPICTURE)) {
/* allocate output buffer */
/* XXX: API change will be done */
outbuf_size = 500000;
outbuf = av_malloc(outbuf_size);
}
pFrameEnc= avcodec_alloc_frame();
// open the output file, if needed
if (!(pOutputFormat->flags & AVFMT_NOFILE)) {
if (url_fopen(&pFormatCtxEnc->pb, outfilename, URL_WRONLY) < 0) {
//fprintf(stderr, "Could not open '%s'\n", filename);
return;
}
}
// write the stream header, if any
av_write_header(pFormatCtxEnc);
// Read frames and save frames to disk
int size = pCodecCtxEnc->width * pCodecCtxEnc->height;
uint8_t * picture_buf;
picture_buf = malloc((size * 3) / 2);
pFrameEnc->data[0] = picture_buf;
pFrameEnc->data[1] = pFrameEnc->data[0] + size;
pFrameEnc->data[2] = pFrameEnc->data[1] + size / 4;
pFrameEnc->linesize[0] = pCodecCtxEnc->width;
pFrameEnc->linesize[1] = pCodecCtxEnc->width / 2;
pFrameEnc->linesize[2] = pCodecCtxEnc->width / 2;
for (i=0;i<imageNumber;i++){
NSString *imgName = [NSString stringWithFormat:@"temp/temp%04d.jpeg",i];
NSLog(@"%@",imgName);
UIImage * image = [UIImage imageWithContentsOfFile:[Utilities documentsPath:imgName]];
// imgName is autoreleased (stringWithFormat:), so it must not be released here
// create an AVPicture
AVPicture pict;
// keep the format as BGRA; the remaining arguments are the input image's width and height
avpicture_alloc(&pict, PIX_FMT_BGRA, image.size.width, image.size.height);
// read the image data
CGImageRef cgimage = [image CGImage];
CGDataProviderRef dataProvider = CGImageGetDataProvider(cgimage);
CFDataRef data = CGDataProviderCopyData(dataProvider);
const uint8_t * imagedata = CFDataGetBytePtr(data);
// fill the AVPicture with the image data
avpicture_fill(&pict, imagedata, PIX_FMT_BGRA, image.size.width, image.size.height);
// define the conversion: from BGRA to YUV420
static int sws_flags = SWS_FAST_BILINEAR;
struct SwsContext * img_convert_ctx = sws_getContext(image.size.width,
image.size.height,
PIX_FMT_BGRA,
image.size.width,
image.size.height,
PIX_FMT_YUV420P,
sws_flags, NULL, NULL, NULL);
// convert the image data format
sws_scale (img_convert_ctx, pict.data, pict.linesize,
0, image.size.height,
pFrameEnc->data, pFrameEnc->linesize);
if (pFormatCtxEnc->oformat->flags & AVFMT_RAWPICTURE) {
/* raw video case. The API will change slightly in the near
future for that */
AVPacket pkt;
av_init_packet(&pkt);
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= video_st->index;
pkt.data= (uint8_t *)pFrameEnc;
pkt.size= sizeof(AVPicture);
av_write_frame(pFormatCtxEnc, &pkt);
} else {
// encode the image
out_size = avcodec_encode_video(pCodecCtxEnc, outbuf, outbuf_size, pFrameEnc);
// if zero size, it means the image was buffered
if (out_size != 0) {
AVPacket pkt;
av_init_packet(&pkt);
pkt.pts= pCodecCtxEnc->coded_frame->pts;
if(pCodecCtxEnc->coded_frame->key_frame)
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= video_st->index;
pkt.data= outbuf;
pkt.size= out_size;
// write the compressed frame in the media file
av_write_frame(pFormatCtxEnc, &pkt);
}
}
}
// get the delayed frames
for(; out_size; i++) {
out_size = avcodec_encode_video(pCodecCtxEnc, outbuf, outbuf_size, NULL);
if (out_size != 0) {
AVPacket pkt;
av_init_packet(&pkt);
pkt.pts= pCodecCtxEnc->coded_frame->pts;
if(pCodecCtxEnc->coded_frame->key_frame)
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= video_st->index;
pkt.data= outbuf;
pkt.size= out_size;
// write the compressed frame in the media file
av_write_frame(pFormatCtxEnc, &pkt);
}
}
// Close the codec
//avcodec_close(pCodecCtxDec);
avcodec_close(pCodecCtxEnc);
// Free the YUV frame
//av_free(pFrameDec);
av_free(pFrameEnc);
av_free(outbuf);
// write the trailer, if any
av_write_trailer(pFormatCtxEnc);
// free the streams
for(i = 0; i < pFormatCtxEnc->nb_streams; i++) {
av_freep(&pFormatCtxEnc->streams[i]->codec);
av_freep(&pFormatCtxEnc->streams[i]);
}
if (!(pOutputFormat->flags & AVFMT_NOFILE)) {
/* close the output file */
//comment out this code to fix the record video issue. Kevin 2010-07-11
//url_fclose(&pFormatCtxEnc->pb);
}
/* free the stream */
av_free(pFormatCtxEnc);
if([[NSFileManager defaultManager]fileExistsAtPath:[Utilities documentsPath:[NSString stringWithFormat:@"temp/"]] isDirectory:NULL]){
[[NSFileManager defaultManager] removeItemAtPath:[Utilities documentsPath:[NSString stringWithFormat:@"temp/"]] error:nil];
}
//[self MergeVideoFileWithVideoName:videoName];
[self SaveFileDetails:videoName];
[alertView dismissWithClickedButtonIndex:0 animated:YES];
}
Now the problem: the video is created successfully, but the colors come out greenish. Please point out my mistake in this code.
I am not an expert on colors, but I think your problem might be the color space you use in your img_convert_ctx. You are using PIX_FMT_BGRA; that's a different channel order than RGB (it's BGR, i.e. the color channels reversed). Try using PIX_FMT_RGBA instead.
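In terms of the question's code, that is a three-place change, since the picture allocation, the fill, and the scaler context all describe the source data (a sketch; everything else stays as posted):
avpicture_alloc( &pict, PIX_FMT_RGBA, image.size.width, image.size.height );
avpicture_fill( &pict, imagedata, PIX_FMT_RGBA, image.size.width, image.size.height );
struct SwsContext * img_convert_ctx = sws_getContext( image.size.width, image.size.height, PIX_FMT_RGBA,
                                                      image.size.width, image.size.height, PIX_FMT_YUV420P,
                                                      sws_flags, NULL, NULL, NULL );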

Using AVMutableAudioMix to adjust volumes for tracks within asset

I'm applying an AVMutableAudioMix to an asset I've created; the asset generally consists of 3-5 audio tracks (no video). The goal is to add several volume commands throughout the play time, i.e. I'd like to set the volume to 0.1 at 1 second, 0.5 at 2 seconds, then 0.1 or whatever at 3 seconds. I'm currently trying to do this with an AVPlayer but will also later use it when exporting the AVSession to a file. The problem is that it only seems to honor the first volume command and ignores all later ones. If the first command sets the volume to 0.1, that will be the permanent volume for this track for the rest of the asset. This is despite the fact that it really looks like you should be able to add any number of these commands, seeing as the inputParameters member of AVMutableAudioMix is an NSArray of AVMutableAudioMixInputParameters. Has anyone figured this out?
Edit: I partly figured this out. I'm able to add several volume changes throughout a given track, but the timings appear way off and I'm not sure how to fix that. For example, setting the volume to 0.0 at 5 seconds, then 1.0 at 10 seconds and back to 0.0 at 15 seconds should make the volume switch promptly at those timings, but the results are always very unpredictable, with sounds ramping up and down, and it only sometimes works (with sudden volume changes, as expected from setVolume). If anyone has got the AudioMix to work, please provide an example.
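One approach that may make the timings predictable (a sketch, not a confirmed fix) is to express each change as an explicit volume ramp via setVolumeRampFromStartVolume:toEndVolume:timeRange:, so the mix knows the exact interval of every transition:
AVMutableAudioMixInputParameters *params = [AVMutableAudioMixInputParameters audioMixInputParameters];
[params setVolume:0.0 atTime:kCMTimeZero];
// short 0.1 s ramps at 5 s and 10 s instead of discrete volume steps
[params setVolumeRampFromStartVolume:0.0 toEndVolume:1.0
                           timeRange:CMTimeRangeMake( CMTimeMakeWithSeconds( 5.0, 600 ), CMTimeMakeWithSeconds( 0.1, 600 ) )];
[params setVolumeRampFromStartVolume:1.0 toEndVolume:0.0
                           timeRange:CMTimeRangeMake( CMTimeMakeWithSeconds( 10.0, 600 ), CMTimeMakeWithSeconds( 0.1, 600 ) )];
// then set the trackID and attach params to the mix as in the code below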
The code I use to change the track volume is:
AVURLAsset *soundTrackAsset = [[AVURLAsset alloc]initWithURL:trackUrl options:nil];
AVMutableAudioMixInputParameters *audioInputParams = [AVMutableAudioMixInputParameters audioMixInputParameters];
[audioInputParams setVolume:0.5 atTime:kCMTimeZero];
[audioInputParams setTrackID:[[[soundTrackAsset tracksWithMediaType:AVMediaTypeAudio] objectAtIndex:0] trackID]];
audioMix = [AVMutableAudioMix audioMix];
audioMix.inputParameters = [NSArray arrayWithObject:audioInputParams];
Don't forget to add the audiomix to your AVAssetExportSession
exportSession.audioMix = audioMix;
However, I noticed it does not work with all formats, so if you keep having issues with AVFoundation you can use the following function to change the volume level of a stored file. Be aware that it can be quite slow.
-(void) ScaleAudioFileAmplitude:(NSURL *)theURL: (float) ampScale {
OSStatus err = noErr;
ExtAudioFileRef audiofile;
ExtAudioFileOpenURL((CFURLRef)theURL, &audiofile);
assert(audiofile);
// get some info about the file's format.
AudioStreamBasicDescription fileFormat;
UInt32 size = sizeof(fileFormat);
err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_FileDataFormat, &size, &fileFormat);
// we'll need to know what type of file it is later when we write
AudioFileID aFile;
size = sizeof(aFile);
err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_AudioFile, &size, &aFile);
AudioFileTypeID fileType;
size = sizeof(fileType);
err = AudioFileGetProperty(aFile, kAudioFilePropertyFileFormat, &size, &fileType);
// tell the ExtAudioFile API what format we want samples back in
AudioStreamBasicDescription clientFormat;
bzero(&clientFormat, sizeof(clientFormat));
clientFormat.mChannelsPerFrame = fileFormat.mChannelsPerFrame;
clientFormat.mBytesPerFrame = 4;
clientFormat.mBytesPerPacket = clientFormat.mBytesPerFrame;
clientFormat.mFramesPerPacket = 1;
clientFormat.mBitsPerChannel = 32;
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mSampleRate = fileFormat.mSampleRate;
clientFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved;
err = ExtAudioFileSetProperty(audiofile, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);
// find out how many frames we need to read
SInt64 numFrames = 0;
size = sizeof(numFrames);
err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_FileLengthFrames, &size, &numFrames);
// create the buffers for reading in data
AudioBufferList *bufferList = malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer) * (clientFormat.mChannelsPerFrame - 1));
bufferList->mNumberBuffers = clientFormat.mChannelsPerFrame;
for (int ii=0; ii < bufferList->mNumberBuffers; ++ii) {
bufferList->mBuffers[ii].mDataByteSize = sizeof(float) * numFrames;
bufferList->mBuffers[ii].mNumberChannels = 1;
bufferList->mBuffers[ii].mData = malloc(bufferList->mBuffers[ii].mDataByteSize);
}
// read in the data
UInt32 rFrames = (UInt32)numFrames;
err = ExtAudioFileRead(audiofile, &rFrames, bufferList);
// close the file
err = ExtAudioFileDispose(audiofile);
// process the audio
for (int ii=0; ii < bufferList->mNumberBuffers; ++ii) {
float *fBuf = (float *)bufferList->mBuffers[ii].mData;
for (int jj=0; jj < rFrames; ++jj) {
*fBuf = *fBuf * ampScale;
fBuf++;
}
}
// open the file for writing
err = ExtAudioFileCreateWithURL((CFURLRef)theURL, fileType, &fileFormat, NULL, kAudioFileFlags_EraseFile, &audiofile);
// tell the ExtAudioFile API what format we'll be sending samples in
err = ExtAudioFileSetProperty(audiofile, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);
// write the data
err = ExtAudioFileWrite(audiofile, rFrames, bufferList);
// close the file
ExtAudioFileDispose(audiofile);
// destroy the buffers
for (int ii=0; ii < bufferList->mNumberBuffers; ++ii) {
free(bufferList->mBuffers[ii].mData);
}
free(bufferList);
bufferList = NULL;
}
Please also note that you may need to fine-tune the ampScale depending on where your volume value is coming from. The system volume goes from 0 to 1 and can be obtained by calling AudioSessionGetProperty:
Float32 volume;
UInt32 dataSize = sizeof(Float32);
AudioSessionGetProperty (
kAudioSessionProperty_CurrentHardwareOutputVolume,
&dataSize,
&volume
);
The Audio Extension Toolbox function doesn't quite work anymore as-is due to API changes; it now requires you to set up an audio session category. When setting the export properties I was getting an error code of '?cat' (which NSError will print out in decimal).
Here is the code that works now in iOS 5.1. It is incredibly slow, too; just by looking I'd say several times slower. It is also memory-intensive, since it appears to load the whole file into memory, which generates memory warnings for 10 MB MP3 files.
-(void) scaleAudioFileAmplitude:(NSURL *)theURL withAmpScale:(float) ampScale
{
OSStatus err = noErr;
ExtAudioFileRef audiofile;
ExtAudioFileOpenURL((CFURLRef)theURL, &audiofile);
assert(audiofile);
// get some info about the file's format.
AudioStreamBasicDescription fileFormat;
UInt32 size = sizeof(fileFormat);
err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_FileDataFormat, &size, &fileFormat);
// we'll need to know what type of file it is later when we write
AudioFileID aFile;
size = sizeof(aFile);
err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_AudioFile, &size, &aFile);
AudioFileTypeID fileType;
size = sizeof(fileType);
err = AudioFileGetProperty(aFile, kAudioFilePropertyFileFormat, &size, &fileType);
// tell the ExtAudioFile API what format we want samples back in
AudioStreamBasicDescription clientFormat;
bzero(&clientFormat, sizeof(clientFormat));
clientFormat.mChannelsPerFrame = fileFormat.mChannelsPerFrame;
clientFormat.mBytesPerFrame = 4;
clientFormat.mBytesPerPacket = clientFormat.mBytesPerFrame;
clientFormat.mFramesPerPacket = 1;
clientFormat.mBitsPerChannel = 32;
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mSampleRate = fileFormat.mSampleRate;
clientFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved;
err = ExtAudioFileSetProperty(audiofile, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);
// find out how many frames we need to read
SInt64 numFrames = 0;
size = sizeof(numFrames);
err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_FileLengthFrames, &size, &numFrames);
// create the buffers for reading in data
AudioBufferList *bufferList = malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer) * (clientFormat.mChannelsPerFrame - 1));
bufferList->mNumberBuffers = clientFormat.mChannelsPerFrame;
//printf("bufferList->mNumberBuffers = %lu \n\n", bufferList->mNumberBuffers);
for (int ii=0; ii < bufferList->mNumberBuffers; ++ii) {
bufferList->mBuffers[ii].mDataByteSize = sizeof(float) * numFrames;
bufferList->mBuffers[ii].mNumberChannels = 1;
bufferList->mBuffers[ii].mData = malloc(bufferList->mBuffers[ii].mDataByteSize);
}
// read in the data
UInt32 rFrames = (UInt32)numFrames;
err = ExtAudioFileRead(audiofile, &rFrames, bufferList);
// close the file
err = ExtAudioFileDispose(audiofile);
// process the audio
for (int ii=0; ii < bufferList->mNumberBuffers; ++ii) {
float *fBuf = (float *)bufferList->mBuffers[ii].mData;
for (int jj=0; jj < rFrames; ++jj) {
*fBuf = *fBuf * ampScale;
fBuf++;
}
}
// open the file for writing
err = ExtAudioFileCreateWithURL((CFURLRef)theURL, fileType, &fileFormat, NULL, kAudioFileFlags_EraseFile, &audiofile);
NSError *error = NULL;
/*************************** You Need This Now ****************************/
AVAudioSession *session = [AVAudioSession sharedInstance];
[session setCategory:AVAudioSessionCategoryAudioProcessing error:&error];
/************************* End You Need This Now **************************/
// tell the ExtAudioFile API what format we'll be sending samples in
err = ExtAudioFileSetProperty(audiofile, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);
error = [NSError errorWithDomain:NSOSStatusErrorDomain
code:err
userInfo:nil];
NSLog(@"Error: %@", [error description]);
// write the data
err = ExtAudioFileWrite(audiofile, rFrames, bufferList);
// close the file
ExtAudioFileDispose(audiofile);
// destroy the buffers
for (int ii=0; ii < bufferList->mNumberBuffers; ++ii) {
free(bufferList->mBuffers[ii].mData);
}
free(bufferList);
bufferList = NULL;
}
Thanks for the help provided in this post.
I just would like to add one thing: you should restore the AVAudioSession back to what it was afterwards, or you'll end up not playing anything.
AVAudioSession *session = [AVAudioSession sharedInstance];
NSString *originalSessionCategory = [session category];
[session setCategory:AVAudioSessionCategoryAudioProcessing error:&error];
...
...
// restore category
[session setCategory:originalSessionCategory error:&error];
if(error)
NSLog(@"%@", [error localizedDescription]);
Cheers
For setting different volumes on the mutable tracks you can use the code below:
self.audioMix = [AVMutableAudioMix audioMix];
AVMutableAudioMixInputParameters *audioInputParams = [AVMutableAudioMixInputParameters audioMixInputParameters];
[audioInputParams setVolume:0.1 atTime:kCMTimeZero];
audioInputParams.trackID = compositionAudioTrack2.trackID;
AVMutableAudioMixInputParameters *audioInputParams1 = [AVMutableAudioMixInputParameters audioMixInputParameters];
[audioInputParams1 setVolume:0.9 atTime:kCMTimeZero];
audioInputParams1.trackID = compositionAudioTrack1.trackID;
AVMutableAudioMixInputParameters *audioInputParams2 = [AVMutableAudioMixInputParameters audioMixInputParameters];
[audioInputParams2 setVolume:0.3 atTime:kCMTimeZero];
audioInputParams2.trackID = compositionAudioTrack.trackID;
self.audioMix.inputParameters =[NSArray arrayWithObjects:audioInputParams,audioInputParams1,audioInputParams2, nil];
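For live playback rather than export, the same mix attaches to the player item (a sketch, assuming an existing AVPlayerItem named playerItem):
playerItem.audioMix = self.audioMix;
AVPlayer *player = [AVPlayer playerWithPlayerItem:playerItem];
[player play];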

Reading audio samples via AVAssetReader

How do you read audio samples via AVAssetReader? I've found examples of duplicating or mixing using AVAssetReader, but those loops were always controlled by an AVAssetWriter loop. Is it possible just to create an AVAssetReader and read through it, getting each sample and throwing the int32 of each audio sample into an array?
Thanks.
To expand on @amrox's answer, you can get an AudioBufferList from the CMBlockBufferRef, e.g.
CMItemCount numSamplesInBuffer = CMSampleBufferGetNumSamples(buffer);
AudioBufferList audioBufferList;
CMBlockBufferRef blockBuffer = NULL; // receives the retained block buffer that owns the sample data
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
buffer,
NULL,
&audioBufferList,
sizeof(audioBufferList),
NULL,
NULL,
kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment,
&blockBuffer
);
for (int bufferCount=0; bufferCount < audioBufferList.mNumberBuffers; bufferCount++) {
SInt16* samples = (SInt16 *)audioBufferList.mBuffers[bufferCount].mData;
for (int i=0; i < numSamplesInBuffer; i++) {
// amplitude for the sample is samples[i], assuming you have linear pcm to start with
}
}
//Release the block buffer when done with the samples
//(it was retained by CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer)
CFRelease(blockBuffer);
NSError *error = nil;
AVAssetReader *reader = [[AVAssetReader alloc] initWithAsset:asset error:&error];
AVAssetTrack *track = [[asset tracksWithMediaType:AVMediaTypeAudio] objectAtIndex:0];
NSDictionary *settings = @{ AVFormatIDKey : [NSNumber numberWithInt:kAudioFormatLinearPCM] };
AVAssetReaderTrackOutput *readerOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:track
outputSettings:settings];
[reader addOutput:readerOutput];
[reader startReading];
CMSampleBufferRef sample;
while ( (sample = [readerOutput copyNextSampleBuffer]) ) // nil means no more samples
{
CMBlockBufferRef buffer = CMSampleBufferGetDataBuffer(sample);
size_t lengthAtOffset;
size_t totalLength;
char *data;
if ( CMBlockBufferGetDataPointer( buffer, 0, &lengthAtOffset, &totalLength, &data ) != noErr )
{
NSLog(@"error!");
CFRelease(sample);
break;
}
// do something with data...
CFRelease(sample);
}
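After the loop it is also worth asking the reader why it stopped (an addition, not part of the answer above):
if ( reader.status == AVAssetReaderStatusFailed )
    NSLog( @"asset reading failed: %@", reader.error );
else if ( reader.status == AVAssetReaderStatusCompleted )
    NSLog( @"asset reading finished normally" );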
The answers here are not fully generic: the call to CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer can fail when the AudioBufferList needs to be sized differently, for example when the samples are non-interleaved.
The correct way is to call CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer twice. The first call queries the size needed for the AudioBufferList and the second one actually fills it.
size_t bufferSize = 0;
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
sampleBuffer,
&bufferSize,
NULL,
0,
NULL,
NULL,
0,
NULL
);
AudioBufferList *bufferList = malloc(bufferSize);
CMBlockBufferRef blockBuffer = NULL;
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
sampleBuffer,
NULL,
bufferList,
bufferSize,
NULL,
NULL,
kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment,
&blockBuffer
);
// handle audio here
free(bufferList);
CFRelease(blockBuffer);
In a real-world example you must also perform error handling, and you should not malloc on every frame; instead, cache the AudioBufferList, as sketched below.
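A sketch of that caching idea (hypothetical helper state, not from the answer above): keep one allocation across frames and grow it only when a sample buffer needs more room, then fill it with the second call exactly as shown.
static AudioBufferList *cachedList = NULL;
static size_t cachedCapacity = 0;
size_t needed = 0;
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer( sampleBuffer, &needed, NULL, 0, NULL, NULL, 0, NULL );
if ( needed > cachedCapacity ) {
    cachedList = realloc( cachedList, needed ); // realloc(NULL, n) behaves like malloc(n)
    cachedCapacity = needed;
}
// ... then pass cachedList and needed to the second call, as in the code above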