How do I find the total duration of the audio file being played, so that I can display the contents of the audio file in view controllers according to the playback time?
if ([audioPlayer currentTime] == 11) {
    [self performSelector:@selector(viewController) withObject:nil];
}
else {
    if ([audioPlayer currentTime] == 23) {
        [self performSelector:@selector(secondViewController) withObject:nil];
    }
}
If you're using AVAudioPlayer, you can use its duration property to retrieve the length of the loaded audio file in seconds.
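For example, a minimal sketch (assuming an audioPlayer ivar and hypothetical showFirstViewController / showSecondViewController methods) that reads duration and polls currentTime with an NSTimer, rather than testing for exact equality, since currentTime is a floating-point NSTimeInterval and will rarely be exactly 11 or 23:

// A sketch, not the asker's code: log the total length, then poll the playhead.
- (void)startPlayback
{
    NSLog(@"total duration: %f seconds", audioPlayer.duration);
    [audioPlayer play];
    [NSTimer scheduledTimerWithTimeInterval:0.5
                                     target:self
                                   selector:@selector(checkPlaybackTime:)
                                   userInfo:nil
                                    repeats:YES];
}

- (void)checkPlaybackTime:(NSTimer *)timer
{
    NSTimeInterval now = audioPlayer.currentTime;
    if (now >= 23.0) {
        [self showSecondViewController];   // hypothetical method
    } else if (now >= 11.0) {
        [self showFirstViewController];    // hypothetical method
    }
}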
Use the Audio File Services functions:
NSURL *afUrl = [NSURL fileURLWithPath:soundPath];
AudioFileID fileID;
OSStatus result = AudioFileOpenURL((CFURLRef)afUrl, kAudioFileReadPermission, 0, &fileID);
// kAudioFilePropertyEstimatedDuration returns a Float64 (seconds), not a UInt64.
Float64 estimatedDuration = 0;
UInt32 thePropSize = sizeof(estimatedDuration);
result = AudioFileGetProperty(fileID, kAudioFilePropertyEstimatedDuration, &thePropSize, &estimatedDuration);
AudioFileClose(fileID);
For more info, refer to these links:
http://developer.apple.com/library/ios/#codinghowtos/AudioAndVideo/_index.html
https://developer.apple.com/library/mac/#documentation/musicaudio/reference/AudioFileConvertRef/Reference/reference.html
EDIT: This gets the length of a file even if it is not associated with an AVAudioPlayer, so you can populate a list. Hoping this helps somebody. :D
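If you then want to show those durations in a list, a small formatting sketch (my own addition, using the estimatedDuration value from above):

// Format the Float64 duration (in seconds) as mm:ss for display, e.g. in a table cell.
int totalSeconds = (int)round(estimatedDuration);
NSString *durationLabel = [NSString stringWithFormat:@"%d:%02d",
                           totalSeconds / 60, totalSeconds % 60];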
I'm trying to process a local video file and simply do some analysis on the pixel data. Nothing is being output. My current code iterates through each frame of the video, but I'd actually like to skip ~15 frames at a time to speed things up. Is there a way to skip over frames without decoding them?
In FFmpeg, I could simply call av_read_frame without calling avcodec_decode_video2.
Thanks in advance! Here's my current code:
- (void) readMovie:(NSURL *)url
{
[self performSelectorOnMainThread:@selector(updateInfo:) withObject:@"scanning" waitUntilDone:YES];
startTime = [NSDate date];
AVURLAsset * asset = [AVURLAsset URLAssetWithURL:url options:nil];
[asset loadValuesAsynchronouslyForKeys:[NSArray arrayWithObject:@"tracks"] completionHandler:
^{
dispatch_async(dispatch_get_main_queue(),
^{
AVAssetTrack * videoTrack = nil;
NSArray * tracks = [asset tracksWithMediaType:AVMediaTypeVideo];
if ([tracks count] == 1)
{
videoTrack = [tracks objectAtIndex:0];
videoDuration = CMTimeGetSeconds([videoTrack timeRange].duration);
NSError * error = nil;
// _movieReader is a member variable
_movieReader = [[AVAssetReader alloc] initWithAsset:asset error:&error];
if (error)
NSLog(@"%@", error.localizedDescription);
NSString* key = (NSString*)kCVPixelBufferPixelFormatTypeKey;
NSNumber* value = [NSNumber numberWithUnsignedInt: kCVPixelFormatType_420YpCbCr8Planar];
NSDictionary* videoSettings = [NSDictionary dictionaryWithObject:value forKey:key];
AVAssetReaderTrackOutput* output = [AVAssetReaderTrackOutput
assetReaderTrackOutputWithTrack:videoTrack
outputSettings:videoSettings];
output.alwaysCopiesSampleData = NO;
[_movieReader addOutput:output];
if ([_movieReader startReading])
{
NSLog(@"reading started");
[self readNextMovieFrame];
}
else
{
NSLog(@"reading can't be started");
}
}
});
}];
}
- (void) readNextMovieFrame
{
//NSLog(@"readNextMovieFrame called");
if (_movieReader.status == AVAssetReaderStatusReading)
{
//NSLog(@"status is reading");
AVAssetReaderTrackOutput * output = [_movieReader.outputs objectAtIndex:0];
CMSampleBufferRef sampleBuffer = [output copyNextSampleBuffer];
if (sampleBuffer)
{ // I'm guessing this is the expensive part that we can skip if we want to skip frames
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
// Lock the image buffer
CVPixelBufferLockBaseAddress(imageBuffer,0);
// Get information of the image
uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
// do my pixel analysis
// Unlock the image buffer
CVPixelBufferUnlockBaseAddress(imageBuffer,0);
CFRelease(sampleBuffer);
[self readNextMovieFrame];
}
else
{
NSLog(@"could not copy next sample buffer. status is %d", _movieReader.status);
NSTimeInterval scanDuration = -[startTime timeIntervalSinceNow];
float scanMultiplier = videoDuration / scanDuration;
NSString* info = [NSString stringWithFormat:@"Done\n\nvideo duration: %f seconds\nscan duration: %f seconds\nmultiplier: %f", videoDuration, scanDuration, scanMultiplier];
[self performSelectorOnMainThread:@selector(updateInfo:) withObject:info waitUntilDone:YES];
}
}
else
{
NSLog(@"status is now %d", _movieReader.status);
}
}
- (void) updateInfo: (id)message
{
NSString* info = [NSString stringWithFormat:@"%@", message];
[infoTextView setText:info];
}
If you want less accurate frame processing (not frame by frame) you should use AVAssetImageGenerator.
This class returns a frame for any time you specify.
Specifically, build an array of times spanning the clip's duration with a 0.5 s step between them (the iPhone records at roughly 30 fps, so every 15th frame works out to about one frame every 0.5 seconds) and let the image generator return your frames.
For each frame you can compare the time you requested with the actual time of the returned frame. By default the generator allows some tolerance around the requested time, but you can tighten that by setting the properties:
requestedTimeToleranceBefore
and
requestedTimeToleranceAfter
I hope I answered your question,
Good luck.
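A minimal sketch of that approach (my own, not from the original answer; url and the tolerance values are placeholders):

AVURLAsset *asset = [AVURLAsset URLAssetWithURL:url options:nil];
AVAssetImageGenerator *generator = [AVAssetImageGenerator assetImageGeneratorWithAsset:asset];
generator.requestedTimeToleranceBefore = CMTimeMake(1, 4);   // tighten or loosen as needed
generator.requestedTimeToleranceAfter  = CMTimeMake(1, 4);

// Build one requested time every 0.5 s across the clip.
NSMutableArray *times = [NSMutableArray array];
Float64 durationSeconds = CMTimeGetSeconds(asset.duration);
for (Float64 t = 0; t < durationSeconds; t += 0.5) {
    [times addObject:[NSValue valueWithCMTime:CMTimeMakeWithSeconds(t, 600)]];
}

[generator generateCGImagesAsynchronouslyForTimes:times
                                completionHandler:^(CMTime requestedTime, CGImageRef image,
                                                    CMTime actualTime, AVAssetImageGeneratorResult result,
                                                    NSError *error) {
    if (result == AVAssetImageGeneratorSucceeded) {
        // analyze the pixel data of `image` here
    }
}];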
I'm trying to play a simple sound file through OpenAL by following this tutorial:
I've created a monolithic version of the code from it to test initially, but I can't get the sound to play. I've tried a lot of things with no luck. Any help is much appreciated.
Thanks.
Here is my code:
ALCdevice* device;
device = alcOpenDevice(NULL);
ALCcontext* context;
alcCreateContext(device, NULL);
alcMakeContextCurrent(context);
NSString* path = [[NSBundle mainBundle] pathForResource:@"mg" ofType:@"caf"];
NSURL* pathURL = [NSURL fileURLWithPath:path];
AudioFileID audioID;
OSStatus audioStatus = AudioFileOpenURL((CFURLRef)pathURL, kAudioFileReadPermission, 0, &audioID);
UInt32 fileSize = 0;
UInt64 outDataSize;
UInt32 propSize = sizeof(UInt64);
OSStatus res = AudioFileGetProperty(audioID, kAudioFilePropertyAudioDataByteCount, &propSize, &outDataSize);
fileSize = (UInt32)outDataSize;
unsigned char* outData = malloc(fileSize);
OSStatus res2 = AudioFileReadBytes(audioID, false, 0, &fileSize, outData);
AudioFileClose(audioID);
ALuint bufferID;
alGenBuffers(1, &bufferID);
alBufferData(bufferID, AL_FORMAT_STEREO16, outData, fileSize, 44100);
ALuint sourceID = 2;
alGenSources(1, &sourceID);
alSourcei(sourceID, AL_BUFFER, bufferID);
alSourcef(sourceID, AL_PITCH, 1.0f);
alSourcef(sourceID, AL_GAIN, 1.0f);
alSourcei(sourceID, AL_LOOPING, AL_FALSE);
free(outData);
outData = NULL;
alSourcePlay(sourceID);
Just found out!
I have been using:
alcCreateContext(device, NULL);
instead of:
context = alcCreateContext(device, NULL);
Checking alGetError() helped point this out.
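A minimal sketch of that kind of check (a hypothetical CheckALError helper, not from the original post):

// Hypothetical helper: call after each al* operation to surface failures early.
static void CheckALError(const char *operation)
{
    ALenum error = alGetError();
    if (error != AL_NO_ERROR) {
        NSLog(@"OpenAL error after %s: 0x%x", operation, error);
    }
}

// Usage:
alGenBuffers(1, &bufferID);
CheckALError("alGenBuffers");
alSourcePlay(sourceID);
CheckALError("alSourcePlay");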
I am converting my recorded audio, which is in .m4a format, to .caf format. The settings of the recorded audio are given below:
/* Record settings for recording the audio*/
recordSetting = [[NSDictionary alloc] initWithObjectsAndKeys:
    [NSNumber numberWithInt:kAudioFormatMPEG4AAC], AVFormatIDKey,
    [NSNumber numberWithFloat:44100.0], AVSampleRateKey,
    [NSNumber numberWithInt:2], AVNumberOfChannelsKey,
    [NSNumber numberWithInt:16], AVLinearPCMBitDepthKey,
    [NSNumber numberWithBool:NO], AVLinearPCMIsBigEndianKey,
    [NSNumber numberWithBool:NO], AVLinearPCMIsFloatKey,
    nil];
I convert the audio to .caf using this function:
-(NSString *)handleConvertToPCM:(NSURL *)convertUrl
{
[self performSelectorOnMainThread:@selector(showActivity) withObject:nil waitUntilDone:NO];
DEBUG_LOG(@"DEBUGGING");
DEBUG_LOG(@"handleConvertToPCM");
// open an ExtAudioFile
NSLog (@"opening %@", convertUrl);
ExtAudioFileRef inputFile;
CheckResult (ExtAudioFileOpenURL((CFURLRef)convertUrl, &inputFile),
"ExtAudioFileOpenURL failed");
// prepare to convert to a plain ol' PCM format
AudioStreamBasicDescription requiredPCMFormat;
requiredPCMFormat.mSampleRate = 44100; // todo: or use source rate?
requiredPCMFormat.mFormatID = kAudioFormatLinearPCM ;
requiredPCMFormat.mFormatFlags = kAudioFormatFlagsCanonical;
requiredPCMFormat.mChannelsPerFrame = 2;
requiredPCMFormat.mFramesPerPacket = 1;
requiredPCMFormat.mBitsPerChannel = 16;
requiredPCMFormat.mBytesPerPacket = 4;
requiredPCMFormat.mBytesPerFrame = 4;
CheckResult (ExtAudioFileSetProperty(inputFile, kExtAudioFileProperty_ClientDataFormat,
sizeof (requiredPCMFormat), &requiredPCMFormat),
"ExtAudioFileSetProperty failed");
// allocate a big buffer. size can be arbitrary for ExtAudioFile.
UInt32 outputBufferSize = 0x10000;
void* ioBuf = malloc (outputBufferSize);
UInt32 sizePerPacket = requiredPCMFormat.mBytesPerPacket;
UInt32 packetsPerBuffer = outputBufferSize / sizePerPacket;
// set up output file
self.outputPath = [NSString stringWithFormat:@"%@/export-pcm.caf", DOCUMENTS_FOLDER];
self.outputURL = [NSURL fileURLWithPath:self.outputPath];
DEBUG_LOG(@"creating output file %@", self.outputURL);
AudioFileID outputFile;
CheckResult(AudioFileCreateWithURL((CFURLRef)outputURL,
kAudioFileCAFType,
&requiredPCMFormat,
kAudioFileFlags_EraseFile,
&outputFile),
"AudioFileCreateWithURL failed");
// start convertin'
UInt32 outputFilePacketPosition = 0; //in bytes
while (true)
{
// wrap the destination buffer in an AudioBufferList
AudioBufferList convertedData;
convertedData.mNumberBuffers = 1;
convertedData.mBuffers[0].mNumberChannels = requiredPCMFormat.mChannelsPerFrame;
convertedData.mBuffers[0].mDataByteSize = outputBufferSize;
convertedData.mBuffers[0].mData = ioBuf;
UInt32 frameCount = packetsPerBuffer;
// read from the extaudiofile
CheckResult (ExtAudioFileRead(inputFile,
&frameCount,
&convertedData),
"Couldn't read from input file");
if (frameCount == 0)
{
printf ("done reading from file");
break;
}
// write the converted data to the output file
CheckResult (AudioFileWritePackets(outputFile,
false,
frameCount,
NULL,
outputFilePacketPosition / requiredPCMFormat.mBytesPerPacket,
&frameCount,
convertedData.mBuffers[0].mData),
"Couldn't write packets to file");
DEBUG_LOG(@"Converted %ld bytes", outputFilePacketPosition);
// advance the output file write location
outputFilePacketPosition += (frameCount * requiredPCMFormat.mBytesPerPacket);
}
// clean up
ExtAudioFileDispose(inputFile);
AudioFileClose(outputFile);
return(self.outputPath);
}
My problem is that the size of the converted file is very large compared to the file given for conversion. Is there any way to decrease the size by changing the conversion settings?
I tried compressing the resulting file, but that takes too much time, so I would like to reduce the size as part of the conversion itself.
Decompressing a highly compressed audio file almost always results in a much larger file, unless you re-compress it with an even lossier compression format.
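As a rough sanity check, the size of uncompressed 16-bit stereo PCM at 44.1 kHz is easy to estimate (a back-of-the-envelope sketch, not from the original answer):

// Back-of-the-envelope estimate: why the .caf is roughly ten times larger than the .m4a.
double bytesPerSecond = 44100.0 * 2 /* channels */ * 2 /* bytes per 16-bit sample */; // 176,400
double pcmMegabytesPerMinute = bytesPerSecond * 60.0 / (1024.0 * 1024.0);              // about 10.1 MB
double aacMegabytesPerMinute = (128000.0 / 8.0) * 60.0 / (1024.0 * 1024.0);            // about 0.9 MB at 128 kbps
NSLog(@"PCM: %.1f MB/min vs. AAC: %.1f MB/min", pcmMegabytesPerMinute, aacMegabytesPerMinute);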
First time asking a question here. I'm hoping the post is clear and sample code is formatted correctly.
I'm experimenting with AVFoundation and time lapse photography.
My intent is to grab every Nth frame from the video camera of an iOS device (my iPod touch, version 4) and write each of those frames out to a file to create a timelapse. I'm using AVCaptureVideoDataOutput, AVAssetWriter and AVAssetWriterInput.
The problem is, if I use the CMSampleBufferRef passed to captureOutput:didOutputSampleBuffer:fromConnection:, each frame plays back for the length of time between the original input frames, giving a frame rate of, say, 1 fps. I'm looking to get 30 fps.
I've tried using CMSampleBufferCreateCopyWithNewTiming(), but then after 13 frames are written to the file, captureOutput:didOutputSampleBuffer:fromConnection: stops being called. The interface is active and I can tap a button to stop the capture and save it to the photo library for playback. It appears to play back as I want, 30 fps, but it only has those 13 frames.
How can I accomplish my goal of 30fps playback?
How can I tell where the app is getting lost and why?
I've placed a flag called useNativeTime so I can test both cases. When set to YES, I get all frames I'm interested in as the callback doesn't 'get lost'. When I set that flag to NO, I only ever get 13 frames processed and am never returned to that method again. As mentioned above, in both cases I can playback the video.
Thanks for any help.
Here is where I'm trying to do the retiming.
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
BOOL useNativeTime = NO;
BOOL appendSuccessFlag = NO;
//NSLog(@"in captureOutput sample buffer method");
if( !CMSampleBufferDataIsReady(sampleBuffer) )
{
NSLog( @"sample buffer is not ready. Skipping sample" );
//CMSampleBufferInvalidate(sampleBuffer);
return;
}
if (! [inputWriterBuffer isReadyForMoreMediaData])
{
NSLog(@"Not ready for data.");
}
else {
// Write every first frame of n frames (30 native from camera).
intervalFrames++;
if (intervalFrames > 30) {
intervalFrames = 1;
}
else if (intervalFrames != 1) {
//CMSampleBufferInvalidate(sampleBuffer);
return;
}
// Need to initialize start session time.
if (writtenFrames < 1) {
if (useNativeTime) imageSourceTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
else imageSourceTime = CMTimeMake( 0 * 20 ,600); //CMTimeMake(1,30);
[outputWriter startSessionAtSourceTime: imageSourceTime];
NSLog(@"Starting CMtime");
CMTimeShow(imageSourceTime);
}
if (useNativeTime) {
imageSourceTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
CMTimeShow(imageSourceTime);
// CMTime myTiming = CMTimeMake(writtenFrames * 20,600);
// CMSampleBufferSetOutputPresentationTimeStamp(sampleBuffer, myTiming); // Tried but has no effect.
appendSuccessFlag = [inputWriterBuffer appendSampleBuffer:sampleBuffer];
}
else {
CMSampleBufferRef newSampleBuffer;
CMSampleTimingInfo sampleTimingInfo;
sampleTimingInfo.duration = CMTimeMake(20,600);
sampleTimingInfo.presentationTimeStamp = CMTimeMake( (writtenFrames + 0) * 20,600);
sampleTimingInfo.decodeTimeStamp = kCMTimeInvalid;
OSStatus myStatus;
//NSLog(@"numSamples of sampleBuffer: %i", CMSampleBufferGetNumSamples(sampleBuffer) );
myStatus = CMSampleBufferCreateCopyWithNewTiming(kCFAllocatorDefault,
sampleBuffer,
1,
&sampleTimingInfo, // maybe a little confused on this param.
&newSampleBuffer);
// These confirm the good health of our newSampleBuffer.
if (myStatus != 0) NSLog(@"CMSampleBufferCreateCopyWithNewTiming() myStatus: %i", myStatus);
if (! CMSampleBufferIsValid(newSampleBuffer)) NSLog(@"CMSampleBufferIsValid NOT!");
// No effect.
//myStatus = CMSampleBufferMakeDataReady(newSampleBuffer); // How is this different from CMSampleBufferSetDataReady?
//if (myStatus != 0) NSLog(@"CMSampleBufferMakeDataReady() myStatus: %i", myStatus);
imageSourceTime = CMSampleBufferGetPresentationTimeStamp(newSampleBuffer);
CMTimeShow(imageSourceTime);
appendSuccessFlag = [inputWriterBuffer appendSampleBuffer:newSampleBuffer];
//CMSampleBufferInvalidate(sampleBuffer); // Docs don't describe what this does. Doesn't seem to affect my problem. Used with CMSampleBufferSetInvalidateCallback maybe?
//CFRelease(sampleBuffer); // - Not surprisingly - “EXC_BAD_ACCESS”
}
if (!appendSuccessFlag)
{
NSLog(@"Failed to append pixel buffer");
}
else {
writtenFrames++;
NSLog(@"writtenFrames: %i", writtenFrames);
}
}
//[self displayOuptutWritterStatus]; // Expect and see AVAssetWriterStatusWriting.
}
My setup routine.
- (IBAction) recordingStartStop: (id) sender
{
NSError * error;
if (self.isRecording) {
NSLog(@"~~~~~~~~~ STOPPING RECORDING ~~~~~~~~~");
self.isRecording = NO;
[recordingStarStop setTitle: @"Record" forState: UIControlStateNormal];
//[self.captureSession stopRunning];
[inputWriterBuffer markAsFinished];
[outputWriter endSessionAtSourceTime:imageSourceTime];
[outputWriter finishWriting]; // Blocks until file is completely written, or an error occurs.
NSLog(@"finished CMtime");
CMTimeShow(imageSourceTime);
// Really, I should loop through the outputs and close all of them or target specific ones.
// Since I'm only recording video right now, I feel safe doing this.
[self.captureSession removeOutput: [[self.captureSession outputs] objectAtIndex: 0]];
[videoOutput release];
[inputWriterBuffer release];
[outputWriter release];
videoOutput = nil;
inputWriterBuffer = nil;
outputWriter = nil;
NSLog(@"~~~~~~~~~ STOPPED RECORDING ~~~~~~~~~");
NSLog(@"Calling UIVideoAtPathIsCompatibleWithSavedPhotosAlbum.");
NSLog(@"filePath: %@", [projectPaths movieFilePath]);
if (UIVideoAtPathIsCompatibleWithSavedPhotosAlbum([projectPaths movieFilePath])) {
NSLog(@"Calling UISaveVideoAtPathToSavedPhotosAlbum.");
UISaveVideoAtPathToSavedPhotosAlbum ([projectPaths movieFilePath], self, @selector(video:didFinishSavingWithError:contextInfo:), nil);
}
NSLog(@"~~~~~~~~~ WROTE RECORDING to PhotosAlbum ~~~~~~~~~");
}
else {
NSLog(@"~~~~~~~~~ STARTING RECORDING ~~~~~~~~~");
projectPaths = [[ProjectPaths alloc] initWithProjectFolder: @"TestProject"];
intervalFrames = 30;
videoOutput = [[AVCaptureVideoDataOutput alloc] init];
NSMutableDictionary * cameraVideoSettings = [[[NSMutableDictionary alloc] init] autorelease];
NSString* key = (NSString*)kCVPixelBufferPixelFormatTypeKey;
NSNumber* value = [NSNumber numberWithUnsignedInt: kCVPixelFormatType_32BGRA]; //kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange];
[cameraVideoSettings setValue: value forKey: key];
[videoOutput setVideoSettings: cameraVideoSettings];
[videoOutput setMinFrameDuration: CMTimeMake(20, 600)]; //CMTimeMake(1, 30)]; // 30fps
[videoOutput setAlwaysDiscardsLateVideoFrames: YES];
queue = dispatch_queue_create("cameraQueue", NULL);
[videoOutput setSampleBufferDelegate: self queue: queue];
dispatch_release(queue);
NSMutableDictionary *outputSettings = [[[NSMutableDictionary alloc] init] autorelease];
[outputSettings setValue: AVVideoCodecH264 forKey: AVVideoCodecKey];
[outputSettings setValue: [NSNumber numberWithInt: 1280] forKey: AVVideoWidthKey]; // currently assuming
[outputSettings setValue: [NSNumber numberWithInt: 720] forKey: AVVideoHeightKey];
NSMutableDictionary *compressionSettings = [[[NSMutableDictionary alloc] init] autorelease];
[compressionSettings setValue: AVVideoProfileLevelH264Main30 forKey: AVVideoProfileLevelKey];
//[compressionSettings setValue: [NSNumber numberWithDouble:1024.0*1024.0] forKey: AVVideoAverageBitRateKey];
[outputSettings setValue: compressionSettings forKey: AVVideoCompressionPropertiesKey];
inputWriterBuffer = [AVAssetWriterInput assetWriterInputWithMediaType: AVMediaTypeVideo outputSettings: outputSettings];
[inputWriterBuffer retain];
inputWriterBuffer.expectsMediaDataInRealTime = YES;
outputWriter = [AVAssetWriter assetWriterWithURL: [projectPaths movieURLPath] fileType: AVFileTypeQuickTimeMovie error: &error];
[outputWriter retain];
if (error) NSLog(@"error for outputWriter = [AVAssetWriter assetWriterWithURL:fileType:error:]");
if ([outputWriter canAddInput: inputWriterBuffer]) [outputWriter addInput: inputWriterBuffer];
else NSLog(@"can not add input");
if (![outputWriter canApplyOutputSettings: outputSettings forMediaType:AVMediaTypeVideo]) NSLog(@"outputSettings are NOT supported");
if ([captureSession canAddOutput: videoOutput]) [self.captureSession addOutput: videoOutput];
else NSLog(@"could not addOutput: videoOutput to captureSession");
//[self.captureSession startRunning];
self.isRecording = YES;
[recordingStarStop setTitle: @"Stop" forState: UIControlStateNormal];
writtenFrames = 0;
imageSourceTime = kCMTimeZero;
[outputWriter startWriting];
//[outputWriter startSessionAtSourceTime: imageSourceTime];
NSLog(@"~~~~~~~~~ STARTED RECORDING ~~~~~~~~~");
NSLog (@"recording to fileURL: %@", [projectPaths movieURLPath]);
}
NSLog(@"isRecording: %@", self.isRecording ? @"YES" : @"NO");
[self displayOuptutWritterStatus];
}
OK, I found the bug in my first post.
When using
myStatus = CMSampleBufferCreateCopyWithNewTiming(kCFAllocatorDefault,
sampleBuffer,
1,
&sampleTimingInfo,
&newSampleBuffer);
you need to balance that with a CFRelease(newSampleBuffer);
The same idea holds true when using a CVPixelBufferRef with the pixelBufferPool of an AVAssetWriterInputPixelBufferAdaptor instance. You would use CVPixelBufferRelease(yourCVPixelBufferRef); after calling the appendPixelBuffer:withPresentationTime: method.
Hope this is helpful to someone else.
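In context, the balanced pattern looks roughly like this (a sketch based on the snippet above):

myStatus = CMSampleBufferCreateCopyWithNewTiming(kCFAllocatorDefault,
                                                 sampleBuffer,
                                                 1,
                                                 &sampleTimingInfo,
                                                 &newSampleBuffer);
if (myStatus == noErr) {
    appendSuccessFlag = [inputWriterBuffer appendSampleBuffer:newSampleBuffer];
    CFRelease(newSampleBuffer);   // balances the Create and prevents the buffer leak
    newSampleBuffer = NULL;
}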
With a little more searching and reading I have a working solution. I don't know if it's the best method, but so far, so good.
In my setup area I've set up an AVAssetWriterInputPixelBufferAdaptor. The code addition looks like this.
inputWriterBufferAdaptor = [AVAssetWriterInputPixelBufferAdaptor
assetWriterInputPixelBufferAdaptorWithAssetWriterInput: inputWriterBuffer
sourcePixelBufferAttributes: nil];
[inputWriterBufferAdaptor retain];
For completeness to understand the code below, I also have these three lines in the setup method.
fpsOutput = 30; //Some possible values: 30, 10, 15, 24, 25, 30/1.001 or 29.97;
cmTimeSecondsDenominatorTimescale = 600 * 100000; //To more precisely handle 29.97.
cmTimeNumeratorValue = cmTimeSecondsDenominatorTimescale / fpsOutput;
Instead of applying a retiming to a copy of the sample buffer, I now have the following three lines of code that effectively do the same thing. Notice the withPresentationTime parameter of the adaptor: by passing my custom value to it, I get the correct timing I'm after.
CVPixelBufferRef myImage = CMSampleBufferGetImageBuffer( sampleBuffer );
imageSourceTime = CMTimeMake( writtenFrames * cmTimeNumeratorValue, cmTimeSecondsDenominatorTimescale);
appendSuccessFlag = [inputWriterBufferAdaptor appendPixelBuffer: myImage withPresentationTime: imageSourceTime];
Use of the AVAssetWriterInputPixelBufferAdaptor.pixelBufferPool property may have some gains, but I haven't figured that out.
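For reference, pulling buffers from that pool would look roughly like this (a sketch, assuming you render your own pixel data instead of appending the camera's buffer; the pool is only available once the writer has started a session):

CVPixelBufferRef poolBuffer = NULL;
CVReturn poolStatus = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault,
                                                         inputWriterBufferAdaptor.pixelBufferPool,
                                                         &poolBuffer);
if (poolStatus == kCVReturnSuccess) {
    // ... fill poolBuffer with your own pixel data here ...
    appendSuccessFlag = [inputWriterBufferAdaptor appendPixelBuffer:poolBuffer withPresentationTime:imageSourceTime];
    CVPixelBufferRelease(poolBuffer);   // balance the Create, as noted above
}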
I'm following a tutorial about playing sound with OpenAL. Now everything works fine except that I can't make the sound loop. I believe I've set AL_LOOPING on the source. Right now it plays only once, and when it finishes playing, the app blocks (it doesn't respond to my tap on the play button). Any ideas about what's wrong with the code?
// start up openAL
// init device and context
-(void)initOpenAL
{
// Initialization
mDevice = alcOpenDevice(NULL); // select the "preferred device"
if (mDevice) {
// use the device to make a context
mContext = alcCreateContext(mDevice, NULL);
// set my context to the currently active one
alcMakeContextCurrent(mContext);
}
}
// open the audio file
// returns a big audio ID struct
-(AudioFileID)openAudioFile:(NSString*)filePath
{
AudioFileID outAFID;
// use an NSURL instead of a CFURLRef because it is easier
NSURL * afUrl = [NSURL fileURLWithPath:filePath];
// do some platform specific stuff..
#if TARGET_OS_IPHONE
OSStatus result = AudioFileOpenURL((CFURLRef)afUrl, kAudioFileReadPermission, 0, &outAFID);
#else
OSStatus result = AudioFileOpenURL((CFURLRef)afUrl, fsRdPerm, 0, &outAFID);
#endif
if (result != 0) NSLog(@"cannot open file: %@", filePath);
return outAFID;
}
// find the audio portion of the file
// return the size in bytes
-(UInt32)audioFileSize:(AudioFileID)fileDescriptor
{
UInt64 outDataSize = 0;
UInt32 thePropSize = sizeof(UInt64);
OSStatus result = AudioFileGetProperty(fileDescriptor, kAudioFilePropertyAudioDataByteCount, &thePropSize, &outDataSize);
if(result != 0) NSLog(@"cannot find file size");
return (UInt32)outDataSize;
}
- (void)stopSound
{
alSourceStop(sourceID);
}
-(void)cleanUpOpenAL:(id)sender
{
// delete the sources
alDeleteSources(1, &sourceID);
// delete the buffers
alDeleteBuffers(1, &bufferID);
// destroy the context
alcDestroyContext(mContext);
// close the device
alcCloseDevice(mDevice);
}
-(IBAction)play:(id)sender
{
alSourcePlay(sourceID);
}
#pragma mark -
// Implement viewDidLoad to do additional setup after loading the view, typically from a nib.
- (void)viewDidLoad {
[super viewDidLoad];
[self initOpenAL];
// get the full path of the file
NSString* fileName = [[NSBundle mainBundle] pathForResource:@"sound" ofType:@"caf"];
// first, open the file
AudioFileID fileID = [self openAudioFile:fileName];
// find out how big the actual audio data is
UInt32 fileSize = [self audioFileSize:fileID];
// this is where the audio data will live for the moment
unsigned char * outData = malloc(fileSize);
// this where we actually get the bytes from the file and put them
// into the data buffer
OSStatus result = noErr;
result = AudioFileReadBytes(fileID, false, 0, &fileSize, outData);
AudioFileClose(fileID); //close the file
if (result != 0) NSLog(@"cannot load effect: %@", fileName);
//NSUInteger bufferID; // buffer is defined in the header file
// grab a buffer ID from openAL
alGenBuffers(1, &bufferID);
// jam the audio data into the new buffer
alBufferData(bufferID, AL_FORMAT_STEREO16, outData, fileSize, 8000);
//NSUInteger sourceID; // source is defined in the header file
// grab a source ID from openAL
alGenSources(1, &sourceID);
// attach the buffer to the source
alSourcei(sourceID, AL_BUFFER, bufferID);
// set some basic source prefs
alSourcef(sourceID, AL_PITCH, 1.0f);
alSourcef(sourceID, AL_GAIN, 1.0f);
alSourcei(sourceID, AL_LOOPING, AL_TRUE);
// clean up the buffer
if (outData)
{
free(outData);
outData = NULL;
}
}
You should be able to release outData right after your alBufferData() call. To exclude it as the culprit, you can try the static buffer extension and manage the memory yourself. It's something like:
alBufferDataStaticProcPtr alBufferDataStaticProc = (alBufferDataStaticProcPtr)alcGetProcAddress(0, (const ALCchar *)"alBufferDataStatic");
alBufferDataStaticProc(bufferID, bitChanFormat, audioData, audioDataSize, dataFormat.mSampleRate);
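One caveat with the static extension: the buffer references your memory directly, so (unlike alBufferData) you must not free audioData until the buffer has been deleted, roughly:

// Only free the data after the OpenAL buffer that references it is gone.
alDeleteBuffers(1, &bufferID);
free(audioData);
audioData = NULL;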