I am trying to run the 'Media Capture: Putting It All Together' example from the AVFoundation Programming Guide, but I keep getting a blank (black) image. Is there anything I need to call first so this code can access the camera?
Thanks.
This is the unmodified code from the example:
-(void) setupCapture {
AVCaptureSession *session = [[AVCaptureSession alloc] init];
session.sessionPreset = AVCaptureSessionPresetLow;
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
NSError *error = nil;
AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:device error:&error];
if (!input) {
// Handle the error appropriately.
NSLog(@"no input");
return;
}
[session addInput:input];
AVCaptureVideoDataOutput *output = [[[AVCaptureVideoDataOutput alloc] init] autorelease];
[session addOutput:output];
output.videoSettings = [NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_32BGRA]
forKey:(id)kCVPixelBufferPixelFormatTypeKey];
output.minFrameDuration = CMTimeMake(1, 15);
dispatch_queue_t queue = dispatch_queue_create("MyQueue", NULL);
[output setSampleBufferDelegate:self queue:queue];
dispatch_release(queue);
[session startRunning];
}
UIImage *imageFromSampleBuffer(CMSampleBufferRef sampleBuffer) {
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
// Lock the base address of the pixel buffer.
CVPixelBufferLockBaseAddress(imageBuffer,0);
// Get the number of bytes per row for the pixel buffer.
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
// Get the pixel buffer width and height.
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
// Create a device-dependent RGB color space.
static CGColorSpaceRef colorSpace = NULL;
if (colorSpace == NULL) {
colorSpace = CGColorSpaceCreateDeviceRGB();
if (colorSpace == NULL) {
// Handle the error appropriately.
return nil;
}
}
// Get the base address of the pixel buffer.
void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
// Get the data size for contiguous planes of the pixel buffer.
size_t bufferSize = CVPixelBufferGetDataSize(imageBuffer);
// Create a Quartz direct-access data provider that uses data we supply.
CGDataProviderRef dataProvider =
CGDataProviderCreateWithData(NULL, baseAddress, bufferSize, NULL);
// Create a bitmap image from data supplied by the data provider.
CGImageRef cgImage =
CGImageCreate(width, height, 8, 32, bytesPerRow,
colorSpace, kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Little,
dataProvider, NULL, true, kCGRenderingIntentDefault);
CGDataProviderRelease(dataProvider);
// Create and return an image object to represent the Quartz image.
UIImage *image = [UIImage imageWithCGImage:cgImage];
CGImageRelease(cgImage);
CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
return image;
}
and my callback method is:
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection {
UIImage *image = imageFromSampleBuffer(sampleBuffer);
NSLog(@"am I nil?: %@", image);
self.imageV.image = image;
[self.view setNeedsDisplay];
}
Tip 1:
Do not start the capture session in viewDidLoad; start it a little later.
Tip 2:
Do not update your UI in the capture session's callback method; do it on the main thread, as sketched below.
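A minimal sketch of both tips, reusing the setupCapture method and the imageV image view from the question (treat it as an outline, not a drop-in fix):
// Tip 1: start the session once the view is on screen, not in viewDidLoad.
- (void)viewDidAppear:(BOOL)animated {
    [super viewDidAppear:animated];
    [self setupCapture];
}
// Tip 2: the sample buffer callback runs on "MyQueue", so hop to the main
// thread before touching UIKit.
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection {
    UIImage *image = imageFromSampleBuffer(sampleBuffer);
    dispatch_async(dispatch_get_main_queue(), ^{
        self.imageV.image = image;
    });
}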
I am using the code below to make a video from a static 16:9 image using AVAssetWriter. The problem is that, for some reason, the video it produces is in a 4:3 format.
Can anyone suggest a way to either amend the code to produce a 16:9 video, or convert the 4:3 video to 16:9?
Thank you
- (void) createVideoFromStillImage
{
//Set the size according to the device type (iPhone or iPad).
CGSize size = CGSizeMake(screenWidth, screenHeight);
NSString *betaCompressionDirectory = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/IntroVideo.mov"];
NSError *error = nil;
unlink([betaCompressionDirectory UTF8String]);
//----initialize compression engine
AVAssetWriter *videoWriter = [[AVAssetWriter alloc] initWithURL:[NSURL fileURLWithPath:betaCompressionDirectory]
fileType:AVFileTypeQuickTimeMovie
error:&error];
NSParameterAssert(videoWriter);
if(error)
NSLog(@"error = %@", [error localizedDescription]);
NSDictionary *videoSettings = [NSDictionary dictionaryWithObjectsAndKeys: AVVideoCodecH264,AVVideoCodecKey,
[NSNumber numberWithInt:size.height], AVVideoWidthKey,
[NSNumber numberWithInt:size.width], AVVideoHeightKey, nil];
AVAssetWriterInput *writerInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:videoSettings];
NSDictionary *sourcePixelBufferAttributesDictionary = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithInt:kCVPixelFormatType_32ARGB], kCVPixelBufferPixelFormatTypeKey, nil];
AVAssetWriterInputPixelBufferAdaptor *adaptor = [AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:writerInput
sourcePixelBufferAttributes:sourcePixelBufferAttributesDictionary];
NSParameterAssert(writerInput);
NSParameterAssert([videoWriter canAddInput:writerInput]);
if ([videoWriter canAddInput:writerInput])
NSLog(#"I can add this input");
else
NSLog(#"i can't add this input");
[videoWriter addInput:writerInput];
[videoWriter startWriting];
[videoWriter startSessionAtSourceTime:kCMTimeZero];
//CGImageRef theImage = [finishedMergedImage CGImage];
CGImageRef theImage = [introImage CGImage];
//dispatch_queue_t dispatchQueue = dispatch_queue_create("mediaInputQueue", NULL);
int __block frame = 0;
//Calculate how much progress % one frame completion represents. Maximum of 75%.
float currentProgress = 0.0;
float progress = (80.0 / kDurationOfIntroOutro);
//NSLog(#"Progress is %f", progress);
for (int i=0; i<=kDurationOfIntroOutro; i++) {
//Update our progress view for every frame that is generated.
[self updateProgressView:currentProgress];
currentProgress +=progress;
//NSLog(#"CurrentProgress is %f", currentProgress);
frame++;
[NSThread sleepForTimeInterval:0.05]; //Delay to allow buffer to be ready.
CVPixelBufferRef buffer = (CVPixelBufferRef)[self pixelBufferFromCGImage:theImage size:size];
if (buffer) {
if (adaptor.assetWriterInput.readyForMoreMediaData)
{
if(![adaptor appendPixelBuffer:buffer withPresentationTime:CMTimeMake(frame, 20)])
NSLog(#"FAIL");
else
NSLog(#"Success:%d", frame);
CFRelease(buffer);
}
}
}
[writerInput markAsFinished];
[videoWriter finishWriting];
[videoWriter release];
//NSLog(#"outside for loop");
//Grab the URL for the video so we can use it later.
NSURL * url = [self applicationDocumentsDirectory : kIntroVideoFileName];
[assetURLArray setObject:url forKey:kIntroVideo];
}
- (CVPixelBufferRef )pixelBufferFromCGImage:(CGImageRef)image size:(CGSize)size
{
NSDictionary *options = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithBool:YES], kCVPixelBufferCGImageCompatibilityKey,
[NSNumber numberWithBool:YES], kCVPixelBufferCGBitmapContextCompatibilityKey, nil];
CVPixelBufferRef pxbuffer = NULL;
CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, size.width, size.height, kCVPixelFormatType_32ARGB, (CFDictionaryRef) options, &pxbuffer);
// CVReturn status = CVPixelBufferPoolCreatePixelBuffer(NULL, adaptor.pixelBufferPool, &pxbuffer);
NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);
CVPixelBufferLockBaseAddress(pxbuffer, 0);
void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
NSParameterAssert(pxdata != NULL);
CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(pxdata, size.width, size.height, 8, 4*size.width, rgbColorSpace, kCGImageAlphaPremultipliedFirst);
NSParameterAssert(context);
CGContextDrawImage(context, CGRectMake(0, 0, CGImageGetWidth(image), CGImageGetHeight(image)), image);
CGColorSpaceRelease(rgbColorSpace);
CGContextRelease(context);
CVPixelBufferUnlockBaseAddress(pxbuffer, 0);
return pxbuffer;
}
So that this can be closed out, I'll restate what I said above: the videoSettings dictionary should use the target dimensions of your video, but you're passing in the dimensions of your view. Unless that's what you want to record, you'll need to change the values passed for AVVideoWidthKey and AVVideoHeightKey to the correct output size, as in the sketch below.
Given that iOS device screens have aspect ratios close to 4:3, that is probably what was producing the 4:3 ratio in the recorded video.
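For example, if the target is a 1280x720 movie, the settings would be built from those output dimensions rather than from the screen size. A sketch, with 1280x720 chosen only as an illustrative 16:9 size:
// Use the intended output dimensions of the movie, not the view/screen size.
NSDictionary *videoSettings = [NSDictionary dictionaryWithObjectsAndKeys:
                               AVVideoCodecH264, AVVideoCodecKey,
                               [NSNumber numberWithInt:1280], AVVideoWidthKey,
                               [NSNumber numberWithInt:720], AVVideoHeightKey, nil];
The pixel buffers created in pixelBufferFromCGImage:size: should use the same dimensions, so the appended frames are not stretched to a different aspect ratio.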
I'm having memory issues while reading video frames from an existing video chosen from the iPhone library. First I added the UIImage frames themselves to an array, but I thought the array became too big for memory after a while, so instead I now save the UIImages to the Documents folder and add the image path to the array. However, I still get the same memory warnings, even though Instruments shows total allocations never exceeding 2.5 MB and finds no leaks. Can anyone think of something?
-(void)addFrame:(UIImage *)image
{
NSString *imgPath = [NSString stringWithFormat:@"%@/Analysis%d-%d.png", docFolder, currentIndex, framesArray.count];
[UIImagePNGRepresentation(image) writeToFile:imgPath atomically:YES];
[framesArray addObject:imgPath];
frameCount++;
}
-(void)imagePickerController:(UIImagePickerController *)picker didFinishPickingMediaWithInfo:(NSDictionary *)info
{
[picker dismissModalViewControllerAnimated:YES];
[framesArray removeAllObjects];
frameCount = 0;
// incoming video
NSURL *videoURL = [info valueForKey:UIImagePickerControllerMediaURL];
//NSLog(#"Video : %#", videoURL);
// AVURLAsset to read input movie (i.e. mov recorded to local storage)
NSDictionary *inputOptions = [NSDictionary dictionaryWithObject:[NSNumber numberWithBool:YES] forKey:AVURLAssetPreferPreciseDurationAndTimingKey];
AVURLAsset *inputAsset = [[AVURLAsset alloc] initWithURL:videoURL options:inputOptions];
// Load the input asset tracks information
[inputAsset loadValuesAsynchronouslyForKeys:[NSArray arrayWithObject:@"tracks"] completionHandler: ^{
NSError *error = nil;
nrFrames = CMTimeGetSeconds([inputAsset duration]) * 30;
NSLog(#"Total frames = %d", nrFrames);
// Check status of "tracks", make sure they were loaded
AVKeyValueStatus tracksStatus = [inputAsset statusOfValueForKey:@"tracks" error:&error];
if (tracksStatus != AVKeyValueStatusLoaded) {
// Failed to load the tracks.
return;
}
/* Read video samples from input asset video track */
AVAssetReader *reader = [AVAssetReader assetReaderWithAsset:inputAsset error:&error];
NSMutableDictionary *outputSettings = [NSMutableDictionary dictionary];
[outputSettings setObject: [NSNumber numberWithInt:kCVPixelFormatType_32BGRA] forKey: (NSString*)kCVPixelBufferPixelFormatTypeKey];
AVAssetReaderTrackOutput *readerVideoTrackOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:[[inputAsset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0] outputSettings:outputSettings];
// Assign the tracks to the reader and start to read
[reader addOutput:readerVideoTrackOutput];
if ([reader startReading] == NO) {
// Handle error
NSLog(#"Error reading");
}
NSAutoreleasePool *pool = [NSAutoreleasePool new];
while (reader.status == AVAssetReaderStatusReading)
{
if(!memoryProblem)
{
CMSampleBufferRef sampleBufferRef = [readerVideoTrackOutput copyNextSampleBuffer];
if (sampleBufferRef)
{
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBufferRef);
/*Lock the image buffer*/
CVPixelBufferLockBaseAddress(imageBuffer,0);
/*Get information about the image*/
uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
/*We unlock the image buffer*/
CVPixelBufferUnlockBaseAddress(imageBuffer,0);
/*Create a CGImageRef from the CVImageBufferRef*/
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
CGImageRef newImage = CGBitmapContextCreateImage(newContext);
/*We release some components*/
CGContextRelease(newContext);
CGColorSpaceRelease(colorSpace);
UIImage *image= [UIImage imageWithCGImage:newImage scale:[UIScreen mainScreen].scale orientation:UIImageOrientationRight];
//[self addFrame:image];
[self performSelectorOnMainThread:@selector(addFrame:) withObject:image waitUntilDone:YES];
/*We release the CGImageRef*/
CGImageRelease(newImage);
CMSampleBufferInvalidate(sampleBufferRef);
CFRelease(sampleBufferRef);
sampleBufferRef = NULL;
}
}
else
{
break;
}
}
[pool release];
NSLog(#"Finished");
}];
}
Try one thing:
Move the NSAutoreleasePool inside the while loop and drain it inside the loop.
So that it would look like this:
while (reader.status == AVAssetReaderStatusReading)
{
NSAutoreleasePool *pool = [NSAutoreleasePool new];
.....
[pool drain];
}
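With newer compilers the same pattern is usually written with an @autoreleasepool block, which also works under ARC (where NSAutoreleasePool is unavailable):
while (reader.status == AVAssetReaderStatusReading)
{
    @autoreleasepool {
        // ... copy the sample buffer, build the UIImage, hand it to addFrame: ...
    }
}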
I want to do image processing in real time using OpenCV.
My final goal is to show the result on the screen in real time while the device's camera is capturing video using the AVFoundation framework.
How can I process every video frame with OpenCV and show the result on the screen in real time?
Use AVAssetReader.
//Setup Reader
AVURLAsset * asset = [AVURLAsset URLAssetWithURL:urlvalue options:nil];
[asset loadValuesAsynchronouslyForKeys:[NSArray arrayWithObject:@"tracks"] completionHandler: ^{ dispatch_async(dispatch_get_main_queue(), ^{
AVAssetTrack * videoTrack = nil;
NSArray * tracks = [asset tracksWithMediaType:AVMediaTypeVideo];
if ([tracks count] == 1) {
videoTrack = [tracks objectAtIndex:0];
NSError * error = nil;
_movieReader = [[AVAssetReader alloc] initWithAsset:asset error:&error];
if (error)
NSLog(@"%@", error.localizedDescription);
NSString* key = (NSString*)kCVPixelBufferPixelFormatTypeKey;
NSNumber* value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_4444AYpCbCr16]; NSDictionary* videoSettings = [NSDictionary dictionaryWithObject:value forKey:key];
[_movieReader addOutput:[AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:videoTrack outputSettings:videoSettings]];
[_movieReader startReading];
}
});
}];
To get the next movie frame:
static int frameCount=0;
- (void) readNextMovieFrame {
if (_movieReader.status == AVAssetReaderStatusReading) {
AVAssetReaderTrackOutput * output = [_movieReader.outputs objectAtIndex:0];
CMSampleBufferRef sampleBuffer = [output copyNextSampleBuffer];
if (sampleBuffer) {
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
// Lock the image buffer
CVPixelBufferLockBaseAddress(imageBuffer,0);
// Get information of the image
uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
/*We unlock the image buffer*/
CVPixelBufferUnlockBaseAddress(imageBuffer,0);
/*Create a CGImageRef from the CVImageBufferRef*/
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
CGImageRef newImage = CGBitmapContextCreateImage(newContext);
/*We release some components*/
CGContextRelease(newContext);
CGColorSpaceRelease(colorSpace);
/*We display the result on the custom layer*/
/*self.customLayer.contents = (id) newImage;*/
/*We display the result on the image view (We need to change the orientation of the image so that the video is displayed correctly)*/
UIImage *image = [UIImage imageWithCGImage:newImage scale:1.0 orientation:UIImageOrientationRight];
UIGraphicsBeginImageContext(image.size);
[image drawAtPoint:CGPointMake(0, 0)];
// UIImage *img=UIGraphicsGetImageFromCurrentImageContext();
videoImage=UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
//videoImage=image;
// if (frameCount < 40) {
NSLog(#"readNextMovieFrame==%d",frameCount);
NSString* filename = [NSString stringWithFormat:#"Documents/frame_%d.png", frameCount];
NSString* pngPath = [NSHomeDirectory() stringByAppendingPathComponent:filename];
[UIImagePNGRepresentation(videoImage) writeToFile: pngPath atomically: YES];
frameCount++;
// }
/*The buffer was already unlocked above; release the CGImage to avoid leaking it*/
CGImageRelease(newImage);
CFRelease(sampleBuffer);
}
}
}
Once your _movieReader reaches the end, you need to restart it, as sketched below.
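An AVAssetReader cannot be rewound, so "restarting" means building a fresh reader. A sketch, assuming the asset, videoTrack, and videoSettings from the setup above were kept around as ivars (they are locals in that code):
- (void)restartMovieReaderIfFinished {
    if (_movieReader.status == AVAssetReaderStatusCompleted) {
        NSError *error = nil;
        [_movieReader release];
        _movieReader = [[AVAssetReader alloc] initWithAsset:_asset error:&error];
        [_movieReader addOutput:[AVAssetReaderTrackOutput
                                 assetReaderTrackOutputWithTrack:_videoTrack
                                 outputSettings:_videoSettings]];
        [_movieReader startReading];
    }
}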
In an iPhone app, can we take pictures at particular time intervals programmatically, using the iPhone device camera?
If yes, please let me know how we can take pictures programmatically in an iPhone app.
Please help and suggest.
Thanks,
UIImagePickerController has a takePicture method that can be called programmatically.
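A minimal sketch of using that for timed captures; the cameraPicker and captureTimer properties and the 5-second interval are illustrative names and values of mine, and takePicture only works when the source type is the camera (it is normally paired with showsCameraControls = NO and a custom overlay):
- (void)startTimedCapture {
    self.cameraPicker = [[[UIImagePickerController alloc] init] autorelease];
    self.cameraPicker.sourceType = UIImagePickerControllerSourceTypeCamera;
    self.cameraPicker.showsCameraControls = NO;
    self.cameraPicker.delegate = self;
    [self presentModalViewController:self.cameraPicker animated:YES];
    // Fire every 5 seconds; each shot arrives in
    // imagePickerController:didFinishPickingMediaWithInfo:.
    self.captureTimer = [NSTimer scheduledTimerWithTimeInterval:5.0
                                                         target:self
                                                       selector:@selector(takeTimedPicture)
                                                       userInfo:nil
                                                        repeats:YES];
}
- (void)takeTimedPicture {
    [self.cameraPicker takePicture];
}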
You can use UIImagePickerController's takePicture method to take a picture.
For more control over the picture, you can use the AVFoundation framework, which provides the AVCaptureStillImageOutput class for capturing still images, as sketched below. More info
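A rough sketch of that AVCaptureStillImageOutput route (it assumes a session variable pointing at an already-configured, running AVCaptureSession with a camera input; the other names are mine):
AVCaptureStillImageOutput *stillOutput = [[[AVCaptureStillImageOutput alloc] init] autorelease];
stillOutput.outputSettings = [NSDictionary dictionaryWithObject:AVVideoCodecJPEG
                                                          forKey:AVVideoCodecKey];
[session addOutput:stillOutput];
// Call this part from a timer for interval shots.
AVCaptureConnection *connection = [stillOutput connectionWithMediaType:AVMediaTypeVideo];
[stillOutput captureStillImageAsynchronouslyFromConnection:connection
                                          completionHandler:^(CMSampleBufferRef imageBuffer, NSError *error) {
    if (imageBuffer) {
        NSData *jpegData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageBuffer];
        UIImage *photo = [UIImage imageWithData:jpegData];
        // Save or process the captured image here.
    }
}];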
Import these files in your .h:
AVFoundation/AVCaptureInput.h
AVFoundation/AVCaptureDevice.h
AVFoundation/AVCaptureOutput.h
AVFoundation/AVMediaFormat.h
Put this in your .m:
- (AVCaptureDevice *)frontCamera
{
NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
for (AVCaptureDevice *device in devices)
{
if ([device position] == AVCaptureDevicePositionFront)
{
return device;
}
}
return nil;
}
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection
{
CGImageRef cgImage = [self imageFromSampleBuffer:sampleBuffer];
self.theImage = [UIImage imageWithCGImage: cgImage ];
CGImageRelease( cgImage );
NSCalendar *sysCalendar = [[NSCalendar alloc]initWithCalendarIdentifier:NSCalendarIdentifierGregorian];
NSDateFormatter *df = [[NSDateFormatter alloc]init];
df.calendar = sysCalendar;
[df setDateFormat:@"dd_MM_yyyy hh:mm:ss"];
StrCapture = [NSString stringWithFormat:@"%@.jpeg", [df stringFromDate:[NSDate date]]];
NSLog(@"StrCapture : %@", StrCapture);
NSData *imageData = UIImageJPEGRepresentation(self.theImage,1);
NSFileManager *fileManager = [NSFileManager defaultManager];
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory,NSUserDomainMask, YES);
NSString *documentsDirectory = [paths objectAtIndex:0];
NSString *fullPath = [documentsDirectory stringByAppendingPathComponent:StrCapture];
[fileManager createFileAtPath:fullPath contents:imageData attributes:nil];
NSLog(#"ImagePAth : %#",fullPath);
}
- (CGImageRef) imageFromSampleBuffer:(CMSampleBufferRef) sampleBuffer
{
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(imageBuffer,0);
uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0);
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
CGImageRef newImage = CGBitmapContextCreateImage(newContext);
CGContextRelease(newContext);
CGColorSpaceRelease(colorSpace);
CVPixelBufferUnlockBaseAddress(imageBuffer,0);
return newImage;
}
I'm trying to use AVCaptureSession to get images from the front camera for processing. So far, each time a new frame is available I simply assign it to a variable, and an NSTimer checks every tenth of a second whether there's a new frame; if there is, it processes it.
I would like to get a frame, freeze the camera, and get the next frame whenever I like. Something like [captureSession getNextFrame], you know?
Here's part of my code, although I'm not sure how helpful it may be:
- (void)startFeed {
loopTimerIndex = 0;
NSArray *captureDevices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
AVCaptureDeviceInput *captureInput = [AVCaptureDeviceInput
deviceInputWithDevice:[captureDevices objectAtIndex:1]
error:nil];
AVCaptureVideoDataOutput *captureOutput = [[AVCaptureVideoDataOutput alloc] init];
captureOutput.minFrameDuration = CMTimeMake(1, 10);
captureOutput.alwaysDiscardsLateVideoFrames = true;
dispatch_queue_t queue;
queue = dispatch_queue_create("cameraQueue", nil);
[captureOutput setSampleBufferDelegate:self queue:queue];
dispatch_release(queue);
NSString *key = (NSString *)kCVPixelBufferPixelFormatTypeKey;
NSNumber *value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA];
NSDictionary *videoSettings = [NSDictionary dictionaryWithObject:value forKey:key];
[captureOutput setVideoSettings:videoSettings];
captureSession = [[AVCaptureSession alloc] init];
captureSession.sessionPreset = AVCaptureSessionPresetLow;
[captureSession addInput:captureInput];
[captureSession addOutput:captureOutput];
imageView = [[UIImage alloc] init];
[captureSession startRunning];
}
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection {
loopTimerIndex++;
NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(imageBuffer, 0);
uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
CGImageRef newImage = CGBitmapContextCreateImage(newContext);
CGContextRelease(newContext);
CGColorSpaceRelease(colorSpace);
imageView = [UIImage imageWithCGImage:newImage scale:1.0 orientation:UIImageOrientationLeftMirrored];
[delegate updatePresentor:imageView];
if(loopTimerIndex == 1) {
[delegate feedStarted];
}
CGImageRelease(newImage);
CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
[pool drain];
}
You don't actively poll the camera to get frames back, because that's not how the capture process is architected. Instead, if you would like to only display frames every tenth of a second instead of every 1/30th or faster, you should just ignore the frames in between.
For example, you could store a timestamp and compare against it every time -captureOutput:didOutputSampleBuffer:fromConnection: is triggered. If at least 0.1 seconds have elapsed since that stored timestamp, process and display the camera frame and reset the timestamp to the current time. Otherwise, ignore the frame.
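A minimal sketch of that check inside the delegate callback; lastFrameTime is a CFAbsoluteTime ivar I'm introducing for illustration (initially 0):
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection {
    CFAbsoluteTime now = CFAbsoluteTimeGetCurrent();
    if (now - lastFrameTime < 0.1) {
        return; // ignore frames that arrive within 0.1 s of the last one used
    }
    lastFrameTime = now;
    // ... build the UIImage from sampleBuffer and hand it to the delegate, as above ...
}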