I'm trying to get audio to work with the video for an iOS application. The video is fine. No audio is recorded to the file (My iPhone speaker works.)
Here's the init setup:
// Capture-session setup: builds one AVCaptureSession with camera + microphone
// devices, a BGRA video-data output on its own serial queue, and an audio-data
// output handled by a separate delegate object. MRC (manual retain/release) code.
session = [[AVCaptureSession alloc] init];
menu->session = session;
menu_open = NO;
// Medium preset; restateConfigiration later switches presets per quality setting.
session.sessionPreset = AVCaptureSessionPresetMedium;
camera = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
microphone = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
menu->camera = camera;
// Configure the camera device: continuous auto exposure/focus/white balance,
// torch on. Each mode is feature-checked before being applied.
[session beginConfiguration];
[camera lockForConfiguration:nil];
if([camera isExposureModeSupported:AVCaptureExposureModeContinuousAutoExposure]){
camera.exposureMode = AVCaptureExposureModeContinuousAutoExposure;
}
if([camera isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]){
camera.focusMode = AVCaptureFocusModeContinuousAutoFocus;
}
if([camera isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance]){
camera.whiteBalanceMode = AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance;
}
if ([camera hasTorch]) {
if([camera isTorchModeSupported:AVCaptureTorchModeOn]){
[camera setTorchMode:AVCaptureTorchModeOn];
}
}
[camera unlockForConfiguration];
[session commitConfiguration];
AVCaptureDeviceInput * camera_input = [AVCaptureDeviceInput deviceInputWithDevice:camera error:nil];
[session addInput:camera_input];
// Retained so the input survives this scope; it is added/removed from the
// session when recording starts/stops (see the recording toggle).
microphone_input = [[AVCaptureDeviceInput deviceInputWithDevice:microphone error:nil] retain];
AVCaptureVideoDataOutput * output = [[[AVCaptureVideoDataOutput alloc] init] autorelease];
// BGRA pixel format so frames can be consumed directly as bitmaps.
output.videoSettings = [NSDictionary dictionaryWithObject: [NSNumber numberWithInt:kCVPixelFormatType_32BGRA] forKey:(id)kCVPixelBufferPixelFormatTypeKey];
[session addOutput:output];
// NOTE(review): minFrameDuration on the output is deprecated; frame-rate is
// normally configured on the AVCaptureConnection/device — TODO confirm target SDK.
output.minFrameDuration = CMTimeMake(1,30);
dispatch_queue_t queue = dispatch_queue_create("MY QUEUE", NULL);
[output setSampleBufferDelegate:self queue:queue];
dispatch_release(queue);
// NOTE(review): alloc/init already returns a +1 (owned) object; the extra
// retain gives +2 and this output will leak. Drop the retain.
audio_output = [[[AVCaptureAudioDataOutput alloc] init] retain];
// NOTE(review): same label as the video queue — give each queue a unique label.
queue = dispatch_queue_create("MY QUEUE", NULL);
AudioOutputBufferDelegate * special_delegate = [[[AudioOutputBufferDelegate alloc] init] autorelease];
special_delegate->normal_delegate = self;
// Retain so the delegate outlives this scope (sample-buffer delegates are not
// retained by the output).
[special_delegate retain];
[audio_output setSampleBufferDelegate:special_delegate queue:queue];
dispatch_release(queue);
[session startRunning];
Here is the beginning and end of recording:
// Toggles recording. Stop path: finish the asset writer, save the movie to the
// photo album, and detach the microphone input/audio output from the session.
// Start path: attach audio, create a fresh AVAssetWriter with H.264 video and
// AAC audio inputs, and start the writing session at time zero.
// (All @-sigils restored: the pasted snippet had them garbled to '#'.)
if (recording) { //Hence stop recording
    [video_button setTitle:@"Video" forState: UIControlStateNormal];
    recording = NO;
    [writer_input markAsFinished];
    [audio_writer_input markAsFinished];
    // End the session at the elapsed wall-clock time since recording started.
    [video_writer endSessionAtSourceTime: CMTimeMakeWithSeconds([[NSDate date] timeIntervalSinceDate: start_time],30)];
    [video_writer finishWriting];
    UISaveVideoAtPathToSavedPhotosAlbum(temp_url,self,@selector(video:didFinishSavingWithError:contextInfo:),nil);
    [start_time release];
    [temp_url release];
    [av_adaptor release];
    // Remove the audio plumbing while not recording.
    [microphone lockForConfiguration:nil];
    [session beginConfiguration];
    [session removeInput:microphone_input];
    [session removeOutput:audio_output];
    [session commitConfiguration];
    [microphone unlockForConfiguration];
    [menu restateConfigiration];
    [vid_off play];
}else{ //Start recording
    [vid_on play];
    [microphone lockForConfiguration:nil];
    [session beginConfiguration];
    [session addInput:microphone_input];
    [session addOutput:audio_output];
    [session commitConfiguration];
    [microphone unlockForConfiguration];
    [menu restateConfigiration];
    [video_button setTitle:@"Stop" forState: UIControlStateNormal];
    recording = YES;
    NSError *error = nil;
    NSFileManager * file_manager = [[NSFileManager alloc] init];
    temp_url = [[NSString alloc] initWithFormat:@"%@/%@", NSTemporaryDirectory(), @"temp.mp4"];
    // Remove any leftover file from a previous run; AVAssetWriter refuses to
    // overwrite an existing file.
    [file_manager removeItemAtPath: temp_url error:NULL];
    [file_manager release];
    video_writer = [[AVAssetWriter alloc] initWithURL: [NSURL fileURLWithPath:temp_url] fileType: AVFileTypeMPEG4 error: &error];
    NSDictionary *video_settings = [NSDictionary dictionaryWithObjectsAndKeys: AVVideoCodecH264, AVVideoCodecKey,[NSNumber numberWithInt:360], AVVideoWidthKey,[NSNumber numberWithInt:480], AVVideoHeightKey,nil];
    writer_input = [[AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:video_settings] retain];
    // FIX: live-capture sources must mark the input as real-time so the writer
    // interleaves correctly (the audio input below already did this).
    writer_input.expectsMediaDataInRealTime = YES;
    AudioChannelLayout acl;
    bzero( &acl, sizeof(acl));
    acl.mChannelLayoutTag = kAudioChannelLayoutTag_Mono;
    // Mono AAC at 44.1 kHz / 64 kbps.
    audio_writer_input = [[AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeAudio outputSettings: [NSDictionary dictionaryWithObjectsAndKeys: [NSNumber numberWithInt: kAudioFormatMPEG4AAC], AVFormatIDKey,[NSNumber numberWithInt: 1], AVNumberOfChannelsKey,[NSNumber numberWithFloat: 44100.0], AVSampleRateKey,[NSNumber numberWithInt: 64000], AVEncoderBitRateKey,[NSData dataWithBytes: &acl length: sizeof(acl) ], AVChannelLayoutKey,nil]] retain];
    audio_writer_input.expectsMediaDataInRealTime = YES;
    av_adaptor = [[AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput: writer_input sourcePixelBufferAttributes:NULL] retain];
    [video_writer addInput:writer_input];
    [video_writer addInput: audio_writer_input];
    [video_writer startWriting];
    [video_writer startSessionAtSourceTime: CMTimeMake(0,1)];
    start_time = [[NSDate alloc] init];
}
Here is the delegate for the audio:
@implementation AudioOutputBufferDelegate

/// Receives captured audio sample buffers from an AVCaptureAudioDataOutput and
/// forwards them to the recorder's audio writer input while recording is active.
/// Runs on the dispatch queue passed to -setSampleBufferDelegate:queue:.
/// (Restored the @implementation/@end sigils the paste garbled to '#', and fixed
/// the "conenction" parameter-name typo.)
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
    if (normal_delegate->recording) {
        // NOTE(review): overwriting the output PTS with wall-clock elapsed time
        // is fragile; AVFoundation already stamps buffers — prefer
        // CMSampleBufferGetPresentationTimeStamp(sampleBuffer). TODO confirm.
        CMSampleBufferSetOutputPresentationTimeStamp(sampleBuffer, CMTimeMakeWithSeconds([[NSDate date] timeIntervalSinceDate: normal_delegate->start_time], 30));
        [normal_delegate->audio_writer_input appendSampleBuffer: sampleBuffer];
    }
}

@end
The video method doesn't matter because it works. "restateConfigiration" just sorts out the session configuration otherwise the torch goes off etc:
// Re-applies the session preset and camera device configuration. Needed
// because reconfiguring the session (adding/removing audio) resets device
// state such as the torch.
[session beginConfiguration];
// Map the user-selected quality to a concrete session preset.
switch (quality) {
case Low:
session.sessionPreset = AVCaptureSessionPresetLow;
break;
case Medium:
// NOTE(review): "Medium" maps to the 640x480 preset, not PresetMedium —
// presumably intentional; confirm against the quality enum's meaning.
session.sessionPreset = AVCaptureSessionPreset640x480;
break;
}
[session commitConfiguration];
// Re-apply continuous auto exposure/focus/white balance and the torch state,
// feature-checking each mode first.
[camera lockForConfiguration:nil];
if([camera isExposureModeSupported:AVCaptureExposureModeContinuousAutoExposure]){
camera.exposureMode = AVCaptureExposureModeContinuousAutoExposure;
}
if([camera isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]){
camera.focusMode = AVCaptureFocusModeContinuousAutoFocus;
}
if([camera isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance]){
camera.whiteBalanceMode = AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance;
}
// Restore the torch to the user's "torch" flag.
if ([camera hasTorch]) {
if (torch) {
if([camera isTorchModeSupported:AVCaptureTorchModeOn]){
[camera setTorchMode:AVCaptureTorchModeOn];
}
}else{
if([camera isTorchModeSupported:AVCaptureTorchModeOff]){
[camera setTorchMode:AVCaptureTorchModeOff];
}
}
}
[camera unlockForConfiguration];
Thank you for any help.
AVAssetWriter and Audio
This may be the same issue as mentioned in the linked post. Try commenting out these lines
[writer_input markAsFinished];
[audio_writer_input markAsFinished];
[video_writer endSessionAtSourceTime: CMTimeMakeWithSeconds([[NSDate date] timeIntervalSinceDate: start_time],30)];
Edit
I don't know if the way you are setting the presentation time stamp is necessarily wrong. The way I handle this is with a local variable that is set to 0 on start. Then when my delegate receives the first packet I do:
if (_startTime.value == 0) {
_startTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
}
and then
[bufferWriter->writer startWriting];
[bufferWriter->writer startSessionAtSourceTime:_startTime];
Your code looks valid as you are calculating the time difference for each received packet. However, AVFoundation calculates this for you, and also optimizes the timestamps for placement in the interleaved container. Another thing I am unsure of: each CMSampleBufferRef for audio contains more than one data buffer, where each data buffer has its own PTS. I am not sure whether setting the PTS automatically adjusts all the other data buffers.
Where my code differs from yours is I use a single dispatch queue for both audio and video. In the callback I use (some code removed).
// Dispatches a captured sample buffer according to the asset writer's state.
// Unknown: lazily start writing at the first buffer's PTS, then fall through
// to Writing if the start succeeded. Writing: append to the matching input.
// (Restored @try/@catch/@"..." sigils the paste garbled to '#', and removed a
// stray double semicolon.)
switch (bufferWriter->writer.status) {
    case AVAssetWriterStatusUnknown:
        // Pin the session start to the first buffer's presentation time so
        // appended buffers are never earlier than the source time.
        if (_startTime.value == 0) {
            _startTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
        }
        [bufferWriter->writer startWriting];
        [bufferWriter->writer startSessionAtSourceTime:_startTime];
        //Break if not ready, otherwise fall through.
        if (bufferWriter->writer.status != AVAssetWriterStatusWriting) {
            break;
        }
    case AVAssetWriterStatusWriting:
        if (captureOutput == self.captureManager.audioOutput) {
            // Dropping the buffer is correct for real-time capture when the
            // input is not ready; blocking would stall the capture queue.
            if (!bufferWriter->audioIn.readyForMoreMediaData) {
                break;
            }
            @try {
                if (![bufferWriter->audioIn appendSampleBuffer:sampleBuffer]) {
                    [self delegateMessage:@"Audio Writing Error" withType:ERROR];
                }
            }
            @catch (NSException *e) {
                NSLog(@"Audio Exception: %@", [e reason]);
            }
        }
        else if (captureOutput == self.captureManager.videoOutput) {
            if (!bufferWriter->videoIn.readyForMoreMediaData) {
                break;
            }
            @try {
                if (!frontCamera) {
                    if (![bufferWriter->videoIn appendSampleBuffer:sampleBuffer]) {
                        [self delegateMessage:@"Video Writing Error" withType:ERROR];
                    }
                }
                else {
                    // Front camera frames are mirrored; flip into pixelBuffer
                    // and append through the pixel-buffer adaptor instead.
                    CMTime pt = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
                    flipBuffer(sampleBuffer, pixelBuffer);
                    if (![bufferWriter->adaptor appendPixelBuffer:pixelBuffer withPresentationTime:pt]) {
                        [self delegateMessage:@"Video Writing Error" withType:ERROR];
                    }
                }
            }
            @catch (NSException *e) {
                NSLog(@"Video Exception Exception: %@", [e reason]);
            }
        }
        break;
    case AVAssetWriterStatusCompleted:
        return;
    case AVAssetWriterStatusFailed:
        [self delegateMessage:@"Critical Error Writing Queues" withType:ERROR];
        bufferWriter->writer_failed = YES;
        _broadcastError = YES;
        [self stopCapture];
        return;
    case AVAssetWriterStatusCancelled:
        break;
    default:
        break;
}
Related
I'm having a problem with recording video+audio on iOS 7.
I created the application on iOS 6.1.3 and everything worked perfectly until I updated my iPhone to iOS 7.
I use AVCaptureSession and connect input devices(microphone and camera);
Please see my code below, this is how I setup devices:
/// Builds the capture session: activates a play-and-record audio session,
/// attaches the camera at the requested position, the microphone, and a movie
/// file output. On failure, reports through the error out-parameter.
/// Fixes vs. original: @-sigils restored from '#' garbling; *error is only
/// written when the caller supplied a pointer (NSError** is nullable);
/// the "already set up" guard now runs before any outputs/observers are
/// created; the requested torch mode `tm` is actually applied (the original
/// checked `tm` but always set Off).
- (void)setupSessionWithPreset:(NSString *)preset withCaptureDevice:(AVCaptureDevicePosition)cd withTorchMode:(AVCaptureTorchMode)tm withError:(NSError **)error
{
    // Guard first so a second call cannot re-create the output or
    // double-register notification observers.
    if(setupComplete)
    {
        if (error) {
            *error = [NSError errorWithDomain:@"Setup session already complete." code:102 userInfo:nil];
        }
        return;
    }
    setupComplete = YES;

    // Route audio for simultaneous playback and recording.
    AVAudioSession *audioSession = [AVAudioSession sharedInstance];
    [audioSession setCategory:AVAudioSessionCategoryPlayAndRecord error:nil];
    [audioSession setActive:YES error:nil];

    self.movieFileOutput = [[AVCaptureMovieFileOutput alloc] init];
    [self startNotificationObservers];

    AVCaptureDevice *captureDevice = [self cameraWithPosition:cd];
    if ([captureDevice hasTorch])
    {
        if ([captureDevice lockForConfiguration:nil])
        {
            if ([captureDevice isTorchModeSupported:tm])
            {
                // Apply the caller's requested torch mode.
                [captureDevice setTorchMode:tm];
            }
            [captureDevice unlockForConfiguration];
        }
    }

    self.captureSession = [[AVCaptureSession alloc] init];
    self.captureSession.sessionPreset = preset;

    NSError *errorVI = nil;
    self.videoInput = [[AVCaptureDeviceInput alloc] initWithDevice:captureDevice error:&errorVI];
    NSLog(@"Video Input :%@", errorVI);
    if([self.captureSession canAddInput:self.videoInput])
    {
        [self.captureSession addInput:self.videoInput];
    }
    else
    {
        if (error) {
            *error = [NSError errorWithDomain:@"Error setting video input." code:101 userInfo:nil];
        }
        return;
    }

    NSError *errorAI = nil;
    self.audioInput = [[AVCaptureDeviceInput alloc] initWithDevice:[self audioDevice] error:&errorAI];
    if([self.captureSession canAddInput:self.audioInput])
    {
        [self.captureSession addInput:self.audioInput];
    }
    else
    {
        // On iOS 7+ this commonly means microphone permission was denied.
        if (error) {
            *error = [NSError errorWithDomain:@"Settings->Privacy->Microphone->VideoFrames(On)" code:500 userInfo:nil];
        }
        return;
    }

    if([self.captureSession canAddOutput:self.movieFileOutput])
    {
        [self.captureSession addOutput:self.movieFileOutput];
    }
    else
    {
        if (error) {
            *error = [NSError errorWithDomain:@"Error setting file output." code:101 userInfo:nil];
        }
        return;
    }
}
As a result I received only a thumbnail and complete audio, but no video. Please help me — I would really appreciate it.
I want to change the video orientation from the front camera,
so I chose previewLayer.orientation, and it does indeed work.
However, that method is deprecated as of iOS 6, and I get a warning to use previewLayer.connection.videoOrientation instead,
but I cannot access the connection property from previewLayer.
/// Swaps the session's video input for the front-facing camera and tears down
/// the existing preview layer, inside a begin/commitConfiguration pair.
/// Fix vs. original: under MRC the alloc'd device input was never released
/// (setting the local to nil does not release), leaking one input per call.
- (void)addDeviceInput {
    [session beginConfiguration];

    // Detach the current input before attaching the front camera.
    [session removeInput:videoInput];
    [self setVideoInput:nil];

    AVCaptureDeviceInput *newDeviceInput = [[AVCaptureDeviceInput alloc] initWithDevice:[self frontFacingCamera] error:nil];
    if ([session canAddInput:newDeviceInput]) {
        [session addInput:newDeviceInput];
        [self setVideoInput:newDeviceInput];
    }
    // MRC: the session and the videoInput property hold their own retains;
    // balance the +1 from alloc/init here.
    [newDeviceInput release];
    newDeviceInput = nil;

    // The preview layer is rebuilt by the caller after the input swap.
    if (previewLayer) {
        [previewLayer removeFromSuperlayer];
        [previewLayer release];
        previewLayer = nil;
    }

    [session commitConfiguration];
}
// Initializes the camera pipeline: a 352x288 session with the front camera
// (via -addDeviceInput), a BGRA video-data output, and a preview layer, then
// starts the session running. MRC code.
// NOTE(review): this is named like a plain method but behaves as an
// initializer (it assigns self from [super init]); consider renaming to an
// init... method so compiler method-family rules apply. TODO confirm callers.
- (id)setupCameraSession {
if (!(self = [super init])) {
return nil;
}
self.frameRate = 0;
session = [[AVCaptureSession alloc] init];
[session beginConfiguration];
[session setSessionPreset:AVCaptureSessionPreset352x288];
[self addDeviceInput]; //invoke previous method
AVCaptureVideoDataOutput * newVideoDataOutput = [[AVCaptureVideoDataOutput alloc] init];
// BGRA output so frames can be consumed directly as bitmaps.
newVideoDataOutput.videoSettings = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithInt:kCVPixelFormatType_32BGRA],
kCVPixelBufferPixelFormatTypeKey,nil];
// Drop late frames rather than queueing them (keeps preview latency low).
newVideoDataOutput.alwaysDiscardsLateVideoFrames = YES;
if ([session canAddOutput:newVideoDataOutput]) {
[session addOutput:newVideoDataOutput];
[self setVideoOutput:newVideoDataOutput];
}
// MRC: session/property retain the output; balance the alloc here.
[newVideoDataOutput release];
self.frameRate = VIDEO_FRAME_RATE;
[session commitConfiguration];
AVCaptureVideoPreviewLayer * newPreviewLayer =
[[AVCaptureVideoPreviewLayer alloc] initWithSession:session];
//[newPreviewLayer setSession:session]; //it doesn't work? and get error
[self setPreviewLayer:newPreviewLayer];
[newPreviewLayer release];
[session startRunning];
return self;
}
I get the error
error: Execution was interrupted, reason: Attempted to dereference an invalid ObjC Object or send it an unrecognized selector.
I am using AVCaptureSession to record video while at the same time using STT (the Google Speech-to-Text API) to convert voice into text. I am facing a problem: when I click on the speak button, the camera freezes. Any correct answer will be acceptable. Thanks in advance.
To start camera in
-(void)viewDidLoad;
// Builds the video preview layer over videoPreviewView and starts the capture
// session asynchronously (startRunning blocks until the session is running).
// Fix vs. original: AVCaptureVideoOrientation values are an enum, NOT a
// bitmask — ORing LandscapeLeft|LandscapeRight produced an invalid value.
// A single concrete orientation is set instead.
if ([[self captureManager] setupSession]) {
    // Create video preview layer and add it to the UI
    AVCaptureVideoPreviewLayer *newCaptureVideoPreviewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:[[self captureManager] session]];
    UIView *view = [self videoPreviewView];
    CALayer *viewLayer = [view layer];
    [viewLayer setMasksToBounds:YES];
    CGRect bounds = [view bounds];
    [newCaptureVideoPreviewLayer setFrame:bounds];
    if ([newCaptureVideoPreviewLayer isOrientationSupported]) {
        // Pick one orientation; adjust to match the device orientation if the
        // app supports both landscape directions.
        [newCaptureVideoPreviewLayer setOrientation:AVCaptureVideoOrientationLandscapeRight];
    }
    [newCaptureVideoPreviewLayer setVideoGravity:AVLayerVideoGravityResizeAspectFill];
    [viewLayer insertSublayer:newCaptureVideoPreviewLayer below:[[viewLayer sublayers] objectAtIndex:0]];
    [self setCaptureVideoPreviewLayer:newCaptureVideoPreviewLayer];
    [newCaptureVideoPreviewLayer release];
    // Start the session. This is done asychronously since -startRunning doesn't return until the session is running.
    dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
        [[[self captureManager] session] startRunning];
    });
    [self updateButtonStates];
}
/// Configures the whole capture stack: flash/torch on the back camera, video
/// (front camera) + audio inputs, a JPEG still-image output, and a movie
/// recorder; reports "audio-only" capability through the delegate. MRC code.
/// (Restored the @-sigils the paste garbled to '#' in the error/selector
/// section; behavior otherwise unchanged.)
- (BOOL) setupSession
{
    BOOL success = NO;
    // Set torch and flash mode to auto
    if ([[self backFacingCamera] hasFlash]) {
        if ([[self backFacingCamera] lockForConfiguration:nil]) {
            if ([[self backFacingCamera] isFlashModeSupported:AVCaptureFlashModeAuto]) {
                [[self backFacingCamera] setFlashMode:AVCaptureFlashModeAuto];
            }
            [[self backFacingCamera] unlockForConfiguration];
        }
    }
    if ([[self backFacingCamera] hasTorch]) {
        if ([[self backFacingCamera] lockForConfiguration:nil]) {
            if ([[self backFacingCamera] isTorchModeSupported:AVCaptureTorchModeAuto]) {
                [[self backFacingCamera] setTorchMode:AVCaptureTorchModeAuto];
            }
            [[self backFacingCamera] unlockForConfiguration];
        }
    }
    // Init the device inputs. NOTE(review): flash/torch were configured on the
    // BACK camera but the video input uses the FRONT camera — confirm intended.
    AVCaptureDeviceInput *newVideoInput = [[AVCaptureDeviceInput alloc] initWithDevice:[self frontFacingCamera] error:nil];
    AVCaptureDeviceInput *newAudioInput = [[AVCaptureDeviceInput alloc] initWithDevice:[self audioDevice] error:nil];
    // Setup the still image file output
    AVCaptureStillImageOutput *newStillImageOutput = [[AVCaptureStillImageOutput alloc] init];
    NSDictionary *outputSettings = [[NSDictionary alloc] initWithObjectsAndKeys:
                                    AVVideoCodecJPEG, AVVideoCodecKey,
                                    nil];
    [newStillImageOutput setOutputSettings:outputSettings];
    [outputSettings release];
    // Create session (use default AVCaptureSessionPresetHigh)
    AVCaptureSession *newCaptureSession = [[AVCaptureSession alloc] init];
    // Add inputs and output to the capture session, feature-checking each.
    if ([newCaptureSession canAddInput:newVideoInput]) {
        [newCaptureSession addInput:newVideoInput];
    }
    if ([newCaptureSession canAddInput:newAudioInput]) {
        [newCaptureSession addInput:newAudioInput];
    }
    if ([newCaptureSession canAddOutput:newStillImageOutput]) {
        [newCaptureSession addOutput:newStillImageOutput];
    }
    // Hand ownership to the properties, then balance the local allocs (MRC).
    [self setStillImageOutput:newStillImageOutput];
    [self setVideoInput:newVideoInput];
    [self setAudioInput:newAudioInput];
    [self setSession:newCaptureSession];
    [newStillImageOutput release];
    [newVideoInput release];
    [newAudioInput release];
    [newCaptureSession release];
    // Set up the movie file output
    NSURL *outputFileURL = [self tempFileURL];
    AVCamRecorder *newRecorder = [[AVCamRecorder alloc] initWithSession:[self session] outputFileURL:outputFileURL];
    [newRecorder setDelegate:self];
    // Send an error to the delegate if video recording is unavailable
    if (![newRecorder recordsVideo] && [newRecorder recordsAudio]) {
        NSString *localizedDescription = NSLocalizedString(@"Video recording unavailable", @"Video recording unavailable description");
        NSString *localizedFailureReason = NSLocalizedString(@"Movies recorded on this device will only contain audio. They will be accessible through iTunes file sharing.", @"Video recording unavailable failure reason");
        NSDictionary *errorDict = [NSDictionary dictionaryWithObjectsAndKeys:
                                   localizedDescription, NSLocalizedDescriptionKey,
                                   localizedFailureReason, NSLocalizedFailureReasonErrorKey,
                                   nil];
        NSError *noVideoError = [NSError errorWithDomain:@"AVCam" code:0 userInfo:errorDict];
        if ([[self delegate] respondsToSelector:@selector(captureManager:didFailWithError:)]) {
            [[self delegate] captureManager:self didFailWithError:noVideoError];
        }
    }
    [self setRecorder:newRecorder];
    [newRecorder release];
    success = YES;
    return success;
}
I need to capture Image & Video without opening imagepickerController.
You can Capture video and photo using AVCaptureSession
refer to iPhone SDK 4 AVFoundation - How to use captureStillImageAsynchronouslyFromConnection correctly?
/// Builds a medium-preset capture session with a live preview layer and a JPEG
/// still-image output, then starts it running.
/// Fixes vs. original: calls [super viewDidAppear:] (required in lifecycle
/// overrides); restores the @-sigils garbled to '#'; no longer adds a nil
/// input to the session when the camera fails to open (addInput: raises on nil).
-(void) viewDidAppear:(BOOL)animated
{
    [super viewDidAppear:animated];

    AVCaptureSession *session = [[AVCaptureSession alloc] init];
    session.sessionPreset = AVCaptureSessionPresetMedium;

    CALayer *viewLayer = self.vImagePreview.layer;
    NSLog(@"viewLayer = %@", viewLayer);

    AVCaptureVideoPreviewLayer *captureVideoPreviewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:session];
    captureVideoPreviewLayer.frame = self.vImagePreview.bounds;
    [self.vImagePreview.layer addSublayer:captureVideoPreviewLayer];

    AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    NSError *error = nil;
    AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:device error:&error];
    if (!input) {
        // Handle the error appropriately.
        NSLog(@"ERROR: trying to open camera: %@", error);
    } else {
        [session addInput:input];
    }

    stillImageOutput = [[AVCaptureStillImageOutput alloc] init];
    NSDictionary *outputSettings = [[NSDictionary alloc] initWithObjectsAndKeys: AVVideoCodecJPEG, AVVideoCodecKey, nil];
    [stillImageOutput setOutputSettings:outputSettings];
    [session addOutput:stillImageOutput];

    [session startRunning];
}
/// Captures a still image: finds the video connection on the still-image
/// output, requests an asynchronous capture, and displays the resulting JPEG.
/// (Restored the @-sigils the paste garbled to '#'; behavior unchanged.)
-(IBAction) captureNow
{
    // Locate the connection whose input port carries video.
    AVCaptureConnection *videoConnection = nil;
    for (AVCaptureConnection *connection in stillImageOutput.connections)
    {
        for (AVCaptureInputPort *port in [connection inputPorts])
        {
            if ([[port mediaType] isEqual:AVMediaTypeVideo] )
            {
                videoConnection = connection;
                break;
            }
        }
        if (videoConnection) { break; }
    }

    NSLog(@"about to request a capture from: %@", stillImageOutput);
    [stillImageOutput captureStillImageAsynchronouslyFromConnection:videoConnection completionHandler: ^(CMSampleBufferRef imageSampleBuffer, NSError *error)
    {
        CFDictionaryRef exifAttachments = CMGetAttachment( imageSampleBuffer, kCGImagePropertyExifDictionary, NULL);
        if (exifAttachments)
        {
            // Do something with the attachments.
            NSLog(@"attachements: %@", exifAttachments);
        }
        else
            NSLog(@"no attachments");
        NSData *imageData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageSampleBuffer];
        // NOTE(review): under MRC this alloc'd UIImage is never released after
        // assignment (the image view retains it) — confirm ARC vs. MRC here.
        UIImage *image = [[UIImage alloc] initWithData:imageData];
        self.vImage.image = image;
    }];
}
I am trying to take the camera input and write the data to disk using AVAssetWriter. From the delegate, it simply appears that the AVAssetWriterInputPixelBufferAdaptor is failing to append data. I'm not sure why.
/// Returns the movie output URL: <Caches>/videoOutput.
/// (Restored the @-sigil garbled to '#' on the path component; -firstObject is
/// nil-safe, unlike the count-then-index dance.)
- (NSURL*) assetURL{
    NSArray *paths = NSSearchPathForDirectoriesInDomains(NSCachesDirectory, NSUserDomainMask, YES);
    NSString *basePath = [paths firstObject];
    NSString *filePath = [basePath stringByAppendingPathComponent:@"videoOutput"];
    return [NSURL fileURLWithPath:filePath];
}
/// Designated initializer: builds the capture pipeline end-to-end
/// (input, output, preview layer, asset writer).
/// Fixes vs. original: assigns self from [super init] (the original tested the
/// result but discarded it, leaving self possibly distinct); "[[c alloc]" was
/// a garbled class name — the preset and usage show it must be AVCaptureSession.
- (id) init {
    self = [super init];
    if (!self) return nil;
    self.captureSession = [[AVCaptureSession alloc] init];
    self.captureSession.sessionPreset = AVCaptureSessionPresetHigh;
    // Preset frame sizes for reference:
    //   HIGH: 640 x 480,  MEDIUM: 360 x 480,  LOW: 192 x 144
    [self loadVideoInput];
    [self loadVideoOutput];
    [self loadPreviewLayer];
    [self loadWriter];
    return self;
}
/// Attaches a video input to the session, preferring the front camera when one
/// exists, otherwise the default video device.
/// Fixes vs. original: `error` was read uninitialized and tested instead of
/// the returned input — Cocoa convention is to check the return value, not the
/// error pointer; @-sigils restored in the NSLog strings.
- (void) loadVideoInput{
    // Start with the default device, then prefer the front camera if present.
    AVCaptureDevice *videoDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
    for (AVCaptureDevice *device in devices) {
        if (device.position == AVCaptureDevicePositionFront) {
            videoDevice = device;
            break;
        }
    }
    if ( videoDevice ) {
        NSError *error = nil;
        AVCaptureDeviceInput *videoIn = [AVCaptureDeviceInput deviceInputWithDevice:videoDevice error:&error];
        if ( videoIn ) {
            if ([self.captureSession canAddInput:videoIn])
                [self.captureSession addInput:videoIn];
            else NSLog(@"Couldn't add video input");
        } else NSLog(@"Couldn't create video input");
    } else NSLog(@"Couldn't create video capture device");
}
/// Attaches a BGRA video-data output to the session with this object as the
/// sample-buffer delegate on the main queue.
/// BUG FIX: the original configured a local `output` but then tested/added an
/// undefined `videoOut`, so the configured output was never attached. All
/// references now use `output`; @-sigils restored in NSLog strings.
- (void) loadVideoOutput{
    AVCaptureVideoDataOutput *output = [[AVCaptureVideoDataOutput alloc] init];
    // Drop late frames instead of queueing them.
    output.alwaysDiscardsLateVideoFrames = YES;
    // Main-queue delivery is fine for light work; move to a serial background
    // queue if the handler becomes expensive.
    [output setSampleBufferDelegate:self queue:dispatch_get_main_queue()];
    // BGRA is necessary for manual preview.
    [output setVideoSettings:[NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_32BGRA] forKey:(id)kCVPixelBufferPixelFormatTypeKey]];
    if ([self.captureSession canAddOutput:output])
        [self.captureSession addOutput:output];
    else
        NSLog(@"Couldn't add video output");
    // MRC: the session retains the output; balance the alloc here.
    [output release];
}
/// Creates the live-preview layer for the capture session, scaled to fill
/// its bounds (cropping as needed).
- (void) loadPreviewLayer{
    AVCaptureVideoPreviewLayer *layer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:self.captureSession];
    [layer setVideoGravity:AVLayerVideoGravityResizeAspectFill];
    previewLayer = layer;
}
/// Creates the QuickTime asset writer with a 640x480 H.264 video input and the
/// pixel-buffer adaptor used by the capture callback. MRC code.
/// (Fixed the garbled NSLog format string: "#"Error? %#"" -> @"Error? %@".)
- (void) loadWriter{
    NSError *error = nil;
    videoWriter = [[AVAssetWriter alloc] initWithURL:[self assetURL] fileType:AVFileTypeQuickTimeMovie error:&error];
    NSParameterAssert(videoWriter);
    NSDictionary *videoSettings = [NSDictionary dictionaryWithObjectsAndKeys:
                                   AVVideoCodecH264, AVVideoCodecKey,
                                   [NSNumber numberWithInt:640], AVVideoWidthKey,
                                   [NSNumber numberWithInt:480], AVVideoHeightKey,
                                   nil];
    writerInput = [[AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:videoSettings] retain];
    // Live-capture source: tell the writer to expect real-time data.
    writerInput.expectsMediaDataInRealTime = YES;
    NSParameterAssert(writerInput);
    NSParameterAssert([videoWriter canAddInput:writerInput]);
    [videoWriter addInput:writerInput];
    currentTime = kCMTimeZero;
    adaptor = [[AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:writerInput sourcePixelBufferAttributes:nil] retain];
    NSLog(@"Error? %@", error);
}
/// Capture callback: while recording, appends each frame's pixel buffer to the
/// writer via the adaptor at the buffer's own presentation timestamp.
/// (Restored the @-sigils the paste garbled to '#' in the NSLog.)
- (void) captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
    if(recording){
        CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
        currentTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
        // NOTE(review): appends fail when -startSessionAtSourceTime: was called
        // with kCMTimeZero (see -startRecording) while live buffers carry much
        // later host-clock PTS values — start the session from the first
        // buffer's PTS instead. TODO confirm.
        BOOL success = [adaptor appendPixelBuffer:imageBuffer withPresentationTime:currentTime];
        NSLog(@"%@", success ? @"YES" : @"NO");
    }
}
// Starts the asset writer and flags the capture callback to begin appending.
// NOTE(review): currentTime is kCMTimeZero until the first frame arrives, so
// the writer session starts at zero while captured buffers carry much later
// host-clock timestamps — a likely cause of appendPixelBuffer: failures.
// Consider starting the session from the first received buffer's PTS.
- (void) startRecording{
[videoWriter startWriting];
[videoWriter startSessionAtSourceTime:currentTime];
recording = YES;
}
// Stops appending, finalizes the writer input, and closes the movie file.
// NOTE(review): -finishWriting is synchronous (and later deprecated in favor
// of -finishWritingWithCompletionHandler:); confirm this is not called on the
// main thread in shipping code.
- (void) stopRecording{
recording = NO;
[writerInput markAsFinished];
[videoWriter endSessionAtSourceTime:currentTime];
[videoWriter finishWriting];
}
You need to remove the existing file ("filePath" in your case) each time, trying to record the video.
[adaptor appendPixelBuffer:imageBuffer withPresentationTime:currentTime];
Maybe you need to manage the video time yourself: you get currentTime from the sample buffer, but it's not exactly the video time. Try accumulating the capture time per frame.