AVAssetWriter error: Cannot append media data after ending session - swift

This error occurs while capturing video with AVAssetWriter. After finishWriting is called inside endVideoCapture, there is no further call to start writing again, so why is the error occurring?
As you can see in the delegate function, captureOutput, we check the recording state before trying to append to the asset writer. The recording state is set to false in endVideoCapture.
Optional(Error Domain=AVFoundationErrorDomain Code=-11862 "Cannot append media data after ending session" UserInfo={NSLocalizedFailureReason=The application encountered a programming error., NSLocalizedDescription=The operation is not allowed, NSDebugDesc
func startVideoCapture() {
    // Get capture resolution
    let resolution = getCaptureResolution()

    // Return if capture resolution not set
    if resolution.width == 0 || resolution.height == 0 {
        printError("Error starting capture because resolution invalid")
        return
    }

    // If here, start capture
    assetWriter = createAssetWriter(Int(resolution.width), outputHeight: Int(resolution.height))
    let recordingClock = captureSession.masterClock
    assetWriter!.startWriting()
    assetWriter!.startSession(atSourceTime: CMClockGetTime(recordingClock!))

    // Update time stamp
    startTime = CACurrentMediaTime()

    // Update <recording> flag & notify delegate
    recording = true
    delegate?.cameraDidStartVideoCapture()
}
func createAssetWriter(_ outputWidth: Int, outputHeight: Int) -> AVAssetWriter? {
    // Update <outputURL> with temp file to hold video
    let tempPath = gFile.getUniqueTempPath(gFile.MP4File)
    outputURL = URL(fileURLWithPath: tempPath)

    // Return new asset writer or nil
    do {
        // Create asset writer
        let newWriter = try AVAssetWriter(outputURL: outputURL, fileType: AVFileTypeMPEG4)

        // Define video settings
        let videoSettings: [String : AnyObject] = [
            AVVideoCodecKey : AVVideoCodecH264 as AnyObject,
            AVVideoWidthKey : outputWidth as AnyObject,
            AVVideoHeightKey : outputHeight as AnyObject,
        ]

        // Add video input to writer
        assetWriterVideoInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
        assetWriterVideoInput!.expectsMediaDataInRealTime = true
        newWriter.add(assetWriterVideoInput!)

        // Define audio settings
        let audioSettings : [String : AnyObject] = [
            AVFormatIDKey : NSInteger(kAudioFormatMPEG4AAC) as AnyObject,
            AVNumberOfChannelsKey : 2 as AnyObject,
            AVSampleRateKey : NSNumber(value: 44100.0 as Double)
        ]

        // Add audio input to writer
        assetWriterAudioInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: audioSettings)
        assetWriterAudioInput!.expectsMediaDataInRealTime = true
        newWriter.add(assetWriterAudioInput!)

        // Return writer
        print("Created asset writer for \(outputWidth)x\(outputHeight) video")
        return newWriter
    } catch {
        printError("Error creating asset writer: \(error)")
        return nil
    }
}
func endVideoCapture() {
    // Update flag to stop data capture
    recording = false

    // Return if asset writer undefined
    if assetWriter == nil {
        return
    }

    // If here, end capture
    // -- Mark inputs as done
    assetWriterVideoInput!.markAsFinished()
    assetWriterAudioInput!.markAsFinished()

    // -- Finish writing
    assetWriter!.finishWriting() {
        self.assetWriterDidFinish()
    }
}
func assetWriterDidFinish() {
    print("Asset writer finished with status: \(getAssetWriterStatus())")

    // Return early on error & tell delegate
    if assetWriter!.error != nil {
        printError("Error finishing asset writer: \(assetWriter!.error)")
        delegate?.panabeeCameraDidEndVideoCapture(videoURL: nil, videoDur: 0, error: assetWriter!.error)
        logEvent("Asset Writer Finish Error", userData: ["Error" : assetWriter!.error.debugDescription])
        return
    }

    // If here, no error so extract video properties & tell delegate
    let videoAsset = AVURLAsset(url: outputURL, options: nil)
    let videoDur = CMTimeGetSeconds(videoAsset.duration)
    let videoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0]
    print("Camera created video. Duration: \(videoDur). Size: \(videoTrack.naturalSize). Transform: \(videoTrack.preferredTransform). URL: \(outputURL).")

    // Tell delegate
    delegate?.cameraDidEndVideoCapture(videoURL: outputURL.path, videoDur: videoDur, error: assetWriter!.error)

    // Reset <assetWriter> to nil
    assetWriter = nil
}
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
    // Return if not recording
    if !recording {
        return
    }

    // If here, capture data
    // Write video data?
    if captureOutput == videoOutput && assetWriterVideoInput!.isReadyForMoreMediaData {
        assetWriterVideoQueue!.async {
            self.assetWriterVideoInput!.append(sampleBuffer)
        }
    }

    // No, write audio data?
    if captureOutput == audioOutput && assetWriterAudioInput!.isReadyForMoreMediaData {
        assetWriterAudioQueue!.async {
            self.assetWriterAudioInput!.append(sampleBuffer)
        }
    }
}
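The likely culprit is the asynchronous hand-off in captureOutput: a sample buffer that passed the recording check can still be sitting on assetWriterVideoQueue or assetWriterAudioQueue when endVideoCapture runs, so its append executes after markAsFinished and finishWriting. One way to close the race is to tear down on those same queues (a sketch against the code above, assuming the queue properties shown; not a confirmed fix):

func endVideoCapture() {
    recording = false
    guard let writer = assetWriter else { return }

    // Hop onto the writer queues so any append blocks that captureOutput
    // already enqueued run before the inputs are marked finished.
    assetWriterVideoQueue!.async {
        self.assetWriterVideoInput!.markAsFinished()
        self.assetWriterAudioQueue!.async {
            self.assetWriterAudioInput!.markAsFinished()
            writer.finishWriting {
                self.assetWriterDidFinish()
            }
        }
    }
}

For the same reason, the async blocks in captureOutput should re-check self.recording before appending, since the flag can flip between the check on the capture queue and the append on the writer queue.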

Related

AVAssetWriter used with ScreenCaptureKit leads to corrupt files and errors

I'm trying to use ScreenCaptureKit to write an app that will record meetings. However, anytime I instantiate an AVAssetWriter and start streaming CMSampleBuffers to it, it fails with the error:
Optional(Error Domain=AVFoundationErrorDomain Code=-11800 "The operation could not be completed" UserInfo={NSLocalizedFailureReason=An unknown error occurred (-12785), NSLocalizedDescription=The operation could not be completed, NSUnderlyingError=0x600000c066d0 {Error Domain=NSOSStatusErrorDomain Code=-12785 "(null)"}})
I've got a full code reproduction in this repo: https://github.com/jonluca/buggy-avassetwriter
What's weird is that this repo, which is ~95% the same code, works: https://github.com/garethpaul/ScreenRecorderMacOS
I might not be attuned to the intricacies of AVAssetWriter and the newer ScreenCaptureKit, but I'm just not sure how this error can be happening.
Alright, this took a while to figure out, but it turns out you can't just check whether a CMSampleBuffer is valid. A buffer can pass that check and still not contain a complete frame, so I added a util called isValidFrame that checks the frame's SCFrameStatus, as well as a few other keys:
private func isValidFrame(for sampleBuffer: CMSampleBuffer) -> Bool {
    // Retrieve the array of metadata attachments from the sample buffer.
    guard let attachmentsArray = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer,
                                                                         createIfNecessary: false) as? [[SCStreamFrameInfo: Any]],
          let attachments = attachmentsArray.first
    else {
        return false
    }

    // Validate the status of the frame. If it isn't `.complete`, return false.
    guard let statusRawValue = attachments[SCStreamFrameInfo.status] as? Int,
          let status = SCFrameStatus(rawValue: statusRawValue),
          status == .complete
    else {
        return false
    }

    // Get the pixel buffer that contains the image data.
    guard let pixelBuffer = sampleBuffer.imageBuffer else {
        return false
    }

    // Get the backing IOSurface.
    guard let surfaceRef = CVPixelBufferGetIOSurface(pixelBuffer)?.takeUnretainedValue() else {
        return false
    }
    let surface = unsafeBitCast(surfaceRef, to: IOSurface.self)

    // Retrieve the content rectangle, scale, and scale factor.
    guard let contentRectDict = attachments[.contentRect],
          let contentRect = CGRect(dictionaryRepresentation: contentRectDict as! CFDictionary),
          let contentScale = attachments[.contentScale] as? CGFloat,
          let scaleFactor = attachments[.scaleFactor] as? CGFloat
    else {
        return false
    }

    return true
}
/// - Tag: DidOutputSampleBuffer
func stream(_ stream: SCStream,
            didOutputSampleBuffer sampleBuffer: CMSampleBuffer,
            of outputType: SCStreamOutputType) {
    // Return early if the sample buffer is invalid.
    guard sampleBuffer.isValid else {
        return
    }

    // Determine which type of data the sample buffer contains.
    switch outputType {
    case .audio:
        self.recordComputerAudio(sampleBuffer: sampleBuffer)
    case .screen:
        guard isValidFrame(for: sampleBuffer) else {
            return
        }
        self.recordFrame(sampleBuffer: sampleBuffer)
    @unknown default:
        fatalError("Encountered unknown stream output type: \(outputType)")
    }
}

AVAssetWriter recorded audio no sound

My app can record the audio from the chat and save it to a file. I recorded some music on the app screen, but when I play back the audio.m4a file there is no sound coming out. The file shows as "Apple MPEG-4 audio" and is 12 KB in size. Did I configure the settings wrong? Thanks in advance.
Edit: I added the stop recording function.
var assetWriter: AVAssetWriter?
var input: AVAssetWriterInput?
var channelLayout = AudioChannelLayout()

func record() {
    guard let docURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first else {
        return
    }
    let inputURL = docURL.appendingPathComponent("audio.m4a")
    do {
        try assetWriter = AVAssetWriter(outputURL: inputURL, fileType: .m4a)
    } catch {
        print("error: \(error)")
        assetWriter = nil
        return
    }
    guard let assetWriter = assetWriter else {
        return
    }
    channelLayout.mChannelLayoutTag = kAudioChannelLayoutTag_MPEG_5_1_D
    let audioSettings: [String : Any] = [
        AVNumberOfChannelsKey: 6,
        AVFormatIDKey: kAudioFormatMPEG4AAC_HE,
        AVSampleRateKey: 44100,
        AVEncoderBitRateKey: 128000,
        AVChannelLayoutKey: NSData(bytes: &channelLayout, length: MemoryLayout.size(ofValue: channelLayout)),
    ]
    input = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings)
    guard let audioInput = input else {
        print("Failed to find input.")
        return
    }
    audioInput.expectsMediaDataInRealTime = true
    if assetWriter.canAdd(audioInput) {
        assetWriter.add(audioInput)
    }
    RPScreenRecorder.shared().startCapture(handler: { (sample, bufferType, error) in
        guard error == nil else {
            print("Failed to capture with error: \(String(describing: error))")
            return
        }
        if bufferType == .audioApp {
            if assetWriter.status == AVAssetWriter.Status.unknown {
                if assetWriter.startWriting() {
                    assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sample))
                }
            }
            if assetWriter.status == AVAssetWriter.Status.writing {
                if audioInput.isReadyForMoreMediaData == true {
                    if audioInput.append(sample) == false {
                    }
                }
            }
        }
    })
}

func stopRecord() {
    RPScreenRecorder.shared().stopCapture { (error) in
        self.input?.markAsFinished()
        if error == nil {
            self.assetWriter?.finishWriting {
                print("finish writing")
            }
        } else {
            print(error as Any)
        }
    }
}
In light of your comments, you definitely don't need six-channel audio. Try these simpler mono audio settings:
let audioSettings: [String : Any] = [
    AVNumberOfChannelsKey: 1,
    AVFormatIDKey: kAudioFormatMPEG4AAC,
    AVSampleRateKey: 44100,
    AVEncoderBitRateKey: 128000,
]
You don't say whether this is on iOS or macOS. On macOS you have a problem because, as of 11.2.1, no .audioApp buffers are captured at all. If you still want audio, you can enable the microphone and capture that instead:
let recorder = RPScreenRecorder.shared()
recorder.isMicrophoneEnabled = true

recorder.startCapture(handler: { (sample, bufferType, error) in
    if bufferType == .audioMic {
        // etc
    }
})
Don't bother checking the writer status before every append; just append buffers whenever the input is ready:
if audioInput.isReadyForMoreMediaData {
    if !audioInput.append(sample) {
        // do something
    }
}
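Putting the pieces together, the capture handler ends up looking something like this (a sketch, assuming the assetWriter and audioInput set up as in the question):

let recorder = RPScreenRecorder.shared()
recorder.isMicrophoneEnabled = true

recorder.startCapture(handler: { (sample, bufferType, error) in
    guard error == nil, bufferType == .audioMic else { return }

    // Start the session from the first buffer's timestamp.
    if assetWriter.status == .unknown {
        assetWriter.startWriting()
        assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sample))
    }

    // Append whenever the input can take more data.
    if audioInput.isReadyForMoreMediaData {
        if !audioInput.append(sample) {
            print("append failed: \(String(describing: assetWriter.error))")
        }
    }
})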
PREVIOUSLY
You need to call assetWriter.finishWriting at some point.
It's interesting that you have 6-channel input. Are you using a special device or some kind of virtual device?

After compressing my audio file, why can I not play the file?

Audio file will not play after reducing it using AVAssetReader/AVAssetWriter
At the moment, the whole function is being executed fine, with no errors thrown.
For some reason, when I go into the simulator's documents directory via the terminal, the audio file will not play through iTunes, and QuickTime refuses to open it with the error: QuickTime Player can't open "test1.m4a".
Does anyone specialise in this area and understand why this isn't working?
protocol FileConverterDelegate {
    func fileConversionCompleted()
}

class WKAudioTools: NSObject {

    var delegate: FileConverterDelegate?
    var url: URL?
    var assetReader: AVAssetReader?
    var assetWriter: AVAssetWriter?

    func convertAudio() {
        let documentDirectory = try! FileManager.default.url(for: .documentDirectory, in: .userDomainMask, appropriateFor: nil, create: true)
        let exportURL = documentDirectory.appendingPathComponent(Assets.soundName1).appendingPathExtension("m4a")
        url = Bundle.main.url(forResource: Assets.soundName1, withExtension: Assets.mp3)
        guard let assetURL = url else { return }
        let asset = AVAsset(url: assetURL)

        // reader
        do {
            assetReader = try AVAssetReader(asset: asset)
        } catch let error {
            print("Error with reading >> \(error.localizedDescription)")
        }

        let assetReaderOutput = AVAssetReaderAudioMixOutput(audioTracks: asset.tracks, audioSettings: nil)
        //let assetReaderOutput = AVAssetReaderTrackOutput(track: track!, outputSettings: nil)

        guard let assetReader = assetReader else {
            print("reader is nil")
            return
        }
        if assetReader.canAdd(assetReaderOutput) == false {
            print("Can't add output to the reader ☹️")
            return
        }
        assetReader.add(assetReaderOutput)

        // writer
        do {
            assetWriter = try AVAssetWriter(outputURL: exportURL, fileType: .m4a)
        } catch let error {
            print("Error with writing >> \(error.localizedDescription)")
        }

        var channelLayout = AudioChannelLayout()
        memset(&channelLayout, 0, MemoryLayout.size(ofValue: channelLayout))
        channelLayout.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo

        // use different values to affect the downsampling/compression
        let outputSettings: [String: Any] = [AVFormatIDKey: kAudioFormatMPEG4AAC,
                                             AVSampleRateKey: 44100.0,
                                             AVNumberOfChannelsKey: 2,
                                             AVEncoderBitRateKey: 128000,
                                             AVChannelLayoutKey: NSData(bytes: &channelLayout, length: MemoryLayout.size(ofValue: channelLayout))]

        let assetWriterInput = AVAssetWriterInput(mediaType: .audio, outputSettings: outputSettings)
        guard let assetWriter = assetWriter else { return }
        if assetWriter.canAdd(assetWriterInput) == false {
            print("Can't add asset writer input ☹️")
            return
        }
        assetWriter.add(assetWriterInput)
        assetWriterInput.expectsMediaDataInRealTime = false

        // MARK: - File conversion
        assetWriter.startWriting()
        assetReader.startReading()
        let audioTrack = asset.tracks[0]
        let startTime = CMTime(seconds: 0, preferredTimescale: audioTrack.naturalTimeScale)
        assetWriter.startSession(atSourceTime: startTime)

        // We need to do this on another thread, so let's set up a dispatch group...
        var convertedByteCount = 0
        let dispatchGroup = DispatchGroup()
        let mediaInputQueue = DispatchQueue(label: "mediaInputQueue")

        // ... and go
        dispatchGroup.enter()
        assetWriterInput.requestMediaDataWhenReady(on: mediaInputQueue) {
            while assetWriterInput.isReadyForMoreMediaData {
                let nextBuffer = assetReaderOutput.copyNextSampleBuffer()
                if nextBuffer != nil {
                    assetWriterInput.append(nextBuffer!) // FIXME: Handle this safely
                    convertedByteCount += CMSampleBufferGetTotalSampleSize(nextBuffer!)
                } else {
                    // done!
                    assetWriterInput.markAsFinished()
                    assetReader.cancelReading()
                    dispatchGroup.leave()
                    DispatchQueue.main.async {
                        // Notify delegate that conversion is complete
                        self.delegate?.fileConversionCompleted()
                        print("Process complete 🎧")
                        if assetWriter.status == .failed {
                            print("Writing asset failed ☹️ Error: ", assetWriter.error)
                        }
                    }
                    break
                }
            }
        }
    }
}
You need to call finishWriting on your AVAssetWriter to get the output completely written:
assetWriter.finishWriting {
    DispatchQueue.main.async {
        // Notify delegate that conversion is complete
        self.delegate?.fileConversionCompleted()
        print("Process complete 🎧")
        if assetWriter.status == .failed {
            print("Writing asset failed ☹️ Error: ", assetWriter.error)
        }
    }
}
If exportURL exists before you start the conversion, you should remove it, otherwise the conversion will fail:
try! FileManager.default.removeItem(at: exportURL)
As @matt points out, why do all the buffer work when you could do the conversion more simply with an AVAssetExportSession? And why convert one of your own bundled assets at runtime when you could ship it already in the desired format?
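For reference, a minimal sketch of the AVAssetExportSession route (the helper function is illustrative, not from the answer):

import AVFoundation

func exportToM4A(asset: AVAsset, to exportURL: URL) {
    // The export fails if the destination already exists.
    try? FileManager.default.removeItem(at: exportURL)

    guard let session = AVAssetExportSession(asset: asset,
                                             presetName: AVAssetExportPresetAppleM4A) else { return }
    session.outputFileType = .m4a
    session.outputURL = exportURL
    session.exportAsynchronously {
        if session.status == .completed {
            print("Process complete 🎧")
        } else {
            print("Export failed ☹️ Error: ", session.error as Any)
        }
    }
}

This skips the reader/writer plumbing entirely and lets AVFoundation pick settings compatible with the M4A preset.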

AVAssetWriter queue guidance Swift 3

Can anyone give me some guidance on using queues in AVFoundation, please?
Later on, in my app, I want to do some processing on individual frames so I need to use AVCaptureVideoDataOutput.
To get started I thought I'd capture images and then write them (unprocessed) using AVAssetWriter.
I am successfully streaming frames from the camera to the image preview by setting up an AVCaptureSession as follows:
func initializeCameraAndMicrophone() {
    // set up the captureSession
    captureSession = AVCaptureSession()
    captureSession.sessionPreset = AVCaptureSessionPreset1280x720

    // set up the camera
    let camera = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
    do {
        let cameraInput = try AVCaptureDeviceInput(device: camera)
        if captureSession.canAddInput(cameraInput) {
            captureSession.addInput(cameraInput)
        }
    } catch {
        print("Error setting device camera input: \(error)")
        return
    }

    videoOutputStream.setSampleBufferDelegate(self, queue: DispatchQueue(label: "sampleBuffer", attributes: []))
    if captureSession.canAddOutput(videoOutputStream) {
        captureSession.addOutput(videoOutputStream)
    }

    captureSession.startRunning()
}
Each new frame then triggers the captureOutput delegate:
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
    let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
    let cameraImage = CIImage(cvPixelBuffer: pixelBuffer!)
    let bufferImage = UIImage(ciImage: cameraImage)

    DispatchQueue.main.async {
        // send captured frame to the videoPreview
        self.videoPreview.image = bufferImage

        // if recording is active append bufferImage to video frame
        while (recordingNow == true) {
            print("OK we're recording!")
            // append images to video
            while (writerInput.isReadyForMoreMediaData) {
                let lastFrameTime = CMTimeMake(Int64(frameCount), videoFPS)
                let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
                pixelBufferAdaptor.append(pixelBuffer!, withPresentationTime: presentationTime)
                frameCount += 1
            }
        }
    }
}
So this streams frames to the image preview perfectly until I press the record button, which calls the startVideoRecording function (which sets up AVAssetWriter). From that point on, the delegate never gets called again!
AVAssetWriter is being set up like this:
func startVideoRecording() {
    guard let assetWriter = createAssetWriter(path: filePath!, size: videoSize) else {
        print("Error converting images to video: AVAssetWriter not created")
        return
    }

    // AVAssetWriter exists so create AVAssetWriterInputPixelBufferAdaptor
    let writerInput = assetWriter.inputs.filter { $0.mediaType == AVMediaTypeVideo }.first!
    let sourceBufferAttributes: [String : AnyObject] = [
        kCVPixelBufferPixelFormatTypeKey as String : Int(kCVPixelFormatType_32ARGB) as AnyObject,
        kCVPixelBufferWidthKey as String : videoSize.width as AnyObject,
        kCVPixelBufferHeightKey as String : videoSize.height as AnyObject,
    ]
    let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: writerInput, sourcePixelBufferAttributes: sourceBufferAttributes)

    // Start writing session
    assetWriter.startWriting()
    assetWriter.startSession(atSourceTime: kCMTimeZero)
    if (pixelBufferAdaptor.pixelBufferPool == nil) {
        print("Error converting images to video: pixelBufferPool nil after starting session")
        assetWriter.finishWriting {
            print("assetWriter stopped!")
        }
        recordingNow = false
        return
    }

    frameCount = 0
    print("Recording started!")
}
I'm new to AVFoundation but I suspect I'm screwing up my queues somewhere.
You have to use a separate serial queue for capturing video/audio.
Add this queue property to your class:
let captureSessionQueue: DispatchQueue = DispatchQueue(label: "sampleBuffer", attributes: [])
Start the session on captureSessionQueue, according to the Apple docs:
The startRunning() method is a blocking call which can take some time, therefore you should perform session setup on a serial queue so that the main queue isn't blocked (which keeps the UI responsive).
captureSessionQueue.async {
    captureSession.startRunning()
}
Set this queue as your capture output's sample buffer delegate queue:
videoOutputStream.setSampleBufferDelegate(self, queue: captureSessionQueue)
Call startVideoRecording inside captureSessionQueue:
captureSessionQueue.async {
    startVideoRecording()
}
In the captureOutput delegate method, put all AVFoundation method calls inside captureSessionQueue.async:
DispatchQueue.main.async {
    // send captured frame to the videoPreview
    self.videoPreview.image = bufferImage

    captureSessionQueue.async {
        // if recording is active, append this frame to the video
        if recordingNow {
            print("OK we're recording!")
            if writerInput.isReadyForMoreMediaData {
                let lastFrameTime = CMTimeMake(Int64(frameCount), videoFPS)
                let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
                pixelBufferAdaptor.append(pixelBuffer!, withPresentationTime: presentationTime)
                frameCount += 1
            }
        }
    }
}
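A related note that goes beyond the original answer: capture buffers already carry presentation times, so rather than synthesizing timestamps from a frame counter you can take them from the buffer itself, with startSession(atSourceTime:) fed the first buffer's timestamp instead of kCMTimeZero. A minimal sketch, assuming the pixelBuffer, writerInput, and pixelBufferAdaptor names from the question:

// Inside captureOutput:
let presentationTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
if writerInput.isReadyForMoreMediaData {
    pixelBufferAdaptor.append(pixelBuffer!, withPresentationTime: presentationTime)
}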

Media type of sample buffer must match receiver's media type ("soun")

Based on this answer (https://stackoverflow.com/a/16035330/1615183), I created the following Swift code for compressing a video:
var videoWriter: AVAssetWriter!
var videoWriterInput: AVAssetWriterInput!
var processingQueue: dispatch_queue_t = dispatch_queue_create("processingQueue1", nil)
var processingQueue2: dispatch_queue_t = dispatch_queue_create("processingQueue2", nil)
var audioWriterInput: AVAssetWriterInput!

func encode() {
    NSFileManager.defaultManager().removeItemAtURL(self.outputFile, error: nil)

    let videoCleanApertureSettings = [AVVideoCleanApertureHeightKey: 720,
                                      AVVideoCleanApertureWidthKey: 1280,
                                      AVVideoCleanApertureHorizontalOffsetKey: 2,
                                      AVVideoCleanApertureVerticalOffsetKey: 2]
    let codecSettings = [AVVideoAverageBitRateKey: 1024000,
                         AVVideoCleanApertureKey: videoCleanApertureSettings]
    let videoSettings = [AVVideoCodecKey: AVVideoCodecH264,
                         AVVideoCompressionPropertiesKey: codecSettings,
                         AVVideoHeightKey: 720, AVVideoWidthKey: 1280]

    // setup video writer
    var error: NSError?
    let asset = AVURLAsset(URL: self.inputFile, options: nil)
    let videoTrack: AVAssetTrack = asset.tracksWithMediaType(AVMediaTypeVideo)[0] as AVAssetTrack
    let videoSize: CGSize = videoTrack.naturalSize
    videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
    videoWriterInput.expectsMediaDataInRealTime = false
    videoWriterInput.transform = videoTrack.preferredTransform
    videoWriter = AVAssetWriter(URL: self.outputFile, fileType: AVFileTypeQuickTimeMovie, error: &error)
    if videoWriter.canAddInput(videoWriterInput) {
        videoWriter.addInput(videoWriterInput)
    } else {
        println("cant add video writer input")
        return
    }

    // setup video reader
    let videoReaderSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange]
    let videoReaderOutput: AVAssetReaderTrackOutput = AVAssetReaderTrackOutput(track: videoTrack, outputSettings: videoReaderSettings)
    let videoReader: AVAssetReader = AVAssetReader(asset: asset, error: &error)
    if videoReader.canAddOutput(videoReaderOutput) {
        videoReader.addOutput(videoReaderOutput)
    }

    // setup audio writer
    audioWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: nil)
    audioWriterInput.expectsMediaDataInRealTime = false
    if videoWriter.canAddInput(audioWriterInput) {
        videoWriter.addInput(audioWriterInput)
    }

    // setup audio reader
    let audioTrack: AVAssetTrack = asset.tracksWithMediaType(AVMediaTypeVideo)[0] as AVAssetTrack
    let audioReaderOutput: AVAssetReaderOutput = AVAssetReaderTrackOutput(track: audioTrack, outputSettings: nil)
    let audioReader: AVAssetReader = AVAssetReader(asset: asset, error: &error)
    if audioReader.canAddOutput(audioReaderOutput) {
        audioReader.addOutput(audioReaderOutput)
    } else {
        println("cant add audio reader")
        return
    }

    videoWriter.startWriting()
    videoReader.startReading()
    videoWriter.startSessionAtSourceTime(kCMTimeZero)

    videoWriterInput.requestMediaDataWhenReadyOnQueue(processingQueue) {
        while self.videoWriterInput.readyForMoreMediaData {
            println("First loop")
            var sampleBuffer = videoReaderOutput.copyNextSampleBuffer()
            if videoReader.status == .Reading && sampleBuffer != nil {
                println("Appending")
                self.videoWriterInput.appendSampleBuffer(sampleBuffer)
            } else {
                self.videoWriterInput.markAsFinished()
                if videoReader.status == .Completed {
                    audioReader.startReading()
                    self.videoWriter.startSessionAtSourceTime(kCMTimeZero)
                    self.audioWriterInput.requestMediaDataWhenReadyOnQueue(self.processingQueue2) {
                        while self.audioWriterInput.readyForMoreMediaData {
                            println("Second loop")
                            var sampleBuffer2: CMSampleBufferRef? = audioReaderOutput.copyNextSampleBuffer()
                            if audioReader.status == .Reading && sampleBuffer2 != nil {
                                self.audioWriterInput.appendSampleBuffer(sampleBuffer2)
                            } else {
                                self.audioWriterInput.markAsFinished()
                                println("Audio finish")
                                self.videoWriter.finishWritingWithCompletionHandler { println("Done") }
                            }
                        }
                    }
                } else {
                    println("Video Reader not completed")
                }
                println("Finished")
                break
            } // else videoSampleBuffer
        }
    }
}
However, if I remove the audio part I get only an empty file. If I run it as is, the first time the second loop runs there is no problem, but on the second iteration it crashes with the following error:
*** Terminating app due to uncaught exception 'NSInvalidArgumentException', reason: '*** -[AVAssetWriterInput appendSampleBuffer:] Media type of sample buffer must match receiver's media type ("soun")'
Did anyone have the same problem?
Change AVMediaTypeVideo to AVMediaTypeAudio when fetching the audio track:
let audioTrack:AVAssetTrack = asset.tracksWithMediaType(AVMediaTypeVideo)[0] as AVAssetTrack
should be
let audioTrack:AVAssetTrack = asset.tracksWithMediaType(AVMediaTypeAudio)[0] as AVAssetTrack
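Incidentally, force-indexing [0] will crash on assets that have no audio track at all; a safer pattern, sketched here in the question's Swift 1 syntax, is to check first:

let audioTracks = asset.tracksWithMediaType(AVMediaTypeAudio)
if audioTracks.count == 0 {
    println("asset has no audio track")
    return
}
let audioTrack = audioTracks[0] as AVAssetTrack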