Appending to AVAssetWriterInputPixelBufferAdaptor fails - swift

I want to output a video file from the depth camera. Displaying the data works, but I cannot append the pixel buffer to the AVAssetWriterInputPixelBufferAdaptor. Unfortunately, there is no more info than a negative boolean.
In this function we set up the writer:
func createWriter() {
    do {
        assetWriter = try AVAssetWriter(outputURL: movieURL() as URL, fileType: AVFileType.mov)
    } catch let error as NSError {
        print(error.localizedDescription)
        return
    }

    let outputSettings: [String : Any] = [
        AVVideoCodecKey : AVVideoCodecType.h264,
        AVVideoWidthKey : Int(currentVideoDimensions!.width),
        AVVideoHeightKey : Int(currentVideoDimensions!.height)
    ]

    let assetWriterVideoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: outputSettings)
    assetWriterVideoInput.transform = CGAffineTransform(rotationAngle: CGFloat.pi / 2.0)
    assetWriterVideoInput.expectsMediaDataInRealTime = true

    let sourcePixelBufferAttributesDictionary: [String : Any] = [
        kCVPixelBufferPixelFormatTypeKey as String : Int(kCVPixelFormatType_32BGRA),
        kCVPixelBufferWidthKey as String : Int(currentVideoDimensions!.width),
        kCVPixelBufferHeightKey as String : Int(currentVideoDimensions!.height),
        kCVPixelBufferOpenGLESCompatibilityKey as String : kCFBooleanTrue!   // was kCVPixelFormatOpenGLESCompatibility, a pixel-format description key, not a buffer-attribute key
    ]

    assetWriterPixelBufferInput = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: assetWriterVideoInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)

    if assetWriter!.canAdd(assetWriterVideoInput) {
        assetWriter!.add(assetWriterVideoInput)
    } else {
        print("no way \(assetWriterVideoInput)")
    }
}
which is used later to append the pixel buffers after a frame is recorded:
public func captureOutput(_ captureOutput: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    autoreleasepool {
        connection.videoOrientation = AVCaptureVideoOrientation.landscapeLeft

        let pixelBuffer: CVPixelBuffer? = CMSampleBufferGetImageBuffer(sampleBuffer)
        let cameraImage = CIImage(cvPixelBuffer: pixelBuffer!)
        let previewImage: CIImage = depthMap ?? cameraImage
        let displayImage = UIImage(ciImage: previewImage)

        let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer)!
        self.currentVideoDimensions = CMVideoFormatDescriptionGetDimensions(formatDescription)
        self.currentSampleTime = CMSampleBufferGetOutputPresentationTimeStamp(sampleBuffer)

        if self.isWriting {
            if self.assetWriterPixelBufferInput?.assetWriterInput.isReadyForMoreMediaData == true {
                let success = self.assetWriterPixelBufferInput?.append(pixelBuffer!, withPresentationTime: self.currentSampleTime!)
                if success == false {
                    print("Pixel Buffer failed")
                }
            }
        }

        DispatchQueue.main.async { [weak self] in
            self?.imageView.image = displayImage
        }
    }
}
We checked the image and buffer sizes and they were the same; appending still doesn't work, and there is no proper error message.
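One way to get more information than the bare boolean is to ask the writer itself. A minimal diagnostic sketch using the names above (note that appends also fail when startWriting() and startSession(atSourceTime:) were never called, and neither appears in the code shown):

if success == false {
    print("Pixel Buffer failed")
    if let writer = self.assetWriter {
        // status == .failed together with writer.error pinpoints the cause
        print("writer status: \(writer.status.rawValue), error: \(String(describing: writer.error))")
    }
}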

Related

AVAssetWriterInput ReadyForMoreMediaData always false

I'm trying to record CVPixelBuffers in real time, but I can't append a buffer because assetWriterInput.isReadyForMoreMediaData always returns false.
Can someone explain why this value is always false? Thanks.
class VideoRecorder {
    static var shared = VideoRecorder()

    var avAssetWriter: AVAssetWriter?
    var Adaptor: AVAssetWriterInputPixelBufferAdaptor?
    var Settings: RecorderSetting?

    struct RecorderSetting {
        var videoSetting: [String : Any]
        var Path: URL
    }

    func makeVideoSettings(width: Int, height: Int, BitRate: Double) -> [String : Any] {
        let VideoCompressionProperties = [
            AVVideoAverageBitRateKey: Double(width * height) * BitRate
        ]
        let videoSettings: [String : Any] = [
            AVVideoCodecKey: AVVideoCodecType.hevc,
            AVVideoWidthKey: width,
            AVVideoHeightKey: height,
            AVVideoCompressionPropertiesKey: VideoCompressionProperties
        ]
        return videoSettings
    }

    func makePath(FileName: String) -> URL {
        return URL(fileURLWithPath:
            NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0] + "/\(FileName).mp4")
    }

    func setup(width: Int, height: Int, BitRate: Double, FileName: String) {
        let setting = makeVideoSettings(width: width, height: height, BitRate: BitRate)
        let Path = makePath(FileName: FileName)
        Settings = RecorderSetting(videoSetting: setting, Path: Path)
    }

    // Throwing a String assumes an `extension String: Error` elsewhere in the project.
    func StartSession(FirstFrame: CVPixelBuffer) throws {
        let attribute: [String : Any] = [
            kCVPixelBufferPixelFormatTypeKey as String: CVPixelBufferGetPixelFormatType(FirstFrame),
            kCVPixelBufferWidthKey as String: CVPixelBufferGetWidth(FirstFrame),
            kCVPixelBufferHeightKey as String: CVPixelBufferGetHeight(FirstFrame)
        ]
        if Settings == nil { throw "Settings invalid" }

        let writerInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: Settings!.videoSetting)
        Adaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: writerInput, sourcePixelBufferAttributes: attribute)
        Adaptor?.assetWriterInput.expectsMediaDataInRealTime = true

        do {
            avAssetWriter = try AVAssetWriter(url: Settings!.Path, fileType: AVFileType.mp4)
            if avAssetWriter!.canAdd(writerInput) {
                let StartTime = Date().timeIntervalSince1970.toCMTime()   // toCMTime() is a custom extension
                avAssetWriter?.startWriting()
                avAssetWriter?.startSession(atSourceTime: StartTime)
                try? WriteBuffer(Buffer: FirstFrame, time: StartTime)
            } else {
                throw "Add AVWriterInput Error"
            }
        } catch {
            throw "Initializing Error"
        }
    }

    func StopSession() {
        if Adaptor?.assetWriterInput.isReadyForMoreMediaData == false { return }
        Adaptor?.assetWriterInput.markAsFinished()
        avAssetWriter?.finishWriting(completionHandler: {
            if let outputPath = self.Settings?.Path {
                PHPhotoLibrary.shared().performChanges({
                    PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputPath)
                }) { saved, error in
                    if saved {
                        try? FileManager().removeItem(at: outputPath)
                        print("Saved")
                        /*
                        let fetchOptions = PHFetchOptions()
                        fetchOptions.sortDescriptors = [NSSortDescriptor(key: "creationDate", ascending: false)]
                        let fetchResult = PHAsset.fetchAssets(with: .video, options: fetchOptions).firstObject
                        // fetchResult is your latest video PHAsset
                        // To fetch the latest image, replace .video with .image
                        */
                    }
                }
            }
        })
    }

    func WriteBuffer(Buffer: CVPixelBuffer, time: CMTime) throws {
        if self.Adaptor != nil {
            if self.Adaptor!.assetWriterInput.isReadyForMoreMediaData {
                let appended = self.Adaptor!.append(Buffer, withPresentationTime: time)
                if !appended {
                    print(avAssetWriter?.error as Any)
                }
            } else {
                throw "Writer Input is not Ready"
            }
        } else {
            throw "PixelBufferAdaptor invalid"
        }
    }
}
Found the problem:
AVAssetWriter.canAdd(_:) only checks whether the asset writer can add the input.
You need to call add(_:) to actually attach the input before you start writing.
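For reference, the relevant part of StartSession with the missing call added might look like this (a sketch based on the code above):

if avAssetWriter!.canAdd(writerInput) {
    avAssetWriter!.add(writerInput)   // the missing call; canAdd(_:) alone attaches nothing
    let StartTime = Date().timeIntervalSince1970.toCMTime()
    avAssetWriter?.startWriting()
    avAssetWriter?.startSession(atSourceTime: StartTime)
    try? WriteBuffer(Buffer: FirstFrame, time: StartTime)
} else {
    throw "Add AVWriterInput Error"
}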

How can I read a value from key (stringValue)?

I need help... I only want to print the value for each key; I don't want to read the whole object.
I created a QRCode using this code:
@IBAction func btnCreate(_ sender: Any) {
    if sectionNameTxt.text == "" || sectionExtTxt.text == "" || sectionLocationTxt.text == "" {
    } else {
        let dic = ["sectionName": sectionNameTxt.text!, "sectionExt": sectionExtTxt.text!, "sectionLocation": sectionLocationTxt.text!]
        print("dic: \(dic)")
        imgView.image = QRGenerator.generate(from: dic)
        do {
            let jsonData = try JSONEncoder().encode(dic)
            if let filter = CIFilter(name: "CIQRCodeGenerator") {
                filter.setValue(jsonData, forKey: "inputMessage")
                let transform = CGAffineTransform(scaleX: 10, y: 10)
                if let output = filter.outputImage?.transformed(by: transform) {
                    imgView.image = UIImage(ciImage: output)
                }
            }
        } catch {
            print(error.localizedDescription)
        }
    }
}
This code is to read the QRCode:
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    if let metadataObject = metadataObjects.first {
        guard let readableObject = metadataObject as? AVMetadataMachineReadableCodeObject else { return }
        AudioServicesPlaySystemSound(SystemSoundID(kSystemSoundID_Vibrate))
        print(readableObject.stringValue!)
        session.stopRunning()
        self.dismiss(animated: true, completion: nil)
    }
}
Output:
{"sectionName":"pharmacy","sectionExt":"1010","sectionLocation":"Main Building - Ground Floor"}
How do I read the values of the keys sectionName, sectionExt, and sectionLocation?
You can use JSONSerialization.
if let data = readableObject.stringValue?.data(using: .utf8) {
    if let json = try? JSONSerialization.jsonObject(with: data, options: []), let dataDict = json as? NSDictionary {
        if let sectionName = dataDict["sectionName"] as? String {
            print(sectionName)
        }
    }
}
You can do the rest.
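Alternatively, since the payload was produced with JSONEncoder, you can decode it back into a typed value with JSONDecoder. A minimal sketch (the Section struct is introduced here for illustration; it is not in the original code):

struct Section: Codable {
    let sectionName: String
    let sectionExt: String
    let sectionLocation: String
}

if let data = readableObject.stringValue?.data(using: .utf8),
   let section = try? JSONDecoder().decode(Section.self, from: data) {
    print(section.sectionName, section.sectionExt, section.sectionLocation)
}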

Recording video and audio with AVAssetWriter in Swift

I'm trying to add the device's microphone audio to a video recording from the device's camera. The video is filtered with a CIFilter and works as expected. My problem is that the mic audio is not attached to the video once saved.
I have tried setting the audio settings manually, like this:
let audioSettings: [String : Any] = [
    AVFormatIDKey : kAudioFormatMPEG4AAC,
    AVNumberOfChannelsKey : 1,
    AVSampleRateKey : 44100,
    AVEncoderBitRateKey : 64000
]
but using the recommendedAudioSettingsForAssetWriter method seems like the correct approach, since the video recording already works with the corresponding recommendedVideoSettingsForAssetWriter method.
Can anyone tell me how to achieve this or point me in the right direction?
My code so far:
import UIKit
import AVFoundation
class VideoViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {

    @IBOutlet weak var imageView: UIImageView!

    lazy var cameraDevice: AVCaptureDevice? = {
        return AVCaptureDevice.default(for: .video)
    }()

    lazy var micDevice: AVCaptureDevice? = {
        return AVCaptureDevice.default(for: .audio)
    }()

    var captureSession = AVCaptureSession()
    var outputURL: URL!
    var orientation: AVCaptureVideoOrientation = .landscapeRight
    var filterObject = FilterObject()

    var assetWriter: AVAssetWriter?
    var assetWriterInput: AVAssetWriterInput?
    var pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor?

    var fileName = ""
    var recordingState = RecordingState.idle
    var time: Double = 0

    let videoOutput = AVCaptureVideoDataOutput()
    let audioOutput = AVCaptureAudioDataOutput()
    let context = CIContext()

    override func viewDidLoad() {
        super.viewDidLoad()
        setupCameraDevice()
        setupAudioDevice()
        setupInputOutput()
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        setUpAuthStatus()
    }

    @IBAction func recordPressed(_ sender: UIButton) {
        switch recordingState {
        case .idle:
            recordingState = .start
        case .capturing:
            recordingState = .end
        default:
            break
        }
    }

    func setUpAuthStatus() {
        if AVCaptureDevice.authorizationStatus(for: AVMediaType.video) != .authorized {
            AVCaptureDevice.requestAccess(for: AVMediaType.video, completionHandler: { (authorized) in
                DispatchQueue.main.async {
                    if authorized {
                        self.setupInputOutput()
                    }
                }
            })
        }
        if AVCaptureDevice.authorizationStatus(for: AVMediaType.audio) != .authorized {
            AVCaptureDevice.requestAccess(for: AVMediaType.audio, completionHandler: { (authorized) in
                DispatchQueue.main.async {
                    if authorized {
                        self.setupInputOutput()
                    }
                }
            })
        }
    }

    func setupCameraDevice() {
        let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .unspecified)
        let devices = deviceDiscoverySession.devices
        for device in devices {
            if device.position == .back {
                cameraDevice = device
            }
        }
    }

    func setupAudioDevice() {
        let audioDeviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInMicrophone], mediaType: .audio, position: .unspecified)
        let devices = audioDeviceDiscoverySession.devices
        micDevice = devices[0]
    }

    func setupInputOutput() {
        do {
            guard let cameraDevice = cameraDevice else { return }
            let captureDeviceInput = try AVCaptureDeviceInput(device: cameraDevice)
            guard let micDevice = micDevice else { return }
            let micDeviceInput = try AVCaptureDeviceInput(device: micDevice)
            captureSession.sessionPreset = AVCaptureSession.Preset.hd1920x1080
            if captureSession.canAddInput(captureDeviceInput) {
                captureSession.addInput(captureDeviceInput)
            }
            if captureSession.canAddInput(micDeviceInput) {
                captureSession.addInput(micDeviceInput)
            }
            let queue = DispatchQueue(label: "com.apple.sample.capturepipeline.video", attributes: [])
            if captureSession.canAddOutput(videoOutput) {
                videoOutput.setSampleBufferDelegate(self, queue: queue)
                captureSession.addOutput(videoOutput)
            }
            if captureSession.canAddOutput(audioOutput) {
                audioOutput.setSampleBufferDelegate(self, queue: queue)
                captureSession.addOutput(audioOutput)
            }
            captureSession.commitConfiguration()
            captureSession.startRunning()
        } catch {
            print(error)
        }
    }

    func captureOutput(_ output: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
        audioOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
        guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        let cameraImage = CIImage(cvImageBuffer: imageBuffer)
        guard let name = filterObject.name else { return }
        let effect = FilterType.genericFilter(name: name, cameraImage: cameraImage)
        effect.setValue(cameraImage, forKey: kCIInputImageKey)
        TableData.setFilterValues(withFilterName: name, effect: effect, values: [value1, value2])   // value1/value2 come from elsewhere in this project
        guard let outputImage = effect.outputImage else { return }
        context.render(outputImage, to: imageBuffer)
        guard let cgImage = self.context.createCGImage(outputImage, from: cameraImage.extent) else { return }
        DispatchQueue.main.async {
            let filteredImage = UIImage(cgImage: cgImage)
            self.imageView.image = filteredImage
        }
        let timestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer).seconds
        switch recordingState {
        case .start:
            fileName = UUID().uuidString
            let videoPath = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!.appendingPathComponent("\(fileName).mov")
            let writer = try! AVAssetWriter(outputURL: videoPath, fileType: .mov)
            let videoSettings = videoOutput.recommendedVideoSettingsForAssetWriter(writingTo: .mov)
            let videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
            videoInput.mediaTimeScale = CMTimeScale(bitPattern: 600)
            videoInput.expectsMediaDataInRealTime = true
            let audioSettings = audioOutput.recommendedAudioSettingsForAssetWriter(writingTo: .m4a)
            let audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings as? [String : Any])
            audioInput.expectsMediaDataInRealTime = true
            //videoInput.transform = CGAffineTransform(rotationAngle: .pi/2)
            let pixelAdapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoInput, sourcePixelBufferAttributes: nil)
            if writer.canAdd(videoInput) {
                writer.add(videoInput)
            }
            if writer.canAdd(audioInput) {
                writer.add(audioInput)
            }
            writer.startWriting()
            writer.startSession(atSourceTime: .zero)
            assetWriter = writer
            assetWriterInput = videoInput
            pixelBufferAdaptor = pixelAdapter
            recordingState = .capturing
            time = timestamp
        case .capturing:
            if assetWriterInput?.isReadyForMoreMediaData == true {
                let newTime = CMTime(seconds: timestamp - time, preferredTimescale: CMTimeScale(600))
                pixelBufferAdaptor?.append(imageBuffer, withPresentationTime: newTime)
            }
            break
        case .end:
            guard assetWriterInput?.isReadyForMoreMediaData == true, assetWriter!.status != .failed else { break }
            let url = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!.appendingPathComponent("\(fileName).mov")
            assetWriterInput?.markAsFinished()
            assetWriter?.finishWriting { [weak self] in
                self?.recordingState = .idle
                self?.assetWriter = nil
                self?.assetWriterInput = nil
                DispatchQueue.main.async {
                    let activity = UIActivityViewController(activityItems: [url], applicationActivities: nil)
                    self?.present(activity, animated: true, completion: nil)
                }
            }
        default:
            break
        }
    }
}
Your audio settings do not look correct. The AVSampleRateKey value should come from the format description of the first audio sample buffer that arrives rather than being hard-coded; a fixed number like your 44100 belongs in AVEncoderBitRateKey instead, and that should maybe be set to AVEncoderBitRateKey: Int(48_000).
To get the sample rate, first call

let fmt = CMSampleBufferGetFormatDescription(sampleBuffer)
let asbd = CMAudioFormatDescriptionGetStreamBasicDescription(fmt!)

and then the sample rate will be asbd?.pointee.mSampleRate, which should be set as the AVSampleRateKey value in the audio settings (I think).
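Putting that together, a sketch of audio settings derived from the incoming buffer (assuming this runs on the first audio sample buffer in the delegate callback; the bit rate is an illustrative value):

if let fmt = CMSampleBufferGetFormatDescription(sampleBuffer),
   let asbd = CMAudioFormatDescriptionGetStreamBasicDescription(fmt)?.pointee {
    let audioSettings: [String : Any] = [
        AVFormatIDKey : kAudioFormatMPEG4AAC,
        AVNumberOfChannelsKey : Int(asbd.mChannelsPerFrame),
        AVSampleRateKey : asbd.mSampleRate,   // match the capture hardware instead of hard-coding 44100
        AVEncoderBitRateKey : 64_000
    ]
    // build the audio AVAssetWriterInput from these settings
}

Separately, note that the question's captureOutput only ever appends pixel buffers; the audio sample buffers arriving in the same callback are never appended to audioInput (e.g. with audioInput.append(sampleBuffer) while it isReadyForMoreMediaData), which would also leave the saved file silent. This is an observation added here, not part of the original answer.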

func captureOutput is never called

I'd like to add a filter to each frame I record in real time and display the filtered image in a UIImageView; if anyone could help it would be nice.
But captureOutput is never called. Here is my code:
class Measurement: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    @IBOutlet weak var cameraPreview: UIView!
    @IBOutlet weak var imageView: UIImageView!

    override func viewDidLoad() {
        super.viewDidLoad()
        setupCameraSession()
        toggleTorch(on: true)
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        view.layer.addSublayer(previewLayer)
        cameraSession.startRunning()
    }

    lazy var cameraSession: AVCaptureSession = {
        let s = AVCaptureSession()
        s.sessionPreset = AVCaptureSession.Preset.low
        return s
    }()

    lazy var previewLayer: AVCaptureVideoPreviewLayer = {
        let preview = AVCaptureVideoPreviewLayer(session: self.cameraSession)
        preview.position = CGPoint(x: 182, y: 485)
        preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
        preview.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
        preview.bounds = imageView.bounds
        //preview.position = CGPoint(x: self.view.bounds.midX, y: self.view.bounds.midY)
        imageView.layer.addSublayer(preview)
        return preview
    }()

    func toggleTorch(on: Bool) {
        guard let device = AVCaptureDevice.default(for: .video) else { return }
        if device.hasTorch {
            do {
                try device.lockForConfiguration()
                if on == true {
                    device.torchMode = .on
                } else {
                    device.torchMode = .off
                }
                device.unlockForConfiguration()
            } catch {
                print("Torch could not be used")
            }
        } else {
            print("Torch is not available")
        }
    }

    func setupCameraSession() {
        let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
        do {
            let deviceInput = try AVCaptureDeviceInput(device: captureDevice!)
            cameraSession.beginConfiguration()
            if cameraSession.canAddInput(deviceInput) == true {
                cameraSession.addInput(deviceInput)
                print("Processing Data.")
            }
            let dataOutput = AVCaptureVideoDataOutput()
            dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString) : NSNumber(value: kCVPixelFormatType_32BGRA as UInt32)] as [String : AnyObject]
            dataOutput.alwaysDiscardsLateVideoFrames = true
            print("Processing Data.")
            if cameraSession.canAddOutput(dataOutput) == true {
                cameraSession.addOutput(dataOutput)
                print("Processing Data.")
            }
            cameraSession.commitConfiguration()
            let queue = DispatchQueue(label: "com.invasivecode.videoQueue")
            dataOutput.setSampleBufferDelegate(self, queue: queue)
        } catch let error as NSError {
            print("\(error), \(error.localizedDescription)")
        }
    }

    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
        print("Processing Data.")
        guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        //let chromaKeyFilter = colorCubeFilterForChromaKey(hueAngle: 120)
        let ciImage = CIImage(cvPixelBuffer: imageBuffer)
        let context = CIContext()
        guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return }
        let image = UIImage(cgImage: cgImage)
        if let chromaKeyFilter = CIFilter(name: "CISepiaTone") {
            let beginImage = CIImage(image: image)
            chromaKeyFilter.setValue(beginImage, forKey: kCIInputImageKey)
            chromaKeyFilter.setValue(0.5, forKey: kCIInputIntensityKey)
            if let output = chromaKeyFilter.outputImage {
                if let cgimg = context.createCGImage(output, from: output.extent) {
                    let processedImage = UIImage(cgImage: cgimg)
                    // do something interesting with the processed image
                    imageView.image = processedImage
                }
            }
        }
    }

    func captureOutput(_ captureOutput: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // Here you can count how many frames are dropped
    }

    func startCapture() {
        print("\(self.classForCoder)/" + #function)
        if cameraSession.isRunning {
            print("already running")
            return
        }
        cameraSession.startRunning()
        toggleTorch(on: true)
    }
}
You need to set the delegate (note that sampleBufferDelegate is read-only, so it must be set via the setter method):

dataOutput.setSampleBufferDelegate(self, queue: queue)
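The code above does already call setSampleBufferDelegate, though, so another likely culprit (an observation added here, not from the original answer) is the delegate method's signature: since Swift 4 the video-data callback is captureOutput(_:didOutput:from:), and the older captureOutput(_:didOutputSampleBuffer:from:) form used above is simply never invoked. A sketch of the modern signature:

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    print("Processing Data.")
    guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
    // ... same filtering body as above ...
}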

How to add transitions to set images exported as video in Swift

I am making an app with functionality for transitions: for example, the video should play from top to bottom or from left to right with a transition effect. I am able to generate a video from a set of images in Swift 3.0, but I am not getting how to add transitions.
I got the code from this answer: How do I export UIImage array as a movie?
I am using it like this and it works fine; the only problem is getting transitions:
func writeImagesAsMovie(allImages: [UIImage], videoPath: String, videoSize: CGSize, videoFPS: Int32) {
    // Create AVAssetWriter to write video
    guard let assetWriter = createAssetWriter(videoPath, size: videoSize) else {
        print("Error converting images to video: AVAssetWriter not created")
        return
    }

    // If here, AVAssetWriter exists so create AVAssetWriterInputPixelBufferAdaptor
    let writerInput = assetWriter.inputs.filter { $0.mediaType == AVMediaTypeVideo }.first!
    let sourceBufferAttributes: [String : AnyObject] = [
        kCVPixelBufferPixelFormatTypeKey as String : Int(kCVPixelFormatType_32ARGB) as AnyObject,
        kCVPixelBufferWidthKey as String : videoSize.width as AnyObject,
        kCVPixelBufferHeightKey as String : videoSize.height as AnyObject,
    ]
    let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: writerInput, sourcePixelBufferAttributes: sourceBufferAttributes)

    // Start writing session
    assetWriter.startWriting()
    assetWriter.startSession(atSourceTime: kCMTimeZero)
    if pixelBufferAdaptor.pixelBufferPool == nil {
        print("Error converting images to video: pixelBufferPool nil after starting session")
        return
    }

    // -- Create queue for <requestMediaDataWhenReadyOnQueue>
    let mediaQueue = DispatchQueue(label: "mediaInputQueue", attributes: [])

    // -- Set video parameters
    let frameDuration = CMTimeMake(1, videoFPS)
    var frameCount = 0

    // -- Add images to video
    let numImages = allImages.count
    writerInput.requestMediaDataWhenReady(on: mediaQueue, using: { () -> Void in
        // Append unadded images to video but only while input ready
        while writerInput.isReadyForMoreMediaData && frameCount < numImages {
            let lastFrameTime = CMTimeMake(Int64(frameCount), videoFPS)
            let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
            if !self.appendPixelBufferForImageAtURL(image: allImages[frameCount], pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime) {
                print("Error converting images to video: AVAssetWriterInputPixelBufferAdaptor failed to append pixel buffer")
                return
            }
            frameCount += 1
        }

        // No more images to add? End video.
        if frameCount >= numImages {
            writerInput.markAsFinished()
            assetWriter.finishWriting {
                if assetWriter.error != nil {
                    print("Error converting images to video: \(String(describing: assetWriter.error))")
                } else {
                    print("Converted images to movie @ \(videoPath)")
                }
            }
        }
    })
}
func createAssetWriter(_ path: String, size: CGSize) -> AVAssetWriter? {
    // Convert <path> to NSURL object
    let pathURL = URL(fileURLWithPath: path)

    // Return new asset writer or nil
    do {
        // Create asset writer
        let newWriter = try AVAssetWriter(outputURL: pathURL, fileType: AVFileTypeMPEG4)

        // Define settings for video input
        let videoSettings: [String : AnyObject] = [
            AVVideoCodecKey : AVVideoCodecH264 as AnyObject,
            AVVideoWidthKey : size.width as AnyObject,
            AVVideoHeightKey : size.height as AnyObject,
        ]

        // Add video input to writer
        let assetWriterVideoInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
        newWriter.add(assetWriterVideoInput)

        // Return writer
        print("Created asset writer for \(size.width)x\(size.height) video")
        return newWriter
    } catch {
        print("Error creating asset writer: \(error)")
        return nil
    }
}
func appendPixelBufferForImageAtURL(image: UIImage, pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor, presentationTime: CMTime) -> Bool {
    var appendSucceeded = false
    autoreleasepool {
        if let pixelBufferPool = pixelBufferAdaptor.pixelBufferPool {
            let pixelBufferPointer = UnsafeMutablePointer<CVPixelBuffer?>.allocate(capacity: 1)
            let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(
                kCFAllocatorDefault,
                pixelBufferPool,
                pixelBufferPointer
            )
            if let pixelBuffer = pixelBufferPointer.pointee, status == 0 {
                fillPixelBufferFromImage(image: image, pixelBuffer: pixelBuffer)
                appendSucceeded = pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
                pixelBufferPointer.deinitialize()
            } else {
                NSLog("Error: Failed to allocate pixel buffer from pool")
            }
            pixelBufferPointer.deallocate(capacity: 1)
        }
    }
    return appendSucceeded
}
func fillPixelBufferFromImage(image: UIImage, pixelBuffer: CVPixelBuffer) {
    CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
    let pixelData = CVPixelBufferGetBaseAddress(pixelBuffer)
    let rgbColorSpace = CGColorSpaceCreateDeviceRGB()

    // Create CGBitmapContext
    let context = CGContext(
        data: pixelData,
        width: Int(image.size.width),
        height: Int(image.size.height),
        bitsPerComponent: 8,
        bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer),
        space: rgbColorSpace,
        bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue
    )

    // Draw image into context
    context?.draw(image.cgImage!, in: CGRect(x: 0.0, y: 0.0, width: image.size.width, height: image.size.height))

    CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
}