Video recording fails when adding timestamp to video - swift

I'm making an iOS video recording app. I convert each CMSampleBuffer to a UIImage, draw a timestamp on it, convert it back to a CMSampleBuffer, and append it to an AVAssetWriterInput. When I skip the timestamp and append the received CMSampleBuffer directly to the AVAssetWriterInput, recording finishes correctly; but when I add the timestamp to the image, recording fails. How can I fix this?
private func setupCaptureSession() {
    session.sessionPreset = .vga640x480
    guard
        let videoDevice = AVCaptureDevice.default(for: .video),
        let audioDevice = AVCaptureDevice.default(for: .audio),
        let videoInput = try? AVCaptureDeviceInput(device: videoDevice),
        let audioInput = try? AVCaptureDeviceInput(device: audioDevice) else {
            fatalError()
    }
    session.beginConfiguration()
    session.addInput(videoInput)
    session.addInput(audioInput)
    session.addOutput(videoOutput)
    session.addOutput(audioOutput)
    session.commitConfiguration()
    DispatchQueue.main.async { [weak self] in
        self?.session.startRunning()
    }
}
private func startRecording() {
    self.startUnixtime = DateUtility.getUnixtime()
    self.startTimeForDisplayingTimeCounter = Date()
    self.startTimer()
    self.elapsedTimeLabel.text = "00:00:00"
    // AVAssetWriter
    assetWriter = try! AVAssetWriter(outputURL: self.exportURL!, fileType: .mov)
    // video
    let videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: [
        AVVideoCodecKey: AVVideoCodecType.h264,
        AVVideoWidthKey: 640,
        AVVideoHeightKey: 480
    ])
    videoInput.expectsMediaDataInRealTime = true
    assetWriter?.add(videoInput)
    // audio
    let audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: nil)
    audioInput.expectsMediaDataInRealTime = true
    assetWriter?.add(audioInput)
    assetWriter?.startWriting()
    // Delegate
    let queue = DispatchQueue.global()
    videoOutput.setSampleBufferDelegate(self, queue: queue)
    audioOutput.setSampleBufferDelegate(self, queue: queue)
}
private func finishRecording() {
    self.endUnixtime = DateUtility.getUnixtime()
    self.elapsedTimeLabel.text = "00:00:00"
    self.timer?.invalidate()
    videoOutput.setSampleBufferDelegate(nil, queue: nil)
    audioOutput.setSampleBufferDelegate(nil, queue: nil)
    startTime = nil
    assetWriter?.finishWriting { [weak self] in
        guard let self = self else { return }
        guard self.assetWriter!.status == .completed else { fatalError("failed recording") }
        self.saveToPhotoLibrary { isSaveToPhotoLibrarySucceed in
            print("video saved to photo library")
            guard isSaveToPhotoLibrarySucceed else {
                print("Save to photo library failed")
                return
            }
            self.saveToRealmFromTempVideo {
                self.uploadVideoToServer()
            }
        }
    }
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard isRecording else { fatalError() }
    guard CMSampleBufferDataIsReady(sampleBuffer) else {
        print("not ready")
        return
    }
    if startTime == nil {
        startTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
        assetWriter?.startSession(atSourceTime: startTime!)
    }
    // Append video or audio
    let mediaType: AVMediaType = output is AVCaptureVideoDataOutput ? .video : .audio
    if mediaType == .video {
        appendVideo(from: sampleBuffer)
    } else if mediaType == .audio {
        appendAudio(from: sampleBuffer)
    } else {
        fatalError("should not reach here")
    }
}
// Append Video
func appendVideo(from sampleBuffer: CMSampleBuffer) {
    // - Guards
    guard let videoInput = assetWriter?.inputs.first(where: { $0.mediaType == .video }) else {
        print("video input not found")
        return
    }
    guard videoInput.isReadyForMoreMediaData else {
        print("video input not ready for more media data")
        return
    }
    // - Timestamp
    let sample: Sample = Sample(sampleBuffer: sampleBuffer)
    guard let ciImage = generateCIImage(from: sampleBuffer) else {
        print("CIImage creation from sampleBuffer failed")
        return
    }
    let uiImage = UIImage(ciImage: ciImage)
    guard let timestampAddedImage = self.addTimestamp(on: uiImage) else {
        fatalError("should not reach here")
    }
    guard let timestampAddedCvpixelBuffer = timestampAddedImage.toCVPixelBuffer() else {
        print("CVPixelBuffer creation from CIImage failed")
        return
    }
    guard let timestampAddedSampleBuffer = generateCMSampleBuffer(from: timestampAddedCvpixelBuffer, timingInfo: sample.timingInfo) else {
        print("CMSampleBuffer creation from CVPixelBuffer failed")
        return
    }
    DispatchQueue.main.sync { [weak self] in
        self?.compositeImageView.image = timestampAddedImage
    }
    print("append video")
    videoInput.append(timestampAddedSampleBuffer)
}
func addTimestamp(on image: UIImage) -> UIImage? {
    let imageRect = CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height)
    UIGraphicsBeginImageContextWithOptions(image.size, true, 0.0)
    image.draw(in: imageRect)
    // Text Attributes
    let textColor = UIColor.white
    let textFont = UIFont.systemFont(ofSize: FontSize.sizeL, weight: .bold)
    let textFontAttributes = [
        NSAttributedString.Key.font: textFont,
        NSAttributedString.Key.foregroundColor: textColor,
        NSAttributedString.Key.backgroundColor: UIColor(hex: ColorConstants.black, alpha: 0.4)
    ]
    let formatter = DateFormatter()
    formatter.dateFormat = "yyyy/MM/dd HH:mm:ss:SSS"
    let timestamp: NSString = formatter.string(from: Date()) as NSString
    let textRect = CGRect(x: 6.0, y: 6.0, width: image.size.width, height: 32)
    timestamp.draw(in: textRect, withAttributes: textFontAttributes)
    // New Image
    let newImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return newImage
}
func generateCMSampleBuffer(from cvPixelBuffer: CVPixelBuffer, timingInfo: CMSampleTimingInfo) -> CMSampleBuffer? {
    var sampleBuffer: CMSampleBuffer?
    var timingInfo: CMSampleTimingInfo = timingInfo
    var videoInfo: CMVideoFormatDescription!
    CMVideoFormatDescriptionCreateForImageBuffer(allocator: nil, imageBuffer: cvPixelBuffer, formatDescriptionOut: &videoInfo)
    CMSampleBufferCreateForImageBuffer(allocator: kCFAllocatorDefault,
                                       imageBuffer: cvPixelBuffer,
                                       dataReady: true,
                                       makeDataReadyCallback: nil,
                                       refcon: nil,
                                       formatDescription: videoInfo,
                                       sampleTiming: &timingInfo,
                                       sampleBufferOut: &sampleBuffer)
    return sampleBuffer
}
private extension UIImage {
    func toCVPixelBuffer() -> CVPixelBuffer? {
        let attrs = [kCVPixelBufferCGImageCompatibilityKey: kCFBooleanTrue, kCVPixelBufferCGBitmapContextCompatibilityKey: kCFBooleanTrue] as CFDictionary
        var pixelBuffer: CVPixelBuffer?
        let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(self.size.width), Int(self.size.height), kCVPixelFormatType_32ARGB, attrs, &pixelBuffer)
        guard status == kCVReturnSuccess else {
            return nil
        }
        if let pixelBuffer = pixelBuffer {
            CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
            let pixelData = CVPixelBufferGetBaseAddress(pixelBuffer)
            let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
            let context = CGContext(data: pixelData, width: Int(self.size.width), height: Int(self.size.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue)
            context?.translateBy(x: 0, y: self.size.height)
            context?.scaleBy(x: 1.0, y: -1.0)
            UIGraphicsPushContext(context!)
            self.draw(in: CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height))
            UIGraphicsPopContext()
            CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
            return pixelBuffer
        }
        return nil
    }
}
private final class Sample {
    let timingInfo: CMSampleTimingInfo
    init(sampleBuffer: CMSampleBuffer) {
        let presentationTimeStamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
        let duration = CMSampleBufferGetDuration(sampleBuffer)
        let decodeTimeStamp = CMSampleBufferGetDecodeTimeStamp(sampleBuffer)
        timingInfo = CMSampleTimingInfo(duration: duration, presentationTimeStamp: presentationTimeStamp, decodeTimeStamp: decodeTimeStamp)
    }
}
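Note: generateCIImage(from:) is not shown above; a minimal sketch of it, assuming it simply wraps the sample buffer's pixel buffer in a CIImage, would be:
func generateCIImage(from sampleBuffer: CMSampleBuffer) -> CIImage? {
    // Pull the image plane out of the sample buffer and wrap it without copying.
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return nil }
    return CIImage(cvPixelBuffer: pixelBuffer)
}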

Related

How to achieve better performance in converting UIView to CVPixelBuffer?

I'm wondering if it's possible to achieve better performance when converting a UIView into a CVPixelBuffer.
My app converts a sequence of UIViews first into UIImages and then into CVPixelBuffers, as shown below. In the end, I record all these images/frames into an AVAssetWriterInput and save the result as a movie file.
Thank you in advance!
Best, Aibek
func viewToImage(view: UIView) -> CGImage {
    let rect: CGRect = container.frame
    UIGraphicsBeginImageContextWithOptions(rect.size, true, 1)
    let context: CGContext = UIGraphicsGetCurrentContext()!
    view.layer.render(in: context)
    let img = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return img!.cgImage!
}
func imageToBuffer(image: CGImage) -> CVPixelBuffer? {
    let frameSize = CGSize(width: image.width, height: image.height)
    var pixelBuffer: CVPixelBuffer?
    let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(frameSize.width), Int(frameSize.height), kCVPixelFormatType_32BGRA, nil, &pixelBuffer)
    if status != kCVReturnSuccess {
        return nil
    }
    CVPixelBufferLockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
    let data = CVPixelBufferGetBaseAddress(pixelBuffer!)
    let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGBitmapInfo(rawValue: CGBitmapInfo.byteOrder32Little.rawValue | CGImageAlphaInfo.premultipliedFirst.rawValue)
    let context = CGContext(data: data, width: Int(frameSize.width), height: Int(frameSize.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer!), space: rgbColorSpace, bitmapInfo: bitmapInfo.rawValue)
    context?.draw(image, in: CGRect(x: 0, y: 0, width: image.width, height: image.height))
    CVPixelBufferUnlockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
    return pixelBuffer
}
You should take a look at this: https://stackoverflow.com/a/61862728/13680955
In short, that sample converts a UIView to an MTLTexture in 12 ms.
You can certainly use CVPixelBuffer directly, but I used MTLTexture to make the video and had no issues with it.
If you are struggling with performance, try this approach.
With MTLTexture:
import AVFoundation
import MetalKit
class VideoRecorder {
    let assetWriter: AVAssetWriter
    let assetWriterVideoInput: AVAssetWriterInput
    let assetWriterInputPixelBufferAdapter: AVAssetWriterInputPixelBufferAdaptor
    var recordingStartTime = TimeInterval(0)
    var recordingElapsedTime = TimeInterval(0)
    let url: URL = {
        let fileName = "exported_video.mp4"
        return FileManager.default.temporaryDirectory.appendingPathComponent(fileName)
    }()
    init(outputSize: CGSize) throws {
        if FileManager.default.fileExists(atPath: url.path) {
            try FileManager.default.removeItem(at: url)
        }
        let fileType: AVFileType = .mov
        assetWriter = try AVAssetWriter(outputURL: url, fileType: fileType)
        let mediaType: AVMediaType = .video
        let outputSettings: [String: Any] = [
            AVVideoCodecKey: AVVideoCodecType.h264,
            AVVideoWidthKey: outputSize.width,
            AVVideoHeightKey: outputSize.height
        ]
        assetWriterVideoInput = AVAssetWriterInput(mediaType: mediaType, outputSettings: outputSettings)
        assetWriterVideoInput.expectsMediaDataInRealTime = false
        let sourcePixelBufferAttributes: [String: Any] = [
            kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
            kCVPixelBufferWidthKey as String: outputSize.width,
            kCVPixelBufferHeightKey as String: outputSize.height
        ]
        assetWriterInputPixelBufferAdapter = AVAssetWriterInputPixelBufferAdaptor(
            assetWriterInput: assetWriterVideoInput, sourcePixelBufferAttributes: sourcePixelBufferAttributes)
        assetWriter.add(assetWriterVideoInput)
    }
    private static func currentTimestampString() -> String {
        let date = Date()
        let dateFormatter = DateFormatter()
        dateFormatter.dateFormat = "yyyy-MM-dd HH:mm:ss"
        return dateFormatter.string(from: date)
    }
    public func start() {
        print("videoRecorder.start")
        assetWriter.startWriting()
        assetWriter.startSession(atSourceTime: .zero)
        recordingStartTime = CACurrentMediaTime()
    }
    public func cancel() {
        #if DEBUG
        print("videoRecorder.cancel")
        #endif
        assetWriterVideoInput.markAsFinished()
        assetWriter.cancelWriting()
    }
    public func finish(_ callback: @escaping () -> Void) {
        print("videoRecorder.finish")
        assetWriterVideoInput.markAsFinished()
        assetWriter.finishWriting {
            self.recordingElapsedTime = CACurrentMediaTime() - self.recordingStartTime
            print("videoRecorder.finish elapsedTime: \(self.recordingElapsedTime)")
            callback()
        }
    }
    private var pixelBuffer: CVPixelBuffer?
    public func writeFrame(texture: MTLTexture, at presentationTime: CMTime) {
        print("videoRecorder.writeFrame: \(presentationTime)")
        if pixelBuffer == nil {
            guard let pixelBufferPool = assetWriterInputPixelBufferAdapter.pixelBufferPool else {
                print("Pixel buffer asset writer input did not have a pixel buffer pool available;")
                print("cannot retrieve frame")
                return
            }
            var maybePixelBuffer: CVPixelBuffer?
            let status = CVPixelBufferPoolCreatePixelBuffer(nil, pixelBufferPool, &maybePixelBuffer)
            if status != kCVReturnSuccess {
                print("Could not get pixel buffer from asset writer input; dropping frame...")
                return
            }
            pixelBuffer = maybePixelBuffer
            print("videoRecorder.writeFrame: pixelBuffer was created: \(String(describing: pixelBuffer))")
        }
        guard let pixelBuffer = pixelBuffer else {
            print("videoRecorder.writeFrame: NO pixelBuffer")
            return
        }
        writeFrame(texture: texture, at: presentationTime, with: pixelBuffer)
    }
    private func writeFrame(texture: MTLTexture, at presentationTime: CMTime, with pixelBuffer: CVPixelBuffer) {
        // Spin until the writer input can accept more data.
        while !assetWriterVideoInput.isReadyForMoreMediaData {
            print("NOT ready for more media data at: \(presentationTime)")
        }
        CVPixelBufferLockBaseAddress(pixelBuffer, [])
        let pixelBufferBytes = CVPixelBufferGetBaseAddress(pixelBuffer)!
        // Use the bytes per row value from the pixel buffer since its stride may be rounded up to be 16-byte aligned
        let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
        let region = MTLRegionMake2D(0, 0, texture.width, texture.height)
        texture.getBytes(pixelBufferBytes, bytesPerRow: bytesPerRow, from: region, mipmapLevel: 0)
        assetWriterInputPixelBufferAdapter.append(pixelBuffer, withPresentationTime: presentationTime)
        CVPixelBufferUnlockBaseAddress(pixelBuffer, [])
    }
}
Converting the UIViews into MTLTextures and recording them into a video file using the Recorder provided by Mayo didn't actually improve performance.
However, the recorder is able to write MTLTextures in real time. That meant I could rewrite all the animations using Metal and then use the recorder.
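For reference, the UIView-to-MTLTexture step isn't shown here; a minimal sketch of it, assuming a CPU-side Core Graphics render plus MTKTextureLoader (not the Metal-layer approach from the linked answer), might be:
import UIKit
import MetalKit
// Render the view's layer into a bitmap on the CPU, then upload it as a texture.
// Call on the main thread; `device` is any MTLDevice (e.g. MTLCreateSystemDefaultDevice()).
func texture(from view: UIView, device: MTLDevice) -> MTLTexture? {
    let renderer = UIGraphicsImageRenderer(bounds: view.bounds)
    let image = renderer.image { ctx in
        view.layer.render(in: ctx.cgContext)
    }
    guard let cgImage = image.cgImage else { return nil }
    // The loaded texture's pixel format may need converting to match the
    // recorder's BGRA pixel buffers.
    return try? MTKTextureLoader(device: device).newTexture(cgImage: cgImage, options: nil)
}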

UIImage sometimes flipped [duplicate]

If I use the image before it is saved, it looks normal. But if I save it and use it later, it is rotated 90 degrees. How can I make sure it doesn't save sideways?
func saveEvent(_ center1: CLLocation, title2: String, imagePicked1: UIImage) {
    let data = UIImagePNGRepresentation(imagePicked1)
    let url = NSURL(fileURLWithPath: NSTemporaryDirectory()).appendingPathComponent(NSUUID().uuidString + ".dat")
    do {
        try data!.write(to: url!, options: [])
    } catch let e as NSError {
        print("Error! \(e)")
        return
    }
    let image11 = CKAsset(fileURL: url!)
    self.eventRecord.setObject(image11 as CKAsset, forKey: "Picture")
    let publicData = CKContainer.default().publicCloudDatabase
    publicData.save(self.eventRecord, completionHandler: { record, error in
        if error == nil {
            print("Image saved")
        } else {
            print(error!)
        }
    })
}
If you need to save your PNG with the correct rotation, you will need to redraw your image if its orientation is not .up. You can redraw it as follows:
extension UIImage {
    func png(isOpaque: Bool = true) -> Data? { flattened(isOpaque: isOpaque)?.pngData() }
    func flattened(isOpaque: Bool = true) -> UIImage? {
        if imageOrientation == .up { return self }
        UIGraphicsBeginImageContextWithOptions(size, isOpaque, scale)
        defer { UIGraphicsEndImageContext() }
        draw(in: CGRect(origin: .zero, size: size))
        return UIGraphicsGetImageFromCurrentImageContext()
    }
}
edit/update:
For iOS 10+ / tvOS 10+ you can use UIGraphicsImageRenderer:
extension UIImage {
    func png(isOpaque: Bool = true) -> Data? { flattened(isOpaque: isOpaque).pngData() }
    func flattened(isOpaque: Bool = true) -> UIImage {
        if imageOrientation == .up { return self }
        let format = imageRendererFormat
        format.opaque = isOpaque
        return UIGraphicsImageRenderer(size: size, format: format).image { _ in draw(at: .zero) }
    }
}
Playground testing:
Usage for images without transparency:
let image = UIImage(data: try! Data(contentsOf: URL(string: "https://i.stack.imgur.com/varL9.jpg")!))!
if let data = image.png() {
    let imageFromPNGData = UIImage(data: data)
}
With transparency:
if let data = image.png(isOpaque: false) {
    let imageFromPNGData = UIImage(data: data)
}
Just convert the image to JPEG data instead. No need to redraw your image:
let imageData = image.jpegData(compressionQuality: 1.0)
You can use this as well to prevent the orientation from changing:
func rotateImage(image: UIImage) -> UIImage? {
    if image.imageOrientation == UIImage.Orientation.up {
        return image
    }
    UIGraphicsBeginImageContext(image.size)
    image.draw(in: CGRect(origin: CGPoint.zero, size: image.size))
    let copy = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return copy
}

Video local file URL not compatible with saved photos album

I am converting a single image into a video and then trying to save the video into the camera roll, but UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(url.path) is always false.
Selecting image from camera roll:
if UIImagePickerController.isSourceTypeAvailable(.photoLibrary) {
    let imagePicker = UIImagePickerController()
    imagePicker.delegate = self
    imagePicker.sourceType = .photoLibrary
    imagePicker.allowsEditing = false
    present(imagePicker, animated: true, completion: nil)
}
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey : Any]) {
    dismiss(animated: true, completion: nil)
    var message = ""
    if let img = info[UIImagePickerController.InfoKey.originalImage] as? UIImage {
        self.img = img
        message = "image chosen"
    } else { print("FAILED IN PICKER"); return }
    let alert = UIAlertController(title: "Asset Loaded", message: message, preferredStyle: .alert)
    alert.addAction(UIAlertAction(title: "OK", style: UIAlertAction.Style.cancel, handler: nil))
    present(alert, animated: true, completion: nil)
}
On a button press, I try to convert the image to a video and save it to the camera roll:
guard let img = img else { return }
let images = [img]
VideoCreator.buildVideoFromImageArray(with: images) { (url) in
    print("PATH: " + url.path)
    if UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(url.path) {
        print("PATHHHHH")
        UISaveVideoAtPathToSavedPhotosAlbum(
            url.path,
            self,
            #selector(self.video(_:didFinishSavingWithError:contextInfo:)),
            nil)
    }
}
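(The completion selector referenced above isn't shown in the question; a minimal sketch of it, assuming the standard UIKit callback signature, is:)
@objc func video(_ videoPath: String, didFinishSavingWithError error: Error?, contextInfo: UnsafeRawPointer) {
    // `error` is nil when the save succeeded.
    print(error == nil ? "video saved" : "save failed: \(error!.localizedDescription)")
}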
Video Creator: (ignore the lack of refactoring and gross code)
import UIKit
import AVFoundation
class VideoCreator {
    static private var outputSize = CGSize(width: 1920, height: 1280) // placeholder
    static private let imagesPerSecond: TimeInterval = 10 // each image stays on screen for this many seconds
    static private var selectedPhotosArray = [UIImage]()
    static private var imageArrayToVideoURL = NSURL()
    static private let audioIsEnabled: Bool = false // false if your video has no sound
    static private var asset: AVAsset!
    static func buildVideoFromImageArray(with images: [UIImage], completionHandler: @escaping (_ url: URL) -> Void) {
        outputSize = images[0].size
        for image in images {
            selectedPhotosArray.append(image)
        }
        guard let documentDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first else { return }
        imageArrayToVideoURL = NSURL(fileURLWithPath: documentDirectory.path + "/video.mov")
        removeFileAtURLIfExists(url: imageArrayToVideoURL)
        guard let videoWriter = try? AVAssetWriter(outputURL: imageArrayToVideoURL as URL, fileType: AVFileType.mov) else {
            fatalError("AVAssetWriter error")
        }
        let outputSettings = [AVVideoCodecKey : AVVideoCodecType.h264, AVVideoWidthKey : NSNumber(value: Float(outputSize.width)), AVVideoHeightKey : NSNumber(value: Float(outputSize.height))] as [String : Any]
        guard videoWriter.canApply(outputSettings: outputSettings, forMediaType: AVMediaType.video) else {
            fatalError("Negative : Can't apply the Output settings...")
        }
        let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: outputSettings)
        let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(value: Float(outputSize.width)), kCVPixelBufferHeightKey as String: NSNumber(value: Float(outputSize.height))]
        let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
        if videoWriter.canAdd(videoWriterInput) {
            videoWriter.add(videoWriterInput)
        }
        if videoWriter.startWriting() {
            let zeroTime = CMTimeMake(value: Int64(imagesPerSecond), timescale: Int32(1))
            videoWriter.startSession(atSourceTime: zeroTime)
            assert(pixelBufferAdaptor.pixelBufferPool != nil)
            let media_queue = DispatchQueue(label: "mediaInputQueue")
            videoWriterInput.requestMediaDataWhenReady(on: media_queue, using: { () -> Void in
                let fps: Int32 = 1
                let framePerSecond: Int64 = Int64(self.imagesPerSecond)
                let frameDuration = CMTimeMake(value: Int64(self.imagesPerSecond), timescale: fps)
                var frameCount: Int64 = 0
                var appendSucceeded = true
                while !self.selectedPhotosArray.isEmpty {
                    if videoWriterInput.isReadyForMoreMediaData {
                        let nextPhoto = self.selectedPhotosArray.remove(at: 0)
                        let lastFrameTime = CMTimeMake(value: frameCount * framePerSecond, timescale: fps)
                        let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
                        var pixelBuffer: CVPixelBuffer? = nil
                        let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
                        if let pixelBuffer = pixelBuffer, status == 0 {
                            let managedPixelBuffer = pixelBuffer
                            CVPixelBufferLockBaseAddress(managedPixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
                            let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
                            let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
                            let context = CGContext(data: data, width: Int(self.outputSize.width), height: Int(self.outputSize.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(managedPixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
                            context!.clear(CGRect(x: 0, y: 0, width: CGFloat(self.outputSize.width), height: CGFloat(self.outputSize.height)))
                            let horizontalRatio = CGFloat(self.outputSize.width) / nextPhoto.size.width
                            let verticalRatio = CGFloat(self.outputSize.height) / nextPhoto.size.height
                            //let aspectRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
                            let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
                            let newSize: CGSize = CGSize(width: nextPhoto.size.width * aspectRatio, height: nextPhoto.size.height * aspectRatio)
                            let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
                            let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
                            context?.draw(nextPhoto.cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))
                            CVPixelBufferUnlockBaseAddress(managedPixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
                            appendSucceeded = pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
                        } else {
                            print("Failed to allocate pixel buffer")
                            appendSucceeded = false
                        }
                    }
                    if !appendSucceeded {
                        break
                    }
                    frameCount += 1
                }
                videoWriterInput.markAsFinished()
                videoWriter.finishWriting { () -> Void in
                    print("-----video1 url = \(self.imageArrayToVideoURL)")
                    completionHandler(self.imageArrayToVideoURL as URL)
                }
            })
        }
    }
    private static func removeFileAtURLIfExists(url: NSURL) {
        if let filePath = url.path {
            let fileManager = FileManager.default
            if fileManager.fileExists(atPath: filePath) {
                do {
                    try fileManager.removeItem(atPath: filePath)
                } catch let error as NSError {
                    print("Couldn't remove existing destination file: \(error)")
                }
            }
        }
    }
}
You can always save your file and check whether it was successfully saved to Photos using:
PHPhotoLibrary.shared().performChanges({
    PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: tempUrl)
}, completionHandler: { success, error in
    // `success` is true only if the video actually made it into the library.
    print("Saved to Photos: \(success), error: \(String(describing: error))")
})
UISaveVideoAtPathToSavedPhotosAlbum(_:_:_:_:) doesn't work for some.

How to Create thumbnail from local files

I want to create a thumbnail image for files (Word, Excel, video, …).
This is what I did:
import QuickLook
class ThumbsCreator: NSObject {
    private var file: File?
    init(file: File?) {
        super.init()
        self.file = file
    }
    func createThumb() {
        let url = URL(string: (self.file?.path())!)
    }
}
After a lot of searching, I found this solution:
import PDFKit
import AVKit
import WebKit
func createThumb() {
    let url = URL(string: (self.file?.path())!)
    switch file?.type {
    case FileType.image.rawValue:
        let image = UIImage(contentsOfFile: (url?.path)!)
        self.createScaledImage(image: image!)
        break
    case FileType.office.rawValue:
        // Loading.......
        break
    case FileType.Pdf.rawValue:
        guard let doc = PDFDocument(url: url!) else { return }
        guard let page = doc.page(at: 0) else { return }
        _finalImage = page.thumbnail(of: CGSize(width: 768, height: 1024), for: .cropBox)
        break
    case FileType.video.rawValue:
        let asset = AVAsset(url: url!)
        let imageGenerator = AVAssetImageGenerator(asset: asset)
        imageGenerator.appliesPreferredTrackTransform = true
        let time = CMTime(seconds: 2, preferredTimescale: 1)
        do {
            let imageRef = try imageGenerator.copyCGImage(at: time, actualTime: nil)
            _finalImage = UIImage(cgImage: imageRef)
        } catch let error {
            print("Error: \(error)")
        }
        break
    default:
        break
    }
}
func createScaledImage(image: UIImage) {
    let THUMB_WIDTH = 150.0 - 40.0
    let THUMB_HEIGHT = THUMB_WIDTH - 23.0
    var itemThumb = resizeImage(image: image, constraintSize: CGSize(width: THUMB_WIDTH, height: THUMB_HEIGHT))
    let thumbRect = CGRect(x: 0, y: 0, width: 10, height: 10)
    UIGraphicsBeginImageContextWithOptions(thumbRect.size, true, 0.0)
    let context = UIGraphicsGetCurrentContext()
    // Fill a white rect
    context?.setFillColor(gray: 1.0, alpha: 1.0)
    context?.fill(thumbRect)
    // Stroke a gray rect
    let comps: [CGFloat] = [0.8, 0.8, 0.8, 1]
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let strokeColor = CGColor(colorSpace: colorSpace, components: comps)
    context?.setStrokeColor(strokeColor!)
    UIRectFrame(thumbRect)
    //CGColorRelease(strokeColor!)
    itemThumb.draw(in: thumbRect.insetBy(dx: 1, dy: 1))
    itemThumb = UIGraphicsGetImageFromCurrentImageContext()!
    UIGraphicsEndImageContext()
    self.finishThumCreation(image: image)
}
}
Starting from iOS 13 and macOS 10.15, there is the QuickLook Thumbnailing API. It supports any file format for which the OS can provide a preview: either because the OS knows this format or because the owner of the third-party format provided a QuickLook plugin.
Here is an example based on Apple's tutorial:
func thumbnail(for fileURL: URL, size: CGSize, scale: CGFloat) {
    let request = QLThumbnailGenerator
        .Request(fileAt: fileURL, size: size, scale: scale,
                 representationTypes: .lowQualityThumbnail)
    QLThumbnailGenerator.shared.generateRepresentations(for: request) { (thumbnail, type, error) in
        DispatchQueue.main.async {
            if thumbnail == nil || error != nil {
                // Handle the error case gracefully.
            } else {
                // Display the thumbnail that you created.
            }
        }
    }
}
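A hypothetical call site, assuming fileURL points at a local document:
thumbnail(for: fileURL, size: CGSize(width: 128, height: 102), scale: UIScreen.main.scale)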
On macOS before 10.15, in my app I fall back to NSWorkspace.shared.icon(forFile:), which provides a document icon based on the file type (but not a thumbnail).
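A minimal sketch of that fallback (AppKit; fileURL is assumed to be a local file URL):
import AppKit
// Returns the document icon for the file's type, not a rendered preview of its contents.
let icon: NSImage = NSWorkspace.shared.icon(forFile: fileURL.path)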
You can use https://developer.apple.com/documentation/uikit/uidocumentinteractioncontroller/1616801-icons
var icons: [UIImage] { get }
let controller = UIDocumentInteractionController(url: someUrl)
print(controller.icons.first)
Only for a video:
extension UIViewController {
    func thumbnail(_ sourceURL: URL) -> UIImage {
        let asset = AVAsset(url: sourceURL)
        let imageGenerator = AVAssetImageGenerator(asset: asset)
        imageGenerator.appliesPreferredTrackTransform = true
        let time = CMTime(seconds: 1, preferredTimescale: 1)
        do {
            let imageRef = try imageGenerator.copyCGImage(at: time, actualTime: nil)
            return UIImage(cgImage: imageRef)
        } catch {
            print(error)
            return UIImage(named: "NoVideos")!
        }
    }
}
There's no good API for this yet. There is NSURLThumbnailDictionaryKey, but YMMV. You can indeed get icons via UIDocumentInteractionController.
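A sketch of the NSURLThumbnailDictionaryKey route (the file URL and image view here are hypothetical; results vary by file type):
let fileURL = URL(fileURLWithPath: "/path/to/file.pdf") // hypothetical local file
if let values = try? fileURL.resourceValues(forKeys: [.thumbnailDictionaryKey]),
   let thumbnail = values.thumbnailDictionary?[.NSThumbnail1024x1024SizeKey] {
    imageView.image = thumbnail // hypothetical image view
}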

How to add transitions to set images exported as video in Swift

I am making an app with transition functionality: for example, the video should play from top to bottom, or with a left-to-right transition effect. I am able to generate a video from a set of images in Swift 3.0, but I can't work out how to add transitions.
I got the code from this answer: How do I export UIImage array as a movie?
I am using it like this and it works fine; the only problem is adding the transitions.
func writeImagesAsMovie(allImages: [UIImage], videoPath: String, videoSize: CGSize, videoFPS: Int32) {
    // Create AVAssetWriter to write video
    guard let assetWriter = createAssetWriter(videoPath, size: videoSize) else {
        print("Error converting images to video: AVAssetWriter not created")
        return
    }
    // If here, AVAssetWriter exists so create AVAssetWriterInputPixelBufferAdaptor
    let writerInput = assetWriter.inputs.filter { $0.mediaType == AVMediaTypeVideo }.first!
    let sourceBufferAttributes: [String : AnyObject] = [
        kCVPixelBufferPixelFormatTypeKey as String : Int(kCVPixelFormatType_32ARGB) as AnyObject,
        kCVPixelBufferWidthKey as String : videoSize.width as AnyObject,
        kCVPixelBufferHeightKey as String : videoSize.height as AnyObject,
    ]
    let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: writerInput, sourcePixelBufferAttributes: sourceBufferAttributes)
    // Start writing session
    assetWriter.startWriting()
    assetWriter.startSession(atSourceTime: kCMTimeZero)
    if pixelBufferAdaptor.pixelBufferPool == nil {
        print("Error converting images to video: pixelBufferPool nil after starting session")
        return
    }
    // -- Create queue for <requestMediaDataWhenReadyOnQueue>
    let mediaQueue = DispatchQueue(label: "mediaInputQueue", attributes: [])
    // -- Set video parameters
    let frameDuration = CMTimeMake(1, videoFPS)
    var frameCount = 0
    // -- Add images to video
    let numImages = allImages.count
    writerInput.requestMediaDataWhenReady(on: mediaQueue, using: { () -> Void in
        // Append unadded images to video but only while input ready
        while writerInput.isReadyForMoreMediaData && frameCount < numImages {
            let lastFrameTime = CMTimeMake(Int64(frameCount), videoFPS)
            let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
            if !self.appendPixelBufferForImageAtURL(image: allImages[frameCount], pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime) {
                print("Error converting images to video: AVAssetWriterInputPixelBufferAdapter failed to append pixel buffer")
                return
            }
            frameCount += 1
        }
        // No more images to add? End video.
        if frameCount >= numImages {
            writerInput.markAsFinished()
            assetWriter.finishWriting {
                if assetWriter.error != nil {
                    print("Error converting images to video: \(String(describing: assetWriter.error))")
                } else {
                    print("Converted images to movie @ \(videoPath)")
                }
            }
        }
    })
}
func createAssetWriter(_ path: String, size: CGSize) -> AVAssetWriter? {
    // Convert <path> to NSURL object
    let pathURL = URL(fileURLWithPath: path)
    // Return new asset writer or nil
    do {
        // Create asset writer
        let newWriter = try AVAssetWriter(outputURL: pathURL, fileType: AVFileTypeMPEG4)
        // Define settings for video input
        let videoSettings: [String : AnyObject] = [
            AVVideoCodecKey : AVVideoCodecH264 as AnyObject,
            AVVideoWidthKey : size.width as AnyObject,
            AVVideoHeightKey : size.height as AnyObject,
        ]
        // Add video input to writer
        let assetWriterVideoInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
        newWriter.add(assetWriterVideoInput)
        // Return writer
        print("Created asset writer for \(size.width)x\(size.height) video")
        return newWriter
    } catch {
        print("Error creating asset writer: \(error)")
        return nil
    }
}
func appendPixelBufferForImageAtURL(image: UIImage, pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor, presentationTime: CMTime) -> Bool {
    var appendSucceeded = false
    autoreleasepool {
        if let pixelBufferPool = pixelBufferAdaptor.pixelBufferPool {
            let pixelBufferPointer = UnsafeMutablePointer<CVPixelBuffer?>.allocate(capacity: 1)
            let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(
                kCFAllocatorDefault,
                pixelBufferPool,
                pixelBufferPointer
            )
            if let pixelBuffer = pixelBufferPointer.pointee, status == 0 {
                fillPixelBufferFromImage(image: image, pixelBuffer: pixelBuffer)
                appendSucceeded = pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
                pixelBufferPointer.deinitialize()
            } else {
                NSLog("Error: Failed to allocate pixel buffer from pool")
            }
            pixelBufferPointer.deallocate(capacity: 1)
        }
    }
    return appendSucceeded
}
func fillPixelBufferFromImage(image: UIImage, pixelBuffer: CVPixelBuffer) {
    CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
    let pixelData = CVPixelBufferGetBaseAddress(pixelBuffer)
    let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
    // Create CGBitmapContext
    let context = CGContext(
        data: pixelData,
        width: Int(image.size.width),
        height: Int(image.size.height),
        bitsPerComponent: 8,
        bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer),
        space: rgbColorSpace,
        bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue
    )
    // Draw image into context
    context?.draw(image.cgImage!, in: CGRect(x: 0.0, y: 0.0, width: image.size.width, height: image.size.height))
    CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
}