Rotate video not working as expected - Swift

I have a function to rotate a video. The first time I rotate it by pi / 2, it works normally, but if I rotate it by pi / 2 again, it doesn't work: it still rotates by only pi / 2, while I expect it to rotate by pi. Can anyone help me? Thanks.
let rotateTransform = CGAffineTransform(rotationAngle: CGFloat((self.rotateAngle * Double.pi)/180))
let videoAsset = AVURLAsset(url: sourceURL)
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
let clipVideoTrack = videoAsset.tracks(withMediaType: .video)
try? compositionVideoTrack?.insertTimeRange(CMTimeRange(start: .zero, duration: videoAsset.duration), of: clipVideoTrack[0], at: .zero)
compositionVideoTrack?.preferredTransform = videoAsset.preferredTransform
let videoTrack = videoAsset.tracks(withMediaType: .video)[0]
let videoSize = videoTrack.naturalSize
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = videoSize
let timeScale = CMTimeScale(videoTrack.nominalFrameRate)
videoComposition.frameDuration = CMTime(value: 1, timescale: timeScale)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(start: .zero, duration: mixComposition.duration)
let mixVideoTrack = mixComposition.tracks(withMediaType: .video)[0]
mixVideoTrack.preferredTransform = rotateTransform
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: mixVideoTrack)
layerInstruction.setTransform(mixVideoTrack.preferredTransform, at: .zero)
instruction.layerInstructions = [layerInstruction]
videoComposition.instructions = [instruction]
guard let exportSession = AVAssetExportSession(asset: mixComposition,
                                               presetName: AVAssetExportPresetPassthrough) else {
    return
}
exportSession.outputURL = outputURL
exportSession.outputFileType = .mov
exportSession.videoComposition = videoComposition
exportSession.exportAsynchronously { [weak self] in
self?.handleExportSession(exportSession: exportSession, sourceURL: sourceURL, outputURL: outputURL)
}
}

If you set the transform, it will always be applied relative to the original orientation. So when you set a rotation matrix of pi/2, the original will be rotated by pi/2. If you apply the same rotation matrix again, the original will again be rotated by pi/2. What you need is to combine the current transform with the new rotation and then set the resulting transform.
Something like:
let currentTransform = mixVideoTrack.preferredTransform // or wherever you get it from
let newTransform = currentTransform.rotated(by: CGFloat((self.rotateAngle * Double.pi)/180))
mixVideoTrack.preferredTransform = newTransform
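If the rotation should accumulate across repeated calls, one option is to keep the running transform in a stored property. A minimal sketch, assuming the rotation state lives on the class that owns the export code (currentTransform and rotate(by:) are illustrative names, not part of the original code):
var currentTransform: CGAffineTransform = .identity

func rotate(by angleDegrees: Double) {
    let radians = CGFloat(angleDegrees * .pi / 180)
    // Compose the new rotation onto what has been applied so far,
    // so two pi/2 calls add up to a total rotation of pi.
    currentTransform = currentTransform.rotated(by: radians)
    // Use currentTransform for both mixVideoTrack.preferredTransform and
    // layerInstruction.setTransform(_:at:) when building the export.
}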

Related

Animate a moving watermark over video using CALayer

I am working in Swift and trying to make a watermark move positions over a video. So far, all I'm getting is a watermark that stays still over the playing video. How can I make it move? Basically, what I'm trying to replicate is the watermark from TikTok videos that pops from one location to another. I am really confused as to why this doesn't work. Please help. Thanks!
func processVideoWithElements(item: MediaItem, completion: @escaping ProcessCompletionHandler) {
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)
let clipVideoTrack = item.sourceAsset.tracks(withMediaType: AVMediaType.video).first
let clipAudioTrack = item.sourceAsset.tracks(withMediaType: AVMediaType.audio).first
do {
try compositionVideoTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: item.sourceAsset.duration), of: clipVideoTrack!, at: CMTime.zero)
} catch {
completion(MediaProcessResult(processedUrl: nil, image: nil), error)
}
if (clipAudioTrack != nil) {
let compositionAudioTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
try compositionAudioTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: item.sourceAsset.duration), of: clipAudioTrack!, at: CMTime.zero)
} catch {
completion(MediaProcessResult(processedUrl: nil, image: nil), error)
}
}
compositionVideoTrack?.preferredTransform = (item.sourceAsset.tracks(withMediaType: AVMediaType.video).first?.preferredTransform)!
let sizeOfVideo = item.size
let optionalLayer = CALayer()
processAndAddElements(item: item, layer: optionalLayer)
optionalLayer.frame = CGRect(x: 0, y: 0, width: sizeOfVideo.width, height: sizeOfVideo.height)
optionalLayer.masksToBounds = true
// animate layer ---------------------------------------
let positionAnimation = CABasicAnimation(keyPath: #keyPath(CALayer.position))
positionAnimation.beginTime = CMTime.zero.seconds
positionAnimation.fromValue = CGPoint(x: -20, y: 100)
positionAnimation.toValue = CGPoint(x: 280, y: 100)
positionAnimation.duration = 3 // item.sourceAsset.duration.seconds
positionAnimation.repeatCount = 2
positionAnimation.autoreverses = true
optionalLayer.add(positionAnimation, forKey: #keyPath(CALayer.position))
//------------------------------------
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0, y: 0, width: sizeOfVideo.width, height: sizeOfVideo.height)
videoLayer.frame = CGRect(x: 0, y: 0, width: sizeOfVideo.width, height: sizeOfVideo.height)
parentLayer.addSublayer(videoLayer)
parentLayer.addSublayer(optionalLayer)
let videoComposition = AVMutableVideoComposition()
videoComposition.frameDuration = CMTimeMake(value: kMediaContentTimeValue, timescale: kMediaContentTimeScale)
videoComposition.renderSize = sizeOfVideo
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: mixComposition.duration)
let videoTrack = mixComposition.tracks(withMediaType: AVMediaType.video).first
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack!)
layerInstruction.setTransform(transform(avAsset: item.sourceAsset, scaleFactor: kMediaContentDefaultScale), at: CMTime.zero)
instruction.layerInstructions = [layerInstruction]
videoComposition.instructions = [instruction]
let processedUrl = processedMoviePath()
clearTemporaryData(url: processedUrl, completion: completion)
let exportSession = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exportSession?.videoComposition = videoComposition
exportSession?.outputURL = processedUrl
exportSession?.outputFileType = AVFileType.mp4
exportSession?.exportAsynchronously(completionHandler: {
if exportSession?.status == AVAssetExportSession.Status.completed {
completion(MediaProcessResult(processedUrl: processedUrl, image: nil), nil)
} else {
completion(MediaProcessResult(processedUrl: nil, image: nil), exportSession?.error)
}
})
}
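A likely cause, as an educated guess not confirmed against this exact project: Core Animation interprets a beginTime of 0 as CACurrentMediaTime, so an animation attached to a layer rendered through AVVideoCompositionCoreAnimationTool should start at AVCoreAnimationBeginTimeAtZero and keep its final state during export. A sketch of those adjustments, applied to the positionAnimation from the question:
positionAnimation.beginTime = AVCoreAnimationBeginTimeAtZero // a beginTime of 0 means "now" to Core Animation
positionAnimation.isRemovedOnCompletion = false // keep the final position after the animation ends
positionAnimation.fillMode = .forwards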

How to combine (hstack) multiple videos side by side with AVMutableVideoComposition?

I'm trying to combine 3 videos into a single video where the videos are cropped and placed side by side in an hstack.
Here is my current solution, which puts only the first video side by side 3 times (repeated). I cannot figure out what I am doing wrong.
import Foundation
import AVFoundation
func hstackVideos() {
let videoPaths: [String] = [
"path/to/video1.mp4",
"path/to/video2.mp4",
"path/to/video3.mp4",
]
let composition = AVMutableComposition()
let assetInfos: [(AVURLAsset, AVAssetTrack, AVMutableCompositionTrack)] = videoPaths.map {
let asset = AVURLAsset(url: URL(fileURLWithPath: $0))
let track = composition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)!
let videoAssetTrack = asset.tracks(withMediaType: .video)[0]
try! track.insertTimeRange(CMTimeRangeMake(start: videoAssetTrack.timeRange.start, duration: videoAssetTrack.timeRange.duration), of: videoAssetTrack, at: videoAssetTrack.timeRange.start)
return (asset, videoAssetTrack, track)
}
let stackComposition = AVMutableVideoComposition()
stackComposition.renderSize = CGSize(width: 512, height: 288)
stackComposition.frameDuration = CMTime(seconds: 1/30, preferredTimescale: 600)
var i = 0
let instructions: [AVMutableVideoCompositionLayerInstruction] = assetInfos.map { (asset, assetTrack, compTrack) in
let lInst = AVMutableVideoCompositionLayerInstruction(assetTrack: assetTrack)
let w: CGFloat = 512/CGFloat(assetInfos.count)
let inRatio = assetTrack.naturalSize.width / assetTrack.naturalSize.height
let cropRatio = w / 288
let scale: CGFloat
if inRatio < cropRatio {
scale = w / assetTrack.naturalSize.width
} else {
scale = 288 / assetTrack.naturalSize.height
}
lInst.setCropRectangle(CGRect(x: w/scale, y: 0, width: w/scale, height: 288/scale), at: CMTime.zero)
let transform = CGAffineTransform(scaleX: scale, y: scale)
let t2 = transform.concatenating(CGAffineTransform(translationX: -w + CGFloat(i)*w, y: 0))
lInst.setTransform(t2, at: CMTime.zero)
i += 1
return lInst
}
let inst = AVMutableVideoCompositionInstruction()
inst.timeRange = CMTimeRange(start: CMTime.zero, duration: assetInfos[0].0.duration)
inst.layerInstructions = instructions
stackComposition.instructions = [inst]
let exporter = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality)!
let outPath = "path/to/finalVideo.mp4"
let outUrl = URL(fileURLWithPath: outPath)
try? FileManager.default.removeItem(at: outUrl)
exporter.outputURL = outUrl
exporter.videoComposition = stackComposition
exporter.outputFileType = .mp4
exporter.shouldOptimizeForNetworkUse = true
let group = DispatchGroup()
group.enter()
exporter.exportAsynchronously(completionHandler: {
switch exporter.status {
case .completed:
print("SUCCESS!")
if exporter.error != nil {
print("Error: \(String(describing: exporter.error))")
print("Description: \(exporter.description)")
}
group.leave()
case .exporting:
let progress = exporter.progress
print("Progress: \(progress)")
case .failed:
print("Error: \(String(describing: exporter.error))")
print("Description: \(exporter.description)")
group.leave()
default:
break
}
})
group.wait()
}
You're applying the instructions to the source asset tracks, but you need to apply them to the output composition tracks. A layer instruction targets its track by track ID, and the first video track of each separate source asset typically carries the same ID, so all three of your instructions ended up resolving to the same (first) track. Build them from the composition tracks instead, like this:
let instructions: [AVMutableVideoCompositionLayerInstruction] = assetInfos.map { (asset, assetTrack, compTrack) in
let lInst = AVMutableVideoCompositionLayerInstruction(assetTrack: compTrack)
let w: CGFloat = 512/CGFloat(assetInfos.count)
let inRatio = compTrack.naturalSize.width / compTrack.naturalSize.height
let cropRatio = w / 288
let scale: CGFloat
if inRatio < cropRatio {
scale = w / compTrack.naturalSize.width
} else {
scale = 288 / compTrack.naturalSize.height
}
lInst.setCropRectangle(CGRect(x: w/scale, y: 0, width: w/scale, height: 288/scale), at: CMTime.zero)
let transform = CGAffineTransform(scaleX: scale, y: scale)
let t2 = transform.concatenating(CGAffineTransform(translationX: -w + CGFloat(i)*w, y: 0))
lInst.setTransform(t2, at: CMTime.zero)
i += 1
return lInst
}
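As a quick sanity check (an illustrative snippet, not part of the original answer), printing the track IDs makes the mismatch visible: the first video track of every source asset usually reports the same ID, while each composition track gets a unique one:
// Layer instructions are matched to tracks by trackID.
for (_, assetTrack, compTrack) in assetInfos {
    print("source trackID: \(assetTrack.trackID), composition trackID: \(compTrack.trackID)")
}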

VideoComposition not honoring instructions

Since updating to iOS 13, the video composition I use to fade a video in and out is broken. This is my code, which worked correctly up until installing iOS 13.
Now when I export the video, there is sound but just a black screen.
let urlAsset = AVURLAsset(url: inputURL, options: nil)
guard let exportSession = AVAssetExportSession(asset: urlAsset, presetName: AVAssetExportPresetHighestQuality) else {
    handler(nil)
    return
}
exportSession.outputURL = outputURL
exportSession.outputFileType = AVFileType.m4v
exportSession.shouldOptimizeForNetworkUse = true
let composition = AVMutableVideoComposition(propertiesOf: urlAsset)
let clipVideoTrack = urlAsset.tracks(withMediaType: AVMediaType.video)[0]
let timeDuration = CMTimeMake(value: 1, timescale: 1)
let layer = AVMutableVideoCompositionLayerInstruction(assetTrack: clipVideoTrack)
// MARK: Fade in effect
layer.setOpacityRamp(fromStartOpacity: 0.0, toEndOpacity: 1.0, timeRange: CMTimeRange(start: CMTime.zero, duration: timeDuration))
// MARK: Fade out effect
let startTime = CMTimeSubtract(urlAsset.duration, CMTimeMake(value: 1, timescale: 1))
layer.setOpacityRamp(
fromStartOpacity: 1.0,
toEndOpacity: 0.0,
timeRange: CMTimeRangeMake(start: startTime, duration: timeDuration)
)
let instruction = AVMutableVideoCompositionInstruction()
instruction.layerInstructions = [layer]
instruction.timeRange = CMTimeRange(start: CMTime.zero, duration: urlAsset.duration)
composition.instructions = [instruction]
exportSession.videoComposition = composition
exportSession.exportAsynchronously { () -> Void in
handler(exportSession)
print("composition has completed")
}
Apple said there was a bug affecting some instructions for compositions; it was fixed in iOS 13.1. I updated, ran the function again, and the fade in and out worked as it did before the iOS 13 update.

index 0 beyond bounds for empty NSArray with asset.tracks(withMediaType:)

I'm trying to crop a video to a precise size, but the asset.tracks(withMediaType: AVMediaType.video)[0] line gives me an "index 0 beyond bounds for empty NSArray" error. To be clear, there is a video at that URL...
iOS: [__NSArray0 objectAtIndex:] index 0 beyond bounds for empty NSArray with asset.tracks(withMediaType: AVMediaType.video)[0]
// input file
let composition = AVMutableComposition()
composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
// input clip
let asset = AVAsset(url: URL(fileURLWithPath: video))
let videoTrack: AVAssetTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
//=> problem is here
// crop clip to screen ratio
let orientation: UIInterfaceOrientation = self.orientation(forTrack: asset)
// make render size square
let videoComposition = AVMutableVideoComposition.init()
let height: CGFloat = 960
let width: CGFloat = 960
videoComposition.renderSize = CGSize(width: CGFloat(width), height: CGFloat(height))
videoComposition.frameDuration = CMTimeMake(1, 30)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, CMTimeMakeWithSeconds(60, 30))
// rotate and position video
let transformer = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
var txWidth: CGFloat = (videoTrack.naturalSize.width - width) / 2
let txHeight: CGFloat = (videoTrack.naturalSize.height - height) / 2
if orientation == .portrait || orientation == .landscapeRight {
// invert translation
txWidth *= -1
}
// t1: rotate and position video since it may have been cropped to screen ratio
let t1: CGAffineTransform = videoTrack.preferredTransform.translatedBy(x: txWidth, y: -txHeight)
transformer.setTransform(t1, at: kCMTimeZero)
instruction.layerInstructions = [transformer]
videoComposition.instructions = [instruction] as [AVVideoCompositionInstructionProtocol]
// export
let exporter = AVAssetExportSession(asset: asset, presetName: AVAssetExportPresetHighestQuality)
exporter?.videoComposition = videoComposition
exporter?.outputURL = URL(fileURLWithPath: video)
exporter?.outputFileType = .mov
exporter?.exportAsynchronously(completionHandler: {
print("Exporting done!")
})
Any ideas?
When dealing with video processing, you need to make sure that all imported files have an acceptable extension, for example:
let acceptableVideoExtensions = ["mov", "mp4", "m4v"]
if acceptableVideoExtensions.contains(videoURL.pathExtension) {
// Process the video
}
else {
//Alert the user
}
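Beyond the extension check, a more direct guard is to load the asset's tracks and verify that a video track actually exists before indexing into the array. A minimal sketch, assuming the same video path variable as in the question:
let asset = AVAsset(url: URL(fileURLWithPath: video))
asset.loadValuesAsynchronously(forKeys: ["tracks"]) {
    var error: NSError?
    guard asset.statusOfValue(forKey: "tracks", error: &error) == .loaded,
          let videoTrack = asset.tracks(withMediaType: .video).first else {
        print("No video track available: \(error?.localizedDescription ?? "unknown error")")
        return
    }
    // Safe to build the composition and crop transform from videoTrack here.
    print("Loaded video track \(videoTrack.trackID), size \(videoTrack.naturalSize)")
}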

iOS AVMutableComposition Add text overlay

Can someone please advise?
I am trying to add a text overlay (title) to a video I am composing using AVFoundation. I found a few online resources (see http://stackoverflow.com/questions/21684549/add-a-text-overlay-with-avmutablevideocomposition-to-a-specific-timerange).
However, all these resources are in Objective-C.
My project is in Swift and I cannot find any related resources in Swift.
I am not able to get the text to overlay properly; it seems distorted, as if the frame in which it gets rendered is skewed...
See picture: Distorted text in AVPlayer
I have attempted to convert the Objective-C code I found to Swift, but obviously I am missing something.
Below is the code I am using.
(I used some code for the player and the video file from www.raywenderlich.com/90488/calayer-in-ios-with-swift-10-examples.)
func MergeUnWeldedVideoByUserPref(showInBounds: CGRect) -> (AVMutableComposition, AVMutableVideoComposition)
{
let fps: Int32 = 30
// 1 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances.
let mixComposition = AVMutableComposition()
// 2 - Create a video track for each of the video assests. Add your media data to the appropriate tracks
//let url = NSBundle.mainBundle().URLForResource("colorfulStreak", withExtension: "m4v")!
let url = NSBundle.mainBundle().URLForResource("colorfulStreak", withExtension: "m4v")!
let avAsset = AVAsset(URL: url)
let track = mixComposition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let segmentInMovie = CMTimeRangeMake(kCMTimeZero, avAsset.duration)
let videoTrack = avAsset.tracksWithMediaType(AVMediaTypeVideo)[0]
do
{
try track.insertTimeRange(segmentInMovie, ofTrack: videoTrack, atTime: kCMTimeZero)
} catch{
print("Failed to load track")
}
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, avAsset.duration)
let instruction = videoCompositionInstructionForTrack(showInBounds, track: track, asset: avAsset)
mainInstruction.layerInstructions.append(instruction)
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(1, fps)
mainComposition.renderSize = CGSize(width: showInBounds.width, height: showInBounds.height)
let textLayer = CATextLayer()
textLayer.backgroundColor = UIColor.clearColor().CGColor
textLayer.foregroundColor = UIColor.whiteColor().CGColor
textLayer.string = "T E S T"
textLayer.font = UIFont(name: "Arial", size: 18)
textLayer.shadowOpacity = 0.5
textLayer.alignmentMode = kCAAlignmentCenter
textLayer.frame = CGRectMake(5, 5, 100, 50)
textLayer.shouldRasterize = true
textLayer.rasterizationScale = showInBounds.width / videoTrack.naturalSize.width
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRectMake(0, 0, showInBounds.width, showInBounds.height);
videoLayer.frame = CGRectMake(0, 0, showInBounds.width, showInBounds.height);
parentLayer.addSublayer(videoLayer)
parentLayer.addSublayer(textLayer)
mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, inLayer: parentLayer)
return (mixComposition, mainComposition)
}
There is nothing wrong with your Swift interpretation; it is rather an issue with the rendering engine of the simulator. I tried your code on the simulator and it indeed looked skewed and distorted, but when compiling to the device it worked beautifully.