I'm trying to merge two videos with a 2-second overlap. During this overlap I'd like to fade the second video in (or fade the first one out to reveal the second; either would be fine).
The first video fades out 2 seconds before its end as intended, but as it fades I get a black screen instead of the second video fading in. At the end of video 1, video 2 appears halfway through its fade-in animation.
What am I doing wrong with the tracks that prevents them from overlapping? Below is my code:
func setupVideo() {
let url = URL(fileURLWithPath: Bundle.main.path(forResource: "demoVideoTwo", ofType: "mp4")!)
let assetOne = AVAsset(url: url)
let urlTwo = URL(fileURLWithPath: Bundle.main.path(forResource: "demoVideoThree", ofType: "mp4")!)
let assetTwo = AVAsset(url: urlTwo)
let mixComposition = AVMutableComposition()
var instructions = [AVMutableVideoCompositionLayerInstruction]()
var mainInstructionList = [AVMutableVideoCompositionInstruction]()
var lastTime = CMTime.zero
// Create Track One
guard let videoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)), let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
return
}
// Setup AVAsset 1
let timeRange = CMTimeRangeMake(start: CMTime.zero, duration: assetOne.duration)
do {
try videoTrack.insertTimeRange(timeRange, of: assetOne.tracks(withMediaType: .video)[0], at: lastTime)
try audioTrack.insertTimeRange(timeRange, of: assetOne.tracks(withMediaType: .audio)[0], at: lastTime)
} catch {
print(error)
}
// Setup Layer Instruction 1
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
let duration = CMTime(seconds: 2, preferredTimescale: 60)
let transitTime = CMTime(seconds: 2, preferredTimescale: 60)
let insertTime = CMTimeSubtract(assetOne.duration, transitTime)
let instRange = CMTimeRangeMake(start: insertTime, duration: duration)
layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: instRange)
instructions.append(layerInstruction)
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(start: lastTime, duration: assetOne.duration)
mainInstruction.layerInstructions = instructions
mainInstructionList.append(mainInstruction)
lastTime = CMTimeAdd(lastTime, assetOne.duration)
// Create Track Two
guard let videoTrackTwo = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)), let audioTrackTwo = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
return
}
// Setup AVAsset 2
let transitionTime = CMTime(seconds: 2, preferredTimescale: 60)
let newLastTime = CMTimeSubtract(assetOne.duration, transitionTime)
let timeRangeTwo = CMTimeRangeMake(start: CMTime.zero, duration: assetTwo.duration)
do {
try videoTrackTwo.insertTimeRange(timeRangeTwo, of: assetTwo.tracks(withMediaType: .video)[0], at: newLastTime)
try audioTrackTwo.insertTimeRange(timeRangeTwo, of: assetTwo.tracks(withMediaType: .audio)[0], at: newLastTime)
} catch {
print(error)
}
// Setup Layer Instruction 2
let layerInstructionTwo = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrackTwo)
let durationTwo = CMTime(seconds: 4, preferredTimescale: 60)
let instRangeTwo = CMTimeRangeMake(start: newLastTime, duration: durationTwo)
layerInstructionTwo.setOpacityRamp(fromStartOpacity: 0.0, toEndOpacity: 1.0, timeRange: instRangeTwo)
instructions.append(layerInstructionTwo)
let mainInstructionTwo = AVMutableVideoCompositionInstruction()
mainInstructionTwo.timeRange = CMTimeRangeMake(start: lastTime, duration: assetTwo.duration)
mainInstructionTwo.layerInstructions = instructions
mainInstructionList.append(mainInstructionTwo)
// Setup Video Composition
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = mainInstructionList
mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 60)
mainComposition.renderSize = videoTrack.naturalSize
let item = AVPlayerItem(asset: mixComposition)
item.videoComposition = mainComposition
player = AVPlayer(playerItem: item)
let playerLayer: AVPlayerLayer = {
let layer = AVPlayerLayer(player: player)
layer.videoGravity = .resizeAspectFill
return layer
}()
let playerWidth: CGFloat = UIScreen.main.bounds.size.width
let videoHeight = UIScreen.main.bounds.size.width * 9 / 16
playerLayer.frame = CGRect(x: 0, y: 0, width: playerWidth, height: videoHeight)
self.layer.addSublayer(playerLayer)
}
Don't create an AVMutableVideoCompositionInstruction for each
video track, and don't assign the layer instructions to it each time. Instead, create a single AVMutableVideoCompositionInstruction right before creating the AVMutableVideoComposition, then assign all the layer instructions to that.
Here is the code working the way you want.
func setupVideo() {
let url = URL(fileURLWithPath: Bundle.main.path(forResource: "demoVideoTwo", ofType: "mp4")!)
let assetOne = AVAsset(url: url)
let urlTwo = URL(fileURLWithPath: Bundle.main.path(forResource: "demoVideoThree", ofType: "mp4")!)
let assetTwo = AVAsset(url: urlTwo)
let mixComposition = AVMutableComposition()
var instructions = [AVMutableVideoCompositionLayerInstruction]()
var lastTime = CMTime.zero
// Create Track One
guard let videoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)), let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
return
}
// Setup AVAsset 1
let timeRange = CMTimeRangeMake(start: CMTime.zero, duration: assetOne.duration)
do {
try videoTrack.insertTimeRange(timeRange, of: assetOne.tracks(withMediaType: .video)[0], at: lastTime)
try audioTrack.insertTimeRange(timeRange, of: assetOne.tracks(withMediaType: .audio)[0], at: lastTime)
} catch {
print(error)
}
// Setup Layer Instruction 1
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
let duration = CMTime(seconds: 2, preferredTimescale: 60)
let transitTime = CMTime(seconds: 2, preferredTimescale: 60)
let insertTime = CMTimeSubtract(assetOne.duration, transitTime)
let instRange = CMTimeRangeMake(start: insertTime, duration: duration)
layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: instRange)
instructions.append(layerInstruction)
lastTime = CMTimeAdd(lastTime, assetOne.duration)
// Create Track Two
guard let videoTrackTwo = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)), let audioTrackTwo = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
return
}
// Setup AVAsset 2
let transitionTime = CMTime(seconds: 2, preferredTimescale: 60)
let newLastTime = CMTimeSubtract(assetOne.duration, transitionTime)
let timeRangeTwo = CMTimeRangeMake(start: CMTime.zero, duration: assetTwo.duration)
do {
try videoTrackTwo.insertTimeRange(timeRangeTwo, of: assetTwo.tracks(withMediaType: .video)[0], at: newLastTime)
try audioTrackTwo.insertTimeRange(timeRangeTwo, of: assetTwo.tracks(withMediaType: .audio)[0], at: newLastTime)
} catch {
print(error)
}
// Setup Layer Instruction 2
let layerInstructionTwo = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrackTwo)
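// The first clip's layer instruction is listed first in instructions, so it renders on top;
// its fade-out reveals this clip, which therefore only needs full opacity from newLastTime.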
layerInstructionTwo.setOpacity(1.0, at: newLastTime)
instructions.append(layerInstructionTwo)
// Setup Video Composition
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(start: .zero, duration: CMTimeAdd(newLastTime, assetTwo.duration))
mainInstruction.layerInstructions = instructions
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 60)
mainComposition.renderSize = videoTrack.naturalSize
let item = AVPlayerItem(asset: mixComposition)
item.videoComposition = mainComposition
player = AVPlayer(playerItem: item)
let playerLayer: AVPlayerLayer = {
let layer = AVPlayerLayer(player: player)
layer.videoGravity = .resizeAspectFill
return layer
}()
let playerWidth: CGFloat = UIScreen.main.bounds.size.width
let videoHeight = UIScreen.main.bounds.size.width * 9 / 16
playerLayer.frame = CGRect(x: 0, y: 0, width: playerWidth, height: videoHeight)
self.layer.addSublayer(playerLayer)
}
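If you would rather have the second clip fade in on top (the other option mentioned in the question), here is a minimal sketch using the same variables as above: list layerInstructionTwo first so it renders above the first clip, drop the fade-out ramp on layerInstruction, and ramp the second clip's opacity over the overlap instead. Avoid ramping both clips at once, since the black background would show through mid-transition.
// Sketch: second clip on top, fading in over the fully opaque first clip.
// Assumes newLastTime and transitionTime from the code above.
let fadeInRange = CMTimeRangeMake(start: newLastTime, duration: transitionTime)
layerInstructionTwo.setOpacityRamp(fromStartOpacity: 0.0, toEndOpacity: 1.0, timeRange: fadeInRange)
mainInstruction.layerInstructions = [layerInstructionTwo, layerInstruction]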
Related
I am trying to add a view over a video and export the newly created URL, but when exporting with AVAssetExportSession I get this error:
com.apple.coremedia.rootQueue.27 (103): EXC_BAD_INSTRUCTION (code=EXC_I386_INVOP, subcode=0x0)
The code that does this exporting is this:
func addViewToVideo(cameraSelection: MyCameraViewController.CameraSelection, fromVideoAt videoURL: URL, withView: UIView, completion: @escaping (URL?) -> Void) {
let imageTransformedFromView = withView.asImage()
let asset = AVURLAsset(url: videoURL)
let composition = AVMutableComposition()
guard let compositionTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid), let assetTrack = asset.tracks(withMediaType: .video).first else{
print("Something is wrong with the asset")
completion(nil)
return
}
do{
let timeRange = CMTimeRange(start: .zero, duration: asset.duration)
try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
if let audioAssetTrack = asset.tracks(withMediaType: .audio).first, let compositionAudioTrack = composition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid){
try compositionAudioTrack.insertTimeRange(timeRange, of: audioAssetTrack, at: .zero)
}
}catch{
print(error)
completion(nil)
return
}
compositionTrack.preferredTransform = assetTrack.preferredTransform
let videoSize = CGSize(
width: assetTrack.naturalSize.height,
height: assetTrack.naturalSize.width)
let videoLayer = CALayer()
videoLayer.frame = CGRect(origin: .zero, size: videoSize)
let overlayLayer = CALayer()
overlayLayer.frame = CGRect(origin: .zero, size: videoSize)
addImage(imagePassed: imageTransformedFromView, to: overlayLayer, videoSize: videoSize)
let outputLayer = CALayer()
outputLayer.frame = CGRect(origin: .zero, size: videoSize)
outputLayer.addSublayer(videoLayer)
outputLayer.addSublayer(overlayLayer)
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = videoSize
videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
postProcessingAsVideoLayer: videoLayer,
in: outputLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRange(start: .zero,duration: composition.duration)
videoComposition.instructions = [instruction]
let layerInstruction = compositionLayerInstruction(
cameraSelection: cameraSelection, for: compositionTrack,
assetTrack: assetTrack)
instruction.layerInstructions = [layerInstruction]
guard let export = AVAssetExportSession(
asset: composition,
presetName: AVAssetExportPreset1920x1080)
else {
print("Cannot create export session.")
completion(nil)
return
}
let videoName = UUID().uuidString
let exportURL = URL(fileURLWithPath: NSTemporaryDirectory())
.appendingPathComponent(videoName)
.appendingPathExtension("mov")
export.videoComposition = videoComposition
export.outputFileType = .mov
export.outputURL = exportURL
export.exportAsynchronously {
DispatchQueue.main.async {
switch export.status {
case .completed:
completion(exportURL)
default:
print("Something went wrong during export.")
print(export.error ?? "unknown error")
completion(nil)
break
}
}
}
}
I get this error ONLY WHEN RUNNING ON THE SIMULATOR and not on a real device. Can someone please tell me what could be the cause?
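Not a confirmed cause, but one thing worth ruling out is whether AVAssetExportPreset1920x1080 is actually available for this composition on the Simulator, since the preset list can differ between Simulator and device. A small diagnostic sketch (assuming the composition built in the function above):
// Diagnostic sketch: list the presets offered for this asset and check the one in use.
print(AVAssetExportSession.exportPresets(compatibleWith: composition))
AVAssetExportSession.determineCompatibility(ofExportPreset: AVAssetExportPreset1920x1080,
with: composition,
outputFileType: .mov) { isCompatible in
print("1920x1080 preset compatible with .mov:", isCompatible)
}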
I am creating an iOS video editing tool using AVMutableComposition.
If I combine multiple videos into a single mutable video track, it works fine. However, when I create a mutable track per video, compose them, and play the result in AVPlayer, only the sound plays and the video freezes when it moves on to the next clip. If I seek, it plays normally. What is the cause?
The following is the code in which the current situation occurs.
static func mergeClips(
videos: [ExCAVideoLayer]?,
audios: [ExCAAudioLayer]? = nil
) -> AVPlayerItem? {
guard let videos = videos else {
return nil
}
let mixComposition = AVMutableComposition()
var layerInstructions = [AVMutableVideoCompositionLayerInstruction]()
do {
// Determine video aspect ratio
var aspectRatio: Double = 0.00001
for video in videos {
guard let url = video.videoUrl else {
continue
}
if let naturalSize = self.resolutionForLocalVideo(url: url) {
let naturalAspectRatio = naturalSize.height == 0 ? 1 : naturalSize.width / naturalSize.height
if aspectRatio < naturalAspectRatio {
aspectRatio = naturalAspectRatio
}
}
}
var frameWidth = UIScreen.main.bounds.width
var frameHeight = round(UIScreen.main.bounds.width / aspectRatio)
if frameWidth.truncatingRemainder(dividingBy: 2) > 0 {
frameWidth = frameWidth - 1
}
if frameHeight.truncatingRemainder(dividingBy: 2) > 0 {
frameHeight = frameHeight - 1
}
let frameSize = CGSize(width: frameWidth, height: frameHeight)
// Video composition
var currentDuration = mixComposition.duration
for video in videos {
guard let url = video.videoUrl else {
continue
}
let videoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)
let avAsset = AVAsset(url: url)
if let layerInstruction = self.videoCompositionInstruction(videoTrack, asset: avAsset, frameSize: frameSize) {
layerInstructions.append(layerInstruction)
}
let streamRange = CMTimeRangeMake(start: CMTime.zero, duration: CMTimeAdd(avAsset.duration, CMTimeMakeWithSeconds(video.gap, preferredTimescale: 1)))
if let streamVideoTrack = avAsset.tracks(withMediaType: .video).first {
try videoTrack?.insertTimeRange(streamRange, of: streamVideoTrack, at: currentDuration)
videoTrack?.preferredTransform = streamVideoTrack.preferredTransform
}
if let streamAudioTrack = avAsset.tracks(withMediaType: .audio).first {
try audioTrack?.insertTimeRange(streamRange, of: streamAudioTrack, at: currentDuration)
}
currentDuration = mixComposition.duration
}
// Audio composition
if let audios = audios {
for audio in audios {
guard let url = audio.audioUrl else {
continue
}
let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)
let avAsset = AVAsset(url: url)
let streamRange = CMTimeRangeMake(start: CMTime.zero, duration: avAsset.duration)
if let streamAudioTrack = avAsset.tracks(withMediaType: .audio).first {
try audioTrack?.insertTimeRange(streamRange, of: streamAudioTrack, at: CMTimeMakeWithSeconds(audio.startTime, preferredTimescale: 1))
}
}
}
//AVVideoComposition and layout
let videoComposition = AVMutableVideoComposition()
videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
videoComposition.renderSize = CGSize(width: frameSize.width, height: frameSize.height)
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRange(start: .zero, duration: mixComposition.duration)
mainInstruction.backgroundColor = UIColor.black.cgColor
mainInstruction.layerInstructions = layerInstructions
videoComposition.instructions = [mainInstruction]
let assetKeys = [
"playable",
"hasProtectedContent"
]
let avPlayItem = AVPlayerItem(asset: mixComposition, automaticallyLoadedAssetKeys: assetKeys)
avPlayItem.videoComposition = videoComposition
return avPlayItem
}
catch(let error) {
print("Could not create mixComposition \(error.localizedDescription)")
return nil
}
}
func PlayVideo() {
guard let avPlayerItem = VideoComposition.mergeClips(videos: self.projectRender?.videoItems) else {
return
}
self.avPlayer = AVPlayer(playerItem: avPlayerItem)
self.avPlayerLayer = AVPlayerLayer(player: self.avPlayer)
self.avPlayerLayer?.needsDisplayOnBoundsChange = true
if let avPlayerLayer = self.avPlayerLayer {
self.videoContainer?.layer.addSublayer(avPlayerLayer)
}
self.avPlayer?.play()
}
I want to add an image over a video, and in order to do so I followed this tutorial:
The code I implemented is this:
func addViewToVideo(fromVideoAt videoURL: URL, withView: UIView, completion: @escaping (URL?) -> Void) {
let imageTransformedFromView = withView.asImage()
let asset = AVURLAsset(url: videoURL)
let composition = AVMutableComposition()
guard let compositionTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid), let assetTrack = asset.tracks(withMediaType: .video).first else{
print("Something is wrong with the asset")
completion(nil)
return
}
do{
let timeRange = CMTimeRange(start: .zero, duration: asset.duration)
try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
if let audioAssetTrack = asset.tracks(withMediaType: .audio).first, let compositionAudioTrack = composition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid){
try compositionAudioTrack.insertTimeRange(timeRange, of: audioAssetTrack, at: .zero)
}
}catch{
print(error)
completion(nil)
return
}
compositionTrack.preferredTransform = assetTrack.preferredTransform
let videoSize = CGSize(
width: assetTrack.naturalSize.height,
height: assetTrack.naturalSize.width)
let videoLayer = CALayer()
videoLayer.frame = CGRect(origin: .zero, size: videoSize)
let overlayLayer = CALayer()
overlayLayer.frame = CGRect(origin: .zero, size: videoSize)
addImage(imagePassed: imageTransformedFromView, to: overlayLayer, videoSize: videoSize)
let outputLayer = CALayer()
outputLayer.frame = CGRect(origin: .zero, size: videoSize)
outputLayer.addSublayer(videoLayer)
outputLayer.addSublayer(overlayLayer)
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = videoSize
videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
postProcessingAsVideoLayer: videoLayer,
in: outputLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRange(start: .zero,duration: composition.duration)
videoComposition.instructions = [instruction]
let layerInstruction = compositionLayerInstruction(
for: compositionTrack,
assetTrack: assetTrack)
instruction.layerInstructions = [layerInstruction]
guard let export = AVAssetExportSession(
asset: composition,
presetName: AVAssetExportPreset1920x1080)
else {
print("Cannot create export session.")
completion(nil)
return
}
let videoName = UUID().uuidString
let exportURL = URL(fileURLWithPath: NSTemporaryDirectory())
.appendingPathComponent(videoName)
.appendingPathExtension("mov")
export.videoComposition = videoComposition
export.outputFileType = .mov
export.outputURL = exportURL
export.exportAsynchronously {
DispatchQueue.main.async {
switch export.status {
case .completed:
completion(exportURL)
default:
print("Something went wrong during export.")
print(export.error ?? "unknown error")
completion(nil)
break
}
}
}
}
func compositionLayerInstruction(for track: AVCompositionTrack, assetTrack: AVAssetTrack) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
var transform = CGAffineTransform.identity
let assetSize = assetTrack.naturalSize
transform = CGAffineTransform(a: 0, b: 1, c: -1, d: 0, tx: assetSize.height, ty: 0)
instruction.setTransform(transform, at: .zero)
return instruction
}
It works pretty well apart from the fact that when I pass in a video from the front camera, it doesn't mirror it.
I'm fairly sure the reason it isn't mirrored lies in the CGAffineTransform.
To be honest I don't know how to work with the a, b, c, d, tx, ty initializer, but how could I transform the video so that it appears mirrored?
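Not a definitive answer, but a minimal sketch of one common approach, assuming the portrait case handled by compositionLayerInstruction(for:assetTrack:) above: concatenate a horizontal flip (scale x by -1 plus a translation back into the render frame) onto the existing rotation. The flip axis may need adjusting for other orientations.
// Sketch: same 90° rotation as in compositionLayerInstruction, followed by a horizontal
// flip within the render frame (whose width equals assetTrack.naturalSize.height here).
let assetSize = assetTrack.naturalSize
var transform = CGAffineTransform(a: 0, b: 1, c: -1, d: 0, tx: assetSize.height, ty: 0)
transform = transform.concatenating(CGAffineTransform(scaleX: -1, y: 1))
transform = transform.concatenating(CGAffineTransform(translationX: assetSize.height, y: 0))
instruction.setTransform(transform, at: .zero)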
I've been trying to search for this all day, but all the answers point to older versions of Swift or Obj-C.
I tried layer instructions, but AVMutableComposition has no member instructions. I remember this being really easy with just an affineTransform, but I no longer know where I found that.
var mainVideoURL:URL!
let paths = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
let tempPath = paths[0] + "/mainVideo.mp4"
if(FileManager.default.fileExists(atPath: tempPath)){
guard (try? FileManager.default.removeItem(atPath: tempPath)) != nil else {
print("remove path failed")
self.enableButtons(enabled:true)
return
}
}
mainVideoURL = URL(fileURLWithPath: tempPath)
let firstAsset = AVURLAsset(url: fileURL)
let mixComposition = AVMutableComposition()
// repeat video number of times
let videoRepeat = photoVideoRepeats
for i in 0 ... videoRepeat - 1 {
do {
try mixComposition.insertTimeRange(CMTimeRangeMake(kCMTimeZero, firstAsset.duration),
of: firstAsset,
at: kCMTimeZero + CMTimeMultiply(firstAsset.duration,Int32(i)))
} catch _ {
print("Failed to load first track")
}
}
guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
After my video was recorded, I was able to apply a CGAffineTransform to an AVMutableCompositionTrack.
In my case I needed to merge an audio track with the video, but you can see where the transforms take place:
func mergeVideoAndAudio(videoUrl: URL,
audioUrl: URL) -> AVAsset {
let mixComposition = AVMutableComposition()
var mutableCompositionVideoTrack = [AVMutableCompositionTrack]()
var mutableCompositionAudioTrack = [AVMutableCompositionTrack]()
var mutableCompositionAudioOfVideoTrack = [AVMutableCompositionTrack]()
//start merge
let aVideoAsset = AVAsset(url: videoUrl)
let aAudioAsset = AVAsset(url: audioUrl)
let compositionAddVideo = mixComposition.addMutableTrack(withMediaType: .video,
preferredTrackID: kCMPersistentTrackID_Invalid)
let compositionAddAudio = mixComposition.addMutableTrack(withMediaType: .audio,
preferredTrackID: kCMPersistentTrackID_Invalid)
let compositionAddAudioOfVideo = mixComposition.addMutableTrack(withMediaType: .audio,
preferredTrackID: kCMPersistentTrackID_Invalid)
let aVideoAssetTrack: AVAssetTrack = aVideoAsset.tracks(withMediaType: AVMediaType.video)[0]
let aAudioOfVideoAssetTrack: AVAssetTrack? = aVideoAsset.tracks(withMediaType: AVMediaType.audio).first
let aAudioAssetTrack: AVAssetTrack = aAudioAsset.tracks(withMediaType: AVMediaType.audio)[0]
// The default transform must be applied
compositionAddVideo?.preferredTransform = aVideoAssetTrack.preferredTransform
var transforms = aVideoAssetTrack.preferredTransform
if UIDevice.current.orientation == UIDeviceOrientation.landscapeLeft {
transforms = transforms.concatenating(CGAffineTransform(rotationAngle: CGFloat(-90.0 * .pi / 180)))
transforms = transforms.concatenating(CGAffineTransform(translationX: 1280, y: 0))
}
else if UIDevice.current.orientation == UIDeviceOrientation.landscapeRight {
transforms = transforms.concatenating(CGAffineTransform(rotationAngle: CGFloat(90.0 * .pi / 180)))
transforms = transforms.concatenating(CGAffineTransform(translationX: 1280, y: 0))
}
else if UIDevice.current.orientation == UIDeviceOrientation.portraitUpsideDown {
transforms = transforms.concatenating(CGAffineTransform(rotationAngle: CGFloat(180.0 * .pi / 180)))
transforms = transforms.concatenating(CGAffineTransform(translationX: 0, y: 720))
}
compositionAddVideo?.preferredTransform = transforms
mutableCompositionVideoTrack.append(compositionAddVideo!)
mutableCompositionAudioTrack.append(compositionAddAudio!)
mutableCompositionAudioOfVideoTrack.append(compositionAddAudioOfVideo!)
do {
try mutableCompositionVideoTrack[0].insertTimeRange(CMTimeRangeMake(start: CMTime.zero,
duration: aVideoAssetTrack.timeRange.duration),
of: aVideoAssetTrack,
at: CMTime.zero)
// In my case my audio file is longer than the video file, so I took the videoAsset duration
// instead of the audioAsset duration
try mutableCompositionAudioTrack[0].insertTimeRange(CMTimeRangeMake(start: CMTime.zero,
duration: aVideoAssetTrack.timeRange.duration),
of: aAudioAssetTrack,
at: CMTime.zero)
// adding audio (of the video if exists) asset to the final composition
if let aAudioOfVideoAssetTrack = aAudioOfVideoAssetTrack {
try mutableCompositionAudioOfVideoTrack[0].insertTimeRange(CMTimeRangeMake(start: CMTime.zero,
duration: aVideoAssetTrack.timeRange.duration),
of: aAudioOfVideoAssetTrack,
at: CMTime.zero)
}
} catch {
print(error.localizedDescription)
}
return mixComposition
}
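As a quick usage sketch (recordedVideoURL and backgroundAudioURL are placeholder names, not from the original post), the returned composition plays like any other asset:
// Hypothetical usage of the merge function above; the URLs are placeholders.
let merged = mergeVideoAndAudio(videoUrl: recordedVideoURL, audioUrl: backgroundAudioURL)
let playerItem = AVPlayerItem(asset: merged)
let player = AVPlayer(playerItem: playerItem)
player.play()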
I tried to convert Objective-C code to Swift 4. The code overlays one video on top of another; http://www.theappguruz.com/blog/ios-overlap-multiple-videos is the Objective-C code.
I got this error:
"(Error Domain=AVFoundationErrorDomain Code=-11841 "Operation Stopped" UserInfo={NSLocalizedFailureReason=The video could not be composed., NSLocalizedDescription=Operation Stopped, NSUnderlyingError=0x604000646cf0 {Error Domain=NSOSStatusErrorDomain Code=-17390 "(null)"}})
"
class func mergeVideos(firestUrl : URL , SecondUrl : URL ){
let firstAssets = AVAsset(url: firestUrl)
let secondAssets = AVAsset(url: SecondUrl)
let mixComposition = AVMutableComposition()
let firstTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
let seconTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
do{
try firstTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, firstAssets.duration), of: firstAssets.tracks(withMediaType: .video)[0], at: kCMTimeZero)
try seconTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, secondAssets.duration), of: secondAssets.tracks(withMediaType: .video)[0], at: kCMTimeZero)
}catch{
}
let mainInstraction = AVMutableVideoCompositionInstruction()
mainInstraction.timeRange = CMTimeRangeMake(kCMTimeZero, firstAssets.duration)
let firstLayerInstraction = AVMutableVideoCompositionLayerInstruction(assetTrack: firstTrack!)
let scale = CGAffineTransform(scaleX: 0.6, y: 0.6)
let move = CGAffineTransform(translationX: 0, y: 0)
let transform = scale.concatenating(move)
firstLayerInstraction.setTransform(transform, at: kCMTimeZero)
let secondLayerInstraction = AVMutableVideoCompositionLayerInstruction(assetTrack: seconTrack!)
let secondScale = CGAffineTransform(scaleX: 0.9, y: 0.9)
let secondMove = CGAffineTransform(translationX: 0, y: 0)
let secondTransform = secondScale.concatenating(secondMove)
secondLayerInstraction.setTransform(secondTransform, at: kCMTimeZero)
let mainCompositonInst = AVMutableVideoComposition()
mainCompositonInst.instructions = [mainInstraction]
mainCompositonInst.frameDuration = CMTimeMake(1, 30)
mainCompositonInst.renderSize = CGSize(width:(firstTrack?.naturalSize.width)!, height:(firstTrack?.naturalSize.height)! )
let saveUrl = URL(fileURLWithPath: NSHomeDirectory() + "/Documents/1.mp4")
guard let assetExport = AVAssetExportSession(asset: mixComposition, presetName:AVAssetExportPresetHighestQuality) else {return}
assetExport.videoComposition = mainCompositonInst
assetExport.outputFileType = .mp4
assetExport.outputURL = saveUrl
assetExport.exportAsynchronously(completionHandler: {
switch assetExport.status{
case AVAssetExportSessionStatus.failed:
print("failed \(assetExport.error)")
case AVAssetExportSessionStatus.cancelled:
print("cancelled \(assetExport.error)")
case AVAssetExportSessionStatus.completed:
print("Completed")
default:
print("unknown")
}
})
}
I have the solution to my problem.
I just need to assign the layer instructions to the AVMutableVideoCompositionInstruction's layerInstructions property and the code will work fine.
This line of code fixes the problem:
mainInstraction.layerInstructions = [firstLayerInstraction, secondLayerInstruction]
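For context, that assignment belongs right after the two layer instructions are created and before the instruction is handed to the video composition; roughly, based on the code above:
secondLayerInstraction.setTransform(secondTransform, at: kCMTimeZero)
// Attach both layer instructions to the instruction before building the video composition.
mainInstraction.layerInstructions = [firstLayerInstraction, secondLayerInstruction]
let mainCompositonInst = AVMutableVideoComposition()
mainCompositonInst.instructions = [mainInstraction]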