I want to add an image over a video; to do so I followed a tutorial. The code I implemented is this:
func addViewToVideo(fromVideoAt videoURL: URL, withView: UIView, completion: @escaping (URL?) -> Void) {
    let imageTransformedFromView = withView.asImage()
    let asset = AVURLAsset(url: videoURL)
    let composition = AVMutableComposition()

    guard let compositionTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid),
          let assetTrack = asset.tracks(withMediaType: .video).first else {
        print("Something is wrong with the asset")
        completion(nil)
        return
    }

    do {
        // Copy the full video (and, if present, audio) time range into the composition.
        let timeRange = CMTimeRange(start: .zero, duration: asset.duration)
        try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
        if let audioAssetTrack = asset.tracks(withMediaType: .audio).first,
           let compositionAudioTrack = composition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) {
            try compositionAudioTrack.insertTimeRange(timeRange, of: audioAssetTrack, at: .zero)
        }
    } catch {
        print(error)
        completion(nil)
        return
    }

    compositionTrack.preferredTransform = assetTrack.preferredTransform
    // The source is portrait, so swap naturalSize's dimensions for the render size.
    let videoSize = CGSize(
        width: assetTrack.naturalSize.height,
        height: assetTrack.naturalSize.width)

    let videoLayer = CALayer()
    videoLayer.frame = CGRect(origin: .zero, size: videoSize)
    let overlayLayer = CALayer()
    overlayLayer.frame = CGRect(origin: .zero, size: videoSize)
    addImage(imagePassed: imageTransformedFromView, to: overlayLayer, videoSize: videoSize)

    let outputLayer = CALayer()
    outputLayer.frame = CGRect(origin: .zero, size: videoSize)
    outputLayer.addSublayer(videoLayer)
    outputLayer.addSublayer(overlayLayer)

    let videoComposition = AVMutableVideoComposition()
    videoComposition.renderSize = videoSize
    videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
    videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
        postProcessingAsVideoLayer: videoLayer,
        in: outputLayer)

    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRange(start: .zero, duration: composition.duration)
    videoComposition.instructions = [instruction]
    let layerInstruction = compositionLayerInstruction(
        for: compositionTrack,
        assetTrack: assetTrack)
    instruction.layerInstructions = [layerInstruction]

    guard let export = AVAssetExportSession(
        asset: composition,
        presetName: AVAssetExportPreset1920x1080)
    else {
        print("Cannot create export session.")
        completion(nil)
        return
    }

    let videoName = UUID().uuidString
    let exportURL = URL(fileURLWithPath: NSTemporaryDirectory())
        .appendingPathComponent(videoName)
        .appendingPathExtension("mov")
    export.videoComposition = videoComposition
    export.outputFileType = .mov
    export.outputURL = exportURL

    export.exportAsynchronously {
        DispatchQueue.main.async {
            switch export.status {
            case .completed:
                completion(exportURL)
            default:
                print("Something went wrong during export.")
                print(export.error ?? "unknown error")
                completion(nil)
            }
        }
    }
}

func compositionLayerInstruction(for track: AVCompositionTrack, assetTrack: AVAssetTrack) -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
    let assetSize = assetTrack.naturalSize
    // Rotate 90 degrees clockwise: maps (x, y) to (assetSize.height - y, x).
    let transform = CGAffineTransform(a: 0, b: 1, c: -1, d: 0, tx: assetSize.height, ty: 0)
    instruction.setTransform(transform, at: .zero)
    return instruction
}
It works fine, apart from the fact that when I pass in a video taken with the front camera, the result isn't mirrored.
I'm pretty sure the cause lies in the CGAffineTransform.
To be honest, I don't know how to work with the a, b, c, d, tx, ty initializer, so how could I transform the video so that it appears mirrored?
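A mirror is just a horizontal flip composed onto the rotation already being applied. Here is a minimal sketch of how the helper above could support it, assuming the same 90-degree rotation; the isFrontCamera flag is a hypothetical parameter you would pass in yourself:

import AVFoundation

func compositionLayerInstruction(for track: AVCompositionTrack, assetTrack: AVAssetTrack, isFrontCamera: Bool) -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
    let assetSize = assetTrack.naturalSize
    // Rotate 90 degrees clockwise: maps (x, y) to (assetSize.height - y, x).
    var transform = CGAffineTransform(a: 0, b: 1, c: -1, d: 0, tx: assetSize.height, ty: 0)
    if isFrontCamera {
        // Hypothetical flag: flip about the vertical axis of the rotated frame,
        // whose rendered width is assetSize.height.
        let mirror = CGAffineTransform(a: -1, b: 0, c: 0, d: 1, tx: assetSize.height, ty: 0)
        transform = transform.concatenating(mirror)
    }
    instruction.setTransform(transform, at: .zero)
    return instruction
}

Composed together, the two matrices collapse to CGAffineTransform(a: 0, b: 1, c: 1, d: 0, tx: 0, ty: 0), i.e. (x, y) maps to (y, x), which renders the portrait video mirrored.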
I am trying to add a view over a video and export the newly created URL, but when exporting with AVAssetExportSession I get this error:
com.apple.coremedia.rootQueue.27 (103): EXC_BAD_INSTRUCTION (code=EXC_I386_INVOP, subcode=0x0)
The code that does this exporting is this:
func addViewToVideo(cameraSelection: MyCameraViewController.CameraSelection, fromVideoAt videoURL: URL, withView: UIView, completion: @escaping (URL?) -> Void) {
    let imageTransformedFromView = withView.asImage()
    let asset = AVURLAsset(url: videoURL)
    let composition = AVMutableComposition()

    guard let compositionTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid),
          let assetTrack = asset.tracks(withMediaType: .video).first else {
        print("Something is wrong with the asset")
        completion(nil)
        return
    }

    do {
        let timeRange = CMTimeRange(start: .zero, duration: asset.duration)
        try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
        if let audioAssetTrack = asset.tracks(withMediaType: .audio).first,
           let compositionAudioTrack = composition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) {
            try compositionAudioTrack.insertTimeRange(timeRange, of: audioAssetTrack, at: .zero)
        }
    } catch {
        print(error)
        completion(nil)
        return
    }

    compositionTrack.preferredTransform = assetTrack.preferredTransform
    let videoSize = CGSize(
        width: assetTrack.naturalSize.height,
        height: assetTrack.naturalSize.width)

    let videoLayer = CALayer()
    videoLayer.frame = CGRect(origin: .zero, size: videoSize)
    let overlayLayer = CALayer()
    overlayLayer.frame = CGRect(origin: .zero, size: videoSize)
    addImage(imagePassed: imageTransformedFromView, to: overlayLayer, videoSize: videoSize)

    let outputLayer = CALayer()
    outputLayer.frame = CGRect(origin: .zero, size: videoSize)
    outputLayer.addSublayer(videoLayer)
    outputLayer.addSublayer(overlayLayer)

    let videoComposition = AVMutableVideoComposition()
    videoComposition.renderSize = videoSize
    videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
    videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
        postProcessingAsVideoLayer: videoLayer,
        in: outputLayer)

    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRange(start: .zero, duration: composition.duration)
    videoComposition.instructions = [instruction]
    let layerInstruction = compositionLayerInstruction(
        cameraSelection: cameraSelection, for: compositionTrack,
        assetTrack: assetTrack)
    instruction.layerInstructions = [layerInstruction]

    guard let export = AVAssetExportSession(
        asset: composition,
        presetName: AVAssetExportPreset1920x1080)
    else {
        print("Cannot create export session.")
        completion(nil)
        return
    }

    let videoName = UUID().uuidString
    let exportURL = URL(fileURLWithPath: NSTemporaryDirectory())
        .appendingPathComponent(videoName)
        .appendingPathExtension("mov")
    export.videoComposition = videoComposition
    export.outputFileType = .mov
    export.outputURL = exportURL

    export.exportAsynchronously {
        DispatchQueue.main.async {
            switch export.status {
            case .completed:
                completion(exportURL)
            default:
                print("Something went wrong during export.")
                print(export.error ?? "unknown error")
                completion(nil)
            }
        }
    }
}
I get this error ONLY when running on the Simulator, not on a real device. Can someone please tell me what the cause could be?
I am creating an iOS video-editing tool with AVMutableComposition.
If I combine multiple videos in a single video mutable track, it works fine. However, when I create a mutable track per video, compose them, and play the result in AVPlayer, only the sound plays and the picture freezes when playback moves on to the next video. If I seek, it plays normally again. What's the cause?
The following is the code in which this occurs.
static func mergeClips(
    videos: [ExCAVideoLayer]?,
    audios: [ExCAAudioLayer]? = nil
) -> AVPlayerItem? {
    guard let videos = videos else {
        return nil
    }

    let mixComposition = AVMutableComposition()
    var layerInstructions = [AVMutableVideoCompositionLayerInstruction]()
    do {
        // Determine the video aspect ratio
        var aspectRatio: Double = 0.00001
        for video in videos {
            guard let url = video.videoUrl else {
                continue
            }
            if let naturalSize = self.resolutionForLocalVideo(url: url) {
                let naturalAspectRatio = naturalSize.height == 0 ? 1 : naturalSize.width / naturalSize.height
                if aspectRatio < naturalAspectRatio {
                    aspectRatio = naturalAspectRatio
                }
            }
        }

        var frameWidth = UIScreen.main.bounds.width
        var frameHeight = round(UIScreen.main.bounds.width / aspectRatio)
        if frameWidth.truncatingRemainder(dividingBy: 2) > 0 {
            frameWidth = frameWidth - 1
        }
        if frameHeight.truncatingRemainder(dividingBy: 2) > 0 {
            frameHeight = frameHeight - 1
        }
        let frameSize = CGSize(width: frameWidth, height: frameHeight)

        // Video composition
        var currentDuration = mixComposition.duration
        for video in videos {
            guard let url = video.videoUrl else {
                continue
            }
            let videoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
            let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)
            let avAsset = AVAsset(url: url)
            if let layerInstruction = self.videoCompositionInstruction(videoTrack, asset: avAsset, frameSize: frameSize) {
                layerInstructions.append(layerInstruction)
            }
            let streamRange = CMTimeRangeMake(start: CMTime.zero, duration: CMTimeAdd(avAsset.duration, CMTimeMakeWithSeconds(video.gap, preferredTimescale: 1)))
            if let streamVideoTrack = avAsset.tracks(withMediaType: .video).first {
                try videoTrack?.insertTimeRange(streamRange, of: streamVideoTrack, at: currentDuration)
                videoTrack?.preferredTransform = streamVideoTrack.preferredTransform
            }
            if let streamAudioTrack = avAsset.tracks(withMediaType: .audio).first {
                try audioTrack?.insertTimeRange(streamRange, of: streamAudioTrack, at: currentDuration)
            }
            currentDuration = mixComposition.duration
        }

        // Audio composition
        if let audios = audios {
            for audio in audios {
                guard let url = audio.audioUrl else {
                    continue
                }
                let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)
                let avAsset = AVAsset(url: url)
                let streamRange = CMTimeRangeMake(start: CMTime.zero, duration: avAsset.duration)
                if let streamAudioTrack = avAsset.tracks(withMediaType: .audio).first {
                    try audioTrack?.insertTimeRange(streamRange, of: streamAudioTrack, at: CMTimeMakeWithSeconds(audio.startTime, preferredTimescale: 1))
                }
            }
        }

        // AVVideoComposition and layout
        let videoComposition = AVMutableVideoComposition()
        videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
        videoComposition.renderSize = CGSize(width: frameSize.width, height: frameSize.height)
        let mainInstruction = AVMutableVideoCompositionInstruction()
        mainInstruction.timeRange = CMTimeRange(start: .zero, duration: mixComposition.duration)
        mainInstruction.backgroundColor = UIColor.black.cgColor
        mainInstruction.layerInstructions = layerInstructions
        videoComposition.instructions = [mainInstruction]

        let assetKeys = [
            "playable",
            "hasProtectedContent"
        ]
        let avPlayItem = AVPlayerItem(asset: mixComposition, automaticallyLoadedAssetKeys: assetKeys)
        avPlayItem.videoComposition = videoComposition
        return avPlayItem
    }
    catch let error {
        print("Could not create mixComposition \(error.localizedDescription)")
        return nil
    }
}
func PlayVideo() {
    guard let avPlayerItem = VideoComposition.mergeClips(videos: self.projectRender?.videoItems) else {
        return
    }
    self.avPlayer = AVPlayer(playerItem: avPlayerItem)
    self.avPlayerLayer = AVPlayerLayer(player: self.avPlayer)
    self.avPlayerLayer?.needsDisplayOnBoundsChange = true
    if let avPlayerLayer = self.avPlayerLayer {
        self.videoContainer?.layer.addSublayer(avPlayerLayer)
    }
    self.avPlayer?.play()
}
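The helpers resolutionForLocalVideo(url:) and videoCompositionInstruction(_:asset:frameSize:) referenced above aren't shown in the question. For reference, a minimal sketch of what they might look like, as a hypothetical reconstruction (aspect-fit, assuming upright source tracks), placed in the same VideoComposition type that PlayVideo calls into:

import AVFoundation
import UIKit

extension VideoComposition {
    // Hypothetical reconstruction: display size of the first video track,
    // with the track's orientation transform applied.
    static func resolutionForLocalVideo(url: URL) -> CGSize? {
        guard let track = AVAsset(url: url).tracks(withMediaType: .video).first else { return nil }
        let size = track.naturalSize.applying(track.preferredTransform)
        return CGSize(width: abs(size.width), height: abs(size.height))
    }

    // Hypothetical reconstruction: scale each track to fit the shared render
    // frame and center it.
    static func videoCompositionInstruction(_ track: AVMutableCompositionTrack?, asset: AVAsset, frameSize: CGSize) -> AVMutableVideoCompositionLayerInstruction? {
        guard let track = track,
              let assetTrack = asset.tracks(withMediaType: .video).first else { return nil }
        let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
        let naturalSize = assetTrack.naturalSize
        let scale = min(frameSize.width / naturalSize.width, frameSize.height / naturalSize.height)
        // Center the scaled track inside the render frame.
        let tx = (frameSize.width - naturalSize.width * scale) / 2
        let ty = (frameSize.height - naturalSize.height * scale) / 2
        let transform = CGAffineTransform(scaleX: scale, y: scale)
            .concatenating(CGAffineTransform(translationX: tx, y: ty))
        instruction.setTransform(transform, at: .zero)
        return instruction
    }
}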
I have a 30-second video clip (1280*720, around 3 MB, *.MP4) and a logo image (4 KB, JPG). I've managed to add the watermark and export the video using the following code, but the problems are:
The exported video is around 20 MB. Could we keep it under 5 MB while retaining the 1280*720 resolution or similar quality?
The export takes around 20 seconds. Is it possible to make it faster?
If I set the AVAssetExportSession preset to AVAssetExportPresetMediumQuality, it is indeed faster, but the video quality isn't good enough.
My goal is to get the whole process (add watermark and export) down to 5-8 seconds while keeping pretty much the same video quality and size. Is that possible? Thanks in advance.
func merge(videoPath: URL, withForegroundImage foregroundImage: UIImage, completion: @escaping (URL?) -> Void) {
    let videoUrlAsset = AVURLAsset(url: videoPath, options: nil)
    let mutableComposition = AVMutableComposition()
    let videoAssetTrack = videoUrlAsset.tracks(withMediaType: AVMediaType.video).first!
    let videoCompositionTrack = mutableComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)
    videoCompositionTrack?.preferredTransform = videoAssetTrack.preferredTransform
    try! videoCompositionTrack?.insertTimeRange(CMTimeRange(start: CMTime.zero, duration: videoAssetTrack.timeRange.duration), of: videoAssetTrack, at: CMTime.zero)
    addAudioTrack(composition: mutableComposition, videoUrl: videoPath)

    let videoSize: CGSize = (videoCompositionTrack?.naturalSize)!
    let frame = CGRect(x: 0.0, y: 0.0, width: videoSize.width, height: videoSize.height)
    let imageLayer = CALayer()
    imageLayer.contents = foregroundImage.cgImage
    imageLayer.frame = CGRect(x: 40, y: 20, width: 50, height: 50)
    let videoLayer = CALayer()
    videoLayer.frame = frame
    let animationLayer = CALayer()
    animationLayer.frame = frame
    animationLayer.addSublayer(videoLayer)
    animationLayer.addSublayer(imageLayer)

    let videoComposition = AVMutableVideoComposition(propertiesOf: (videoCompositionTrack?.asset!)!)
    videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: animationLayer)

    let documentDirectory = NSSearchPathForDirectoriesInDomains(FileManager.SearchPathDirectory.cachesDirectory, FileManager.SearchPathDomainMask.userDomainMask, true).first!
    let documentDirectoryUrl = URL(fileURLWithPath: documentDirectory)
    let destinationFilePath = documentDirectoryUrl.appendingPathComponent("result.mp4")
    do {
        if FileManager.default.fileExists(atPath: destinationFilePath.path) {
            try FileManager.default.removeItem(at: destinationFilePath)
            print("removed")
        }
    } catch {
        print(error)
    }

    let exportSession = AVAssetExportSession(asset: mutableComposition, presetName: AVAssetExportPreset1280x720)!
    exportSession.videoComposition = videoComposition
    exportSession.outputURL = destinationFilePath
    exportSession.outputFileType = .mp4
    exportSession.exportAsynchronously { [weak exportSession] in
        if let strongExportSession = exportSession {
            print("exported")
            completion(strongExportSession.outputURL!)
        }
    }
}

private func addAudioTrack(composition: AVMutableComposition, videoUrl: URL) {
    let videoUrlAsset = AVURLAsset(url: videoUrl, options: nil)
    let audioTracks = videoUrlAsset.tracks(withMediaType: AVMediaType.audio)
    let compositionAudioTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: CMPersistentTrackID())!
    for audioTrack in audioTracks {
        try! compositionAudioTrack.insertTimeRange(audioTrack.timeRange, of: audioTrack, at: CMTime.zero)
    }
}
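Not from the question, but one knob that might help with the size target: AVAssetExportSession exposes a best-effort cap on output file length. A sketch, applied to the exportSession above (exact bitrate control would mean switching to AVAssetWriter, which is a bigger change):

exportSession.fileLengthLimit = 5 * 1024 * 1024   // best-effort ~5 MB cap; the session may lower quality to honor it
exportSession.shouldOptimizeForNetworkUse = true  // interleaves samples for streaming; does not shrink the file

Whether this actually hits 5 MB at 1280*720 depends on the content; the limit is not guaranteed to be met.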
I'm trying to merge 2 videos with a 2-second overlap. In this overlap I'd like to fade the second video in (or fade the first one out to reveal the second; either would be great).
The first video fades out 2 seconds before the end as intended, but as it fades I get a black screen instead of the second video fading in. At the end of video 1, video 2 shows up halfway through its fade-in animation.
What am I doing wrong with the tracks that keeps them from overlapping? Below is my code.
func setupVideo() {
    let url = URL(fileURLWithPath: Bundle.main.path(forResource: "demoVideoTwo", ofType: "mp4")!)
    let assetOne = AVAsset(url: url)
    let urlTwo = URL(fileURLWithPath: Bundle.main.path(forResource: "demoVideoThree", ofType: "mp4")!)
    let assetTwo = AVAsset(url: urlTwo)
    let mixComposition = AVMutableComposition()
    var instructions = [AVMutableVideoCompositionLayerInstruction]()
    var mainInstructionList = [AVMutableVideoCompositionInstruction]()
    var lastTime = CMTime.zero

    // Create Track One
    guard let videoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)),
          let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
        return
    }

    // Setup AVAsset 1
    let timeRange = CMTimeRangeMake(start: CMTime.zero, duration: assetOne.duration)
    do {
        try videoTrack.insertTimeRange(timeRange, of: assetOne.tracks(withMediaType: .video)[0], at: lastTime)
        try audioTrack.insertTimeRange(timeRange, of: assetOne.tracks(withMediaType: .audio)[0], at: lastTime)
    } catch {
        print(error)
    }

    // Setup Layer Instruction 1
    let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
    let duration = CMTime(seconds: 2, preferredTimescale: 60)
    let transitTime = CMTime(seconds: 2, preferredTimescale: 60)
    let insertTime = CMTimeSubtract(assetOne.duration, transitTime)
    let instRange = CMTimeRangeMake(start: insertTime, duration: duration)
    layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: instRange)
    instructions.append(layerInstruction)

    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(start: lastTime, duration: assetOne.duration)
    mainInstruction.layerInstructions = instructions
    mainInstructionList.append(mainInstruction)
    lastTime = CMTimeAdd(lastTime, assetOne.duration)

    // Create Track Two
    guard let videoTrackTwo = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)),
          let audioTrackTwo = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
        return
    }

    // Setup AVAsset 2
    let transitionTime = CMTime(seconds: 2, preferredTimescale: 60)
    let newLastTime = CMTimeSubtract(assetOne.duration, transitionTime)
    let timeRangeTwo = CMTimeRangeMake(start: CMTime.zero, duration: assetTwo.duration)
    do {
        try videoTrackTwo.insertTimeRange(timeRangeTwo, of: assetTwo.tracks(withMediaType: .video)[0], at: newLastTime)
        try audioTrackTwo.insertTimeRange(timeRangeTwo, of: assetTwo.tracks(withMediaType: .audio)[0], at: newLastTime)
    } catch {
        print(error)
    }

    // Setup Layer Instruction 2
    let layerInstructionTwo = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrackTwo)
    let durationTwo = CMTime(seconds: 4, preferredTimescale: 60)
    let instRangeTwo = CMTimeRangeMake(start: newLastTime, duration: durationTwo)
    layerInstructionTwo.setOpacityRamp(fromStartOpacity: 0.0, toEndOpacity: 1.0, timeRange: instRangeTwo)
    instructions.append(layerInstructionTwo)

    let mainInstructionTwo = AVMutableVideoCompositionInstruction()
    mainInstructionTwo.timeRange = CMTimeRangeMake(start: lastTime, duration: assetTwo.duration)
    mainInstructionTwo.layerInstructions = instructions
    mainInstructionList.append(mainInstructionTwo)

    // Setup Video Composition
    let mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = mainInstructionList
    mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 60)
    mainComposition.renderSize = videoTrack.naturalSize

    let item = AVPlayerItem(asset: mixComposition)
    item.videoComposition = mainComposition
    player = AVPlayer(playerItem: item)
    let playerLayer: AVPlayerLayer = {
        let layer = AVPlayerLayer(player: player)
        layer.videoGravity = .resizeAspectFill
        return layer
    }()
    let playerWidth: CGFloat = UIScreen.main.bounds.size.width
    let videoHeight = UIScreen.main.bounds.size.width * 9 / 16
    playerLayer.frame = CGRect(x: 0, y: 0, width: playerWidth, height: videoHeight)
    self.layer.addSublayer(playerLayer)
}
Don't create an AVMutableVideoCompositionInstruction for each video track, and don't assign the instructions array to each of them. Instruction time ranges must not overlap, so instead create a single AVMutableVideoCompositionInstruction, right before creating the AVMutableVideoComposition, and assign all the layer instructions to that one.
Here is the code working the way you want.
func setupVideo() {
    let url = URL(fileURLWithPath: Bundle.main.path(forResource: "demoVideoTwo", ofType: "mp4")!)
    let assetOne = AVAsset(url: url)
    let urlTwo = URL(fileURLWithPath: Bundle.main.path(forResource: "demoVideoThree", ofType: "mp4")!)
    let assetTwo = AVAsset(url: urlTwo)
    let mixComposition = AVMutableComposition()
    var instructions = [AVMutableVideoCompositionLayerInstruction]()
    var lastTime = CMTime.zero

    // Create Track One
    guard let videoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)),
          let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
        return
    }

    // Setup AVAsset 1
    let timeRange = CMTimeRangeMake(start: CMTime.zero, duration: assetOne.duration)
    do {
        try videoTrack.insertTimeRange(timeRange, of: assetOne.tracks(withMediaType: .video)[0], at: lastTime)
        try audioTrack.insertTimeRange(timeRange, of: assetOne.tracks(withMediaType: .audio)[0], at: lastTime)
    } catch {
        print(error)
    }

    // Setup Layer Instruction 1: fade out over the last 2 seconds of video 1
    let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
    let duration = CMTime(seconds: 2, preferredTimescale: 60)
    let transitTime = CMTime(seconds: 2, preferredTimescale: 60)
    let insertTime = CMTimeSubtract(assetOne.duration, transitTime)
    let instRange = CMTimeRangeMake(start: insertTime, duration: duration)
    layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: instRange)
    instructions.append(layerInstruction)
    lastTime = CMTimeAdd(lastTime, assetOne.duration)

    // Create Track Two
    guard let videoTrackTwo = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)),
          let audioTrackTwo = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
        return
    }

    // Setup AVAsset 2: starts 2 seconds before video 1 ends
    let transitionTime = CMTime(seconds: 2, preferredTimescale: 60)
    let newLastTime = CMTimeSubtract(assetOne.duration, transitionTime)
    let timeRangeTwo = CMTimeRangeMake(start: CMTime.zero, duration: assetTwo.duration)
    do {
        try videoTrackTwo.insertTimeRange(timeRangeTwo, of: assetTwo.tracks(withMediaType: .video)[0], at: newLastTime)
        try audioTrackTwo.insertTimeRange(timeRangeTwo, of: assetTwo.tracks(withMediaType: .audio)[0], at: newLastTime)
    } catch {
        print(error)
    }

    // Setup Layer Instruction 2: visible underneath while video 1 fades out
    let layerInstructionTwo = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrackTwo)
    layerInstructionTwo.setOpacity(1.0, at: newLastTime)
    instructions.append(layerInstructionTwo)

    // Setup Video Composition: one instruction spanning the whole timeline
    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(start: .zero, duration: CMTimeAdd(newLastTime, assetTwo.duration))
    mainInstruction.layerInstructions = instructions
    let mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = [mainInstruction]
    mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 60)
    mainComposition.renderSize = videoTrack.naturalSize

    let item = AVPlayerItem(asset: mixComposition)
    item.videoComposition = mainComposition
    player = AVPlayer(playerItem: item)
    let playerLayer: AVPlayerLayer = {
        let layer = AVPlayerLayer(player: player)
        layer.videoGravity = .resizeAspectFill
        return layer
    }()
    let playerWidth: CGFloat = UIScreen.main.bounds.size.width
    let videoHeight = UIScreen.main.bounds.size.width * 9 / 16
    playerLayer.frame = CGRect(x: 0, y: 0, width: playerWidth, height: videoHeight)
    self.layer.addSublayer(playerLayer)
}
I tried to convert an Objective-C sample to Swift 4. The code adds a video over another video; http://www.theappguruz.com/blog/ios-overlap-multiple-videos is the Objective-C code.
I got this error:
Error Domain=AVFoundationErrorDomain Code=-11841 "Operation Stopped" UserInfo={NSLocalizedFailureReason=The video could not be composed., NSLocalizedDescription=Operation Stopped, NSUnderlyingError=0x604000646cf0 {Error Domain=NSOSStatusErrorDomain Code=-17390 "(null)"}}
class func mergeVideos(firstUrl: URL, secondUrl: URL) {
    let firstAssets = AVAsset(url: firstUrl)
    let secondAssets = AVAsset(url: secondUrl)
    let mixComposition = AVMutableComposition()
    let firstTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
    let secondTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
    do {
        try firstTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, firstAssets.duration), of: firstAssets.tracks(withMediaType: .video)[0], at: kCMTimeZero)
        try secondTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, secondAssets.duration), of: secondAssets.tracks(withMediaType: .video)[0], at: kCMTimeZero)
    } catch {
    }

    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, firstAssets.duration)

    let firstLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: firstTrack!)
    let scale = CGAffineTransform(scaleX: 0.6, y: 0.6)
    let move = CGAffineTransform(translationX: 0, y: 0)
    let transform = scale.concatenating(move)
    firstLayerInstruction.setTransform(transform, at: kCMTimeZero)

    let secondLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: secondTrack!)
    let secondScale = CGAffineTransform(scaleX: 0.9, y: 0.9)
    let secondMove = CGAffineTransform(translationX: 0, y: 0)
    let secondTransform = secondScale.concatenating(secondMove)
    secondLayerInstruction.setTransform(secondTransform, at: kCMTimeZero)

    let mainCompositionInst = AVMutableVideoComposition()
    mainCompositionInst.instructions = [mainInstruction]
    mainCompositionInst.frameDuration = CMTimeMake(1, 30)
    mainCompositionInst.renderSize = CGSize(width: (firstTrack?.naturalSize.width)!, height: (firstTrack?.naturalSize.height)!)

    let saveUrl = URL(fileURLWithPath: NSHomeDirectory() + "/Documents/1.mp4")
    guard let assetExport = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
    assetExport.videoComposition = mainCompositionInst
    assetExport.outputFileType = .mp4
    assetExport.outputURL = saveUrl
    assetExport.exportAsynchronously(completionHandler: {
        switch assetExport.status {
        case AVAssetExportSessionStatus.failed:
            print("failed \(String(describing: assetExport.error))")
        case AVAssetExportSessionStatus.cancelled:
            print("cancelled \(String(describing: assetExport.error))")
        case AVAssetExportSessionStatus.completed:
            print("Completed")
        default:
            print("unknown")
        }
    })
}
I found the solution to my problem.
I just needed to assign the layer instructions to the AVMutableVideoCompositionInstruction, and the code works fine.
This line of code fixes the problem:
mainInstruction.layerInstructions = [firstLayerInstruction, secondLayerInstruction]
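In context, the assignment goes right after both layer instructions are built and before the instruction is handed to the video composition; an excerpt from the function above with the fix applied:

    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, firstAssets.duration)
    // ... build firstLayerInstruction and secondLayerInstruction as above ...
    mainInstruction.layerInstructions = [firstLayerInstruction, secondLayerInstruction] // the fix
    let mainCompositionInst = AVMutableVideoComposition()
    mainCompositionInst.instructions = [mainInstruction]

Without that assignment, the composition contains an instruction with no layer instructions, which AVFoundation rejects at export time with the -11841 "The video could not be composed" error above.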