Overlap Multiple Videos in Swift - iPhone

I tried to convert Objective-C code to Swift 4. The code adds a video over another video; http://www.theappguruz.com/blog/ios-overlap-multiple-videos is the Objective-C original.
I got this error:
Error Domain=AVFoundationErrorDomain Code=-11841 "Operation Stopped" UserInfo={NSLocalizedFailureReason=The video could not be composed., NSLocalizedDescription=Operation Stopped, NSUnderlyingError=0x604000646cf0 {Error Domain=NSOSStatusErrorDomain Code=-17390 "(null)"}}
class func mergeVideos(firestUrl: URL, SecondUrl: URL) {
    let firstAssets = AVAsset(url: firestUrl)
    let secondAssets = AVAsset(url: SecondUrl)
    let mixComposition = AVMutableComposition()
    let firstTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
    let seconTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
    do {
        try firstTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, firstAssets.duration), of: firstAssets.tracks(withMediaType: .video)[0], at: kCMTimeZero)
        try seconTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, secondAssets.duration), of: secondAssets.tracks(withMediaType: .video)[0], at: kCMTimeZero)
    } catch {
    }
    let mainInstraction = AVMutableVideoCompositionInstruction()
    mainInstraction.timeRange = CMTimeRangeMake(kCMTimeZero, firstAssets.duration)
    let firstLayerInstraction = AVMutableVideoCompositionLayerInstruction(assetTrack: firstTrack!)
    let scale = CGAffineTransform(scaleX: 0.6, y: 0.6)
    let move = CGAffineTransform(translationX: 0, y: 0)
    let transform = scale.concatenating(move)
    firstLayerInstraction.setTransform(transform, at: kCMTimeZero)
    let secondLayerInstraction = AVMutableVideoCompositionLayerInstruction(assetTrack: seconTrack!)
    let secondScale = CGAffineTransform(scaleX: 0.9, y: 0.9)
    let secondMove = CGAffineTransform(translationX: 0, y: 0)
    let secondTransform = secondScale.concatenating(secondMove)
    secondLayerInstraction.setTransform(secondTransform, at: kCMTimeZero)
    let mainCompositonInst = AVMutableVideoComposition()
    mainCompositonInst.instructions = [mainInstraction]
    mainCompositonInst.frameDuration = CMTimeMake(1, 30)
    mainCompositonInst.renderSize = CGSize(width: (firstTrack?.naturalSize.width)!, height: (firstTrack?.naturalSize.height)!)
    let saveUrl = URL(fileURLWithPath: NSHomeDirectory() + "/Documents/1.mp4")
    guard let assetExport = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
    assetExport.videoComposition = mainCompositonInst
    assetExport.outputFileType = .mp4
    assetExport.outputURL = saveUrl
    assetExport.exportAsynchronously(completionHandler: {
        switch assetExport.status {
        case AVAssetExportSessionStatus.failed:
            print("failed \(assetExport.error)")
        case AVAssetExportSessionStatus.cancelled:
            print("cancelled \(assetExport.error)")
        case AVAssetExportSessionStatus.completed:
            print("Completed")
        default:
            print("unknown")
        }
    })
}

I have the solution to my problem.
I just needed to assign the layer instructions to the AVMutableVideoCompositionInstruction's layerInstructions property, and the code works fine.
This line of code fixes the problem:
mainInstraction.layerInstructions = [firstLayerInstraction, secondLayerInstraction]
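For context, here is a minimal sketch of how that assignment fits into the code above (same variable names, nothing else changed). Without it, the composition instruction covers the time range but references no tracks, so AVFoundation cannot compose the frames and the export fails with error -11841:

let mainInstraction = AVMutableVideoCompositionInstruction()
mainInstraction.timeRange = CMTimeRangeMake(kCMTimeZero, firstAssets.duration)
// ... build firstLayerInstraction and secondLayerInstraction as before ...
// The missing line: attach the layer instructions to the instruction.
mainInstraction.layerInstructions = [firstLayerInstraction, secondLayerInstraction]

let mainCompositonInst = AVMutableVideoComposition()
mainCompositonInst.instructions = [mainInstraction]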

Related

Error when adding a view over a video and exporting the URL asynchronously - Swift

I am trying to add a view over a video and export the newly created URL, but when exporting with the AVAssetExportSession I get this error:
com.apple.coremedia.rootQueue.27 (103): EXC_BAD_INSTRUCTION (code=EXC_I386_INVOP, subcode=0x0)
The code that does this exporting is this:
func addViewToVideo(cameraSelection: MyCameraViewController.CameraSelection, fromVideoAt videoURL: URL, withView: UIView, completion: @escaping (URL?) -> Void) {
    let imageTransformedFromView = withView.asImage()
    let asset = AVURLAsset(url: videoURL)
    let composition = AVMutableComposition()
    guard let compositionTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid), let assetTrack = asset.tracks(withMediaType: .video).first else {
        print("Something is wrong with the asset")
        completion(nil)
        return
    }
    do {
        let timeRange = CMTimeRange(start: .zero, duration: asset.duration)
        try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
        if let audioAssetTrack = asset.tracks(withMediaType: .audio).first, let compositionAudioTrack = composition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) {
            try compositionAudioTrack.insertTimeRange(timeRange, of: audioAssetTrack, at: .zero)
        }
    } catch {
        print(error)
        completion(nil)
        return
    }
    compositionTrack.preferredTransform = assetTrack.preferredTransform
    let videoSize = CGSize(
        width: assetTrack.naturalSize.height,
        height: assetTrack.naturalSize.width)
    let videoLayer = CALayer()
    videoLayer.frame = CGRect(origin: .zero, size: videoSize)
    let overlayLayer = CALayer()
    overlayLayer.frame = CGRect(origin: .zero, size: videoSize)
    addImage(imagePassed: imageTransformedFromView, to: overlayLayer, videoSize: videoSize)
    let outputLayer = CALayer()
    outputLayer.frame = CGRect(origin: .zero, size: videoSize)
    outputLayer.addSublayer(videoLayer)
    outputLayer.addSublayer(overlayLayer)
    let videoComposition = AVMutableVideoComposition()
    videoComposition.renderSize = videoSize
    videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
    videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
        postProcessingAsVideoLayer: videoLayer,
        in: outputLayer)
    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRange(start: .zero, duration: composition.duration)
    videoComposition.instructions = [instruction]
    let layerInstruction = compositionLayerInstruction(
        cameraSelection: cameraSelection, for: compositionTrack,
        assetTrack: assetTrack)
    instruction.layerInstructions = [layerInstruction]
    guard let export = AVAssetExportSession(
        asset: composition,
        presetName: AVAssetExportPreset1920x1080)
    else {
        print("Cannot create export session.")
        completion(nil)
        return
    }
    let videoName = UUID().uuidString
    let exportURL = URL(fileURLWithPath: NSTemporaryDirectory())
        .appendingPathComponent(videoName)
        .appendingPathExtension("mov")
    export.videoComposition = videoComposition
    export.outputFileType = .mov
    export.outputURL = exportURL
    export.exportAsynchronously {
        DispatchQueue.main.async {
            switch export.status {
            case .completed:
                completion(exportURL)
            default:
                print("Something went wrong during export.")
                print(export.error ?? "unknown error")
                completion(nil)
            }
        }
    }
}
I get this error ONLY WHEN RUNNING ON THE SIMULATOR, not on a real device. Can someone please tell me what the cause could be?

Adding image on video with AVMutableComposition - Swift - Programmatically

I want to add an image over a video, and in order to do so I followed this tutorial:
The code I implemented is this:
func addViewToVideo(fromVideoAt videoURL: URL, withView: UIView, completion: @escaping (URL?) -> Void) {
    let imageTransformedFromView = withView.asImage()
    let asset = AVURLAsset(url: videoURL)
    let composition = AVMutableComposition()
    guard let compositionTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid), let assetTrack = asset.tracks(withMediaType: .video).first else {
        print("Something is wrong with the asset")
        completion(nil)
        return
    }
    do {
        let timeRange = CMTimeRange(start: .zero, duration: asset.duration)
        try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
        if let audioAssetTrack = asset.tracks(withMediaType: .audio).first, let compositionAudioTrack = composition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) {
            try compositionAudioTrack.insertTimeRange(timeRange, of: audioAssetTrack, at: .zero)
        }
    } catch {
        print(error)
        completion(nil)
        return
    }
    compositionTrack.preferredTransform = assetTrack.preferredTransform
    let videoSize = CGSize(
        width: assetTrack.naturalSize.height,
        height: assetTrack.naturalSize.width)
    let videoLayer = CALayer()
    videoLayer.frame = CGRect(origin: .zero, size: videoSize)
    let overlayLayer = CALayer()
    overlayLayer.frame = CGRect(origin: .zero, size: videoSize)
    addImage(imagePassed: imageTransformedFromView, to: overlayLayer, videoSize: videoSize)
    let outputLayer = CALayer()
    outputLayer.frame = CGRect(origin: .zero, size: videoSize)
    outputLayer.addSublayer(videoLayer)
    outputLayer.addSublayer(overlayLayer)
    let videoComposition = AVMutableVideoComposition()
    videoComposition.renderSize = videoSize
    videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
    videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
        postProcessingAsVideoLayer: videoLayer,
        in: outputLayer)
    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRange(start: .zero, duration: composition.duration)
    videoComposition.instructions = [instruction]
    let layerInstruction = compositionLayerInstruction(
        for: compositionTrack,
        assetTrack: assetTrack)
    instruction.layerInstructions = [layerInstruction]
    guard let export = AVAssetExportSession(
        asset: composition,
        presetName: AVAssetExportPreset1920x1080)
    else {
        print("Cannot create export session.")
        completion(nil)
        return
    }
    let videoName = UUID().uuidString
    let exportURL = URL(fileURLWithPath: NSTemporaryDirectory())
        .appendingPathComponent(videoName)
        .appendingPathExtension("mov")
    export.videoComposition = videoComposition
    export.outputFileType = .mov
    export.outputURL = exportURL
    export.exportAsynchronously {
        DispatchQueue.main.async {
            switch export.status {
            case .completed:
                completion(exportURL)
            default:
                print("Something went wrong during export.")
                print(export.error ?? "unknown error")
                completion(nil)
            }
        }
    }
}

func compositionLayerInstruction(for track: AVCompositionTrack, assetTrack: AVAssetTrack) -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
    let assetSize = assetTrack.naturalSize
    let transform = CGAffineTransform(a: 0, b: 1, c: -1, d: 0, tx: assetSize.height, ty: 0)
    instruction.setTransform(transform, at: .zero)
    return instruction
}
It works pretty well, apart from the fact that when I pass in a video from the front camera it doesn't mirror it.
I'm pretty sure the reason it isn't mirrored lies in the CGAffineTransform.
To be honest, I don't know how to work with the a, b, c, d, tx, ty initializer, but how could I transform the video so that it appears mirrored?
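One possible direction (a sketch, not taken from the thread): the matrix (a: 0, b: 1, c: -1, d: 0, tx: assetSize.height, ty: 0) is a 90° clockwise rotation, and mirroring is an extra horizontal flip in the rotated render space: scale x by -1, then translate right so the frame stays visible. Assuming the same portrait setup as above:

func mirroredCompositionLayerInstruction(for track: AVCompositionTrack, assetTrack: AVAssetTrack) -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
    let assetSize = assetTrack.naturalSize
    // Same 90° clockwise rotation as the original helper: (x, y) -> (height - y, x).
    var transform = CGAffineTransform(a: 0, b: 1, c: -1, d: 0, tx: assetSize.height, ty: 0)
    // Horizontal flip in render space, then shift back into the visible frame.
    transform = transform.concatenating(CGAffineTransform(scaleX: -1, y: 1))
    transform = transform.concatenating(CGAffineTransform(translationX: assetSize.height, y: 0))
    instruction.setTransform(transform, at: .zero)
    return instruction
}

Using this helper in place of compositionLayerInstruction(for:assetTrack:) only for front-camera footage would leave back-camera clips untouched.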

How do I merge 2 videos with a fade transition?

I'm trying to merge 2 videos with a 2-second overlap. In this overlap I'd like to fade the second video in (or fade the first one out to reveal the second; either one would be great).
The first video fades out 2 seconds before the end as intended, but as it fades I get a black screen instead of the second video fading in. At the end of video 1, video 2 shows up halfway through its fade-in animation.
What am I doing wrong with the tracks that I can't see them overlapping? Below is my code.
func setupVideo() {
    let url = URL(fileURLWithPath: Bundle.main.path(forResource: "demoVideoTwo", ofType: "mp4")!)
    let assetOne = AVAsset(url: url)
    let urlTwo = URL(fileURLWithPath: Bundle.main.path(forResource: "demoVideoThree", ofType: "mp4")!)
    let assetTwo = AVAsset(url: urlTwo)
    let mixComposition = AVMutableComposition()
    var instructions = [AVMutableVideoCompositionLayerInstruction]()
    var mainInstructionList = [AVMutableVideoCompositionInstruction]()
    var lastTime = CMTime.zero
    // Create Track One
    guard let videoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)), let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
        return
    }
    // Setup AVAsset 1
    let timeRange = CMTimeRangeMake(start: CMTime.zero, duration: assetOne.duration)
    do {
        try videoTrack.insertTimeRange(timeRange, of: assetOne.tracks(withMediaType: .video)[0], at: lastTime)
        try audioTrack.insertTimeRange(timeRange, of: assetOne.tracks(withMediaType: .audio)[0], at: lastTime)
    } catch {
        print(error)
    }
    // Setup Layer Instruction 1
    let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
    let duration = CMTime(seconds: 2, preferredTimescale: 60)
    let transitTime = CMTime(seconds: 2, preferredTimescale: 60)
    let insertTime = CMTimeSubtract(assetOne.duration, transitTime)
    let instRange = CMTimeRangeMake(start: insertTime, duration: duration)
    layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: instRange)
    instructions.append(layerInstruction)
    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(start: lastTime, duration: assetOne.duration)
    mainInstruction.layerInstructions = instructions
    mainInstructionList.append(mainInstruction)
    lastTime = CMTimeAdd(lastTime, assetOne.duration)
    // Create Track Two
    guard let videoTrackTwo = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)), let audioTrackTwo = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
        return
    }
    // Setup AVAsset 2
    let transitionTime = CMTime(seconds: 2, preferredTimescale: 60)
    let newLastTime = CMTimeSubtract(assetOne.duration, transitionTime)
    let timeRangeTwo = CMTimeRangeMake(start: CMTime.zero, duration: assetTwo.duration)
    do {
        try videoTrackTwo.insertTimeRange(timeRangeTwo, of: assetTwo.tracks(withMediaType: .video)[0], at: newLastTime)
        try audioTrackTwo.insertTimeRange(timeRangeTwo, of: assetTwo.tracks(withMediaType: .audio)[0], at: newLastTime)
    } catch {
        print(error)
    }
    // Setup Layer Instruction 2
    let layerInstructionTwo = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrackTwo)
    let durationTwo = CMTime(seconds: 4, preferredTimescale: 60)
    let instRangeTwo = CMTimeRangeMake(start: newLastTime, duration: durationTwo)
    layerInstructionTwo.setOpacityRamp(fromStartOpacity: 0.0, toEndOpacity: 1.0, timeRange: instRangeTwo)
    instructions.append(layerInstructionTwo)
    let mainInstructionTwo = AVMutableVideoCompositionInstruction()
    mainInstructionTwo.timeRange = CMTimeRangeMake(start: lastTime, duration: assetTwo.duration)
    mainInstructionTwo.layerInstructions = instructions
    mainInstructionList.append(mainInstructionTwo)
    // Setup Video Composition
    let mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = mainInstructionList
    mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 60)
    mainComposition.renderSize = videoTrack.naturalSize
    let item = AVPlayerItem(asset: mixComposition)
    item.videoComposition = mainComposition
    player = AVPlayer(playerItem: item)
    let playerLayer: AVPlayerLayer = {
        let layer = AVPlayerLayer(player: player)
        layer.videoGravity = .resizeAspectFill
        return layer
    }()
    let playerWidth: CGFloat = UIScreen.main.bounds.size.width
    let videoHeight = UIScreen.main.bounds.size.width * 9 / 16
    playerLayer.frame = CGRect(x: 0, y: 0, width: playerWidth, height: videoHeight)
    self.layer.addSublayer(playerLayer)
}
Don't create an AVMutableVideoCompositionInstruction for each video track, and don't assign the layer instructions to each of them. Instead, create a single AVMutableVideoCompositionInstruction just once, right before creating the AVMutableVideoComposition, then assign all the layer instructions to it.
Here is the code working the way you want.
func setupVideo() {
    let url = URL(fileURLWithPath: Bundle.main.path(forResource: "demoVideoTwo", ofType: "mp4")!)
    let assetOne = AVAsset(url: url)
    let urlTwo = URL(fileURLWithPath: Bundle.main.path(forResource: "demoVideoThree", ofType: "mp4")!)
    let assetTwo = AVAsset(url: urlTwo)
    let mixComposition = AVMutableComposition()
    var instructions = [AVMutableVideoCompositionLayerInstruction]()
    var lastTime = CMTime.zero
    // Create Track One
    guard let videoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)), let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
        return
    }
    // Setup AVAsset 1
    let timeRange = CMTimeRangeMake(start: CMTime.zero, duration: assetOne.duration)
    do {
        try videoTrack.insertTimeRange(timeRange, of: assetOne.tracks(withMediaType: .video)[0], at: lastTime)
        try audioTrack.insertTimeRange(timeRange, of: assetOne.tracks(withMediaType: .audio)[0], at: lastTime)
    } catch {
        print(error)
    }
    // Setup Layer Instruction 1
    let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
    let duration = CMTime(seconds: 2, preferredTimescale: 60)
    let transitTime = CMTime(seconds: 2, preferredTimescale: 60)
    let insertTime = CMTimeSubtract(assetOne.duration, transitTime)
    let instRange = CMTimeRangeMake(start: insertTime, duration: duration)
    layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: instRange)
    instructions.append(layerInstruction)
    lastTime = CMTimeAdd(lastTime, assetOne.duration)
    // Create Track Two
    guard let videoTrackTwo = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)), let audioTrackTwo = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
        return
    }
    // Setup AVAsset 2
    let transitionTime = CMTime(seconds: 2, preferredTimescale: 60)
    let newLastTime = CMTimeSubtract(assetOne.duration, transitionTime)
    let timeRangeTwo = CMTimeRangeMake(start: CMTime.zero, duration: assetTwo.duration)
    do {
        try videoTrackTwo.insertTimeRange(timeRangeTwo, of: assetTwo.tracks(withMediaType: .video)[0], at: newLastTime)
        try audioTrackTwo.insertTimeRange(timeRangeTwo, of: assetTwo.tracks(withMediaType: .audio)[0], at: newLastTime)
    } catch {
        print(error)
    }
    // Setup Layer Instruction 2
    let layerInstructionTwo = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrackTwo)
    layerInstructionTwo.setOpacity(1.0, at: newLastTime)
    instructions.append(layerInstructionTwo)
    // Setup Video Composition
    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(start: .zero, duration: CMTimeAdd(newLastTime, assetTwo.duration))
    mainInstruction.layerInstructions = instructions
    let mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = [mainInstruction]
    mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 60)
    mainComposition.renderSize = videoTrack.naturalSize
    let item = AVPlayerItem(asset: mixComposition)
    item.videoComposition = mainComposition
    player = AVPlayer(playerItem: item)
    let playerLayer: AVPlayerLayer = {
        let layer = AVPlayerLayer(player: player)
        layer.videoGravity = .resizeAspectFill
        return layer
    }()
    let playerWidth: CGFloat = UIScreen.main.bounds.size.width
    let videoHeight = UIScreen.main.bounds.size.width * 9 / 16
    playerLayer.frame = CGRect(x: 0, y: 0, width: playerWidth, height: videoHeight)
    self.layer.addSublayer(playerLayer)
}
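One note on the fade (an addition of mine, not part of the original answer): with setOpacity(1.0, at: newLastTime) the second clip cuts in at full opacity rather than fading in. Since both layer instructions now live in the single instruction, the opacity ramp from the question should work if you want a true cross-fade over the 2-second overlap:

// Instead of layerInstructionTwo.setOpacity(1.0, at: newLastTime):
let fadeRange = CMTimeRangeMake(start: newLastTime, duration: transitionTime)
layerInstructionTwo.setOpacityRamp(fromStartOpacity: 0.0, toEndOpacity: 1.0, timeRange: fadeRange)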

Rotate video 90 degrees in Swift 4

I've been trying to search for this all day, but all answers point to older versions of Swift or Objective-C.
I tried layer instructions, but AVMutableComposition has no member instructions. I remember this being really easy with just an affine transform, but I no longer know where I found that.
var mainVideoURL: URL!
let paths = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
let tempPath = paths[0] + "/mainVideo.mp4"
if FileManager.default.fileExists(atPath: tempPath) {
    guard (try? FileManager.default.removeItem(atPath: tempPath)) != nil else {
        print("remove path failed")
        self.enableButtons(enabled: true)
        return
    }
}
mainVideoURL = URL(fileURLWithPath: tempPath)
let firstAsset = AVURLAsset(url: fileURL)
let mixComposition = AVMutableComposition()
// repeat video number of times
let videoRepeat = photoVideoRepeats
for i in 0 ... videoRepeat - 1 {
    do {
        try mixComposition.insertTimeRange(CMTimeRangeMake(kCMTimeZero, firstAsset.duration),
                                           of: firstAsset,
                                           at: kCMTimeZero + CMTimeMultiply(firstAsset.duration, Int32(i)))
    } catch _ {
        print("Failed to load first track")
    }
}
guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
After my video was recorded, I was able to apply a CGAffineTransform to an AVMutableCompositionTrack.
In my case I needed to merge an audio track with the video, but you can see where the transforms take place:
func mergeVideoAndAudio(videoUrl: URL,
                        audioUrl: URL) -> AVAsset {
    let mixComposition = AVMutableComposition()
    var mutableCompositionVideoTrack = [AVMutableCompositionTrack]()
    var mutableCompositionAudioTrack = [AVMutableCompositionTrack]()
    var mutableCompositionAudioOfVideoTrack = [AVMutableCompositionTrack]()
    // start merge
    let aVideoAsset = AVAsset(url: videoUrl)
    let aAudioAsset = AVAsset(url: audioUrl)
    let compositionAddVideo = mixComposition.addMutableTrack(withMediaType: .video,
                                                             preferredTrackID: kCMPersistentTrackID_Invalid)
    let compositionAddAudio = mixComposition.addMutableTrack(withMediaType: .audio,
                                                             preferredTrackID: kCMPersistentTrackID_Invalid)
    let compositionAddAudioOfVideo = mixComposition.addMutableTrack(withMediaType: .audio,
                                                                    preferredTrackID: kCMPersistentTrackID_Invalid)
    let aVideoAssetTrack: AVAssetTrack = aVideoAsset.tracks(withMediaType: AVMediaType.video)[0]
    let aAudioOfVideoAssetTrack: AVAssetTrack? = aVideoAsset.tracks(withMediaType: AVMediaType.audio).first
    let aAudioAssetTrack: AVAssetTrack = aAudioAsset.tracks(withMediaType: AVMediaType.audio)[0]
    // Default must-have transformation
    compositionAddVideo?.preferredTransform = aVideoAssetTrack.preferredTransform
    var transforms = aVideoAssetTrack.preferredTransform
    if UIDevice.current.orientation == UIDeviceOrientation.landscapeLeft {
        transforms = transforms.concatenating(CGAffineTransform(rotationAngle: CGFloat(-90.0 * .pi / 180)))
        transforms = transforms.concatenating(CGAffineTransform(translationX: 1280, y: 0))
    } else if UIDevice.current.orientation == UIDeviceOrientation.landscapeRight {
        transforms = transforms.concatenating(CGAffineTransform(rotationAngle: CGFloat(90.0 * .pi / 180)))
        transforms = transforms.concatenating(CGAffineTransform(translationX: 1280, y: 0))
    } else if UIDevice.current.orientation == UIDeviceOrientation.portraitUpsideDown {
        transforms = transforms.concatenating(CGAffineTransform(rotationAngle: CGFloat(180.0 * .pi / 180)))
        transforms = transforms.concatenating(CGAffineTransform(translationX: 0, y: 720))
    }
    compositionAddVideo?.preferredTransform = transforms
    mutableCompositionVideoTrack.append(compositionAddVideo!)
    mutableCompositionAudioTrack.append(compositionAddAudio!)
    mutableCompositionAudioOfVideoTrack.append(compositionAddAudioOfVideo!)
    do {
        try mutableCompositionVideoTrack[0].insertTimeRange(CMTimeRangeMake(start: CMTime.zero,
                                                                            duration: aVideoAssetTrack.timeRange.duration),
                                                            of: aVideoAssetTrack,
                                                            at: CMTime.zero)
        // In my case the audio file is longer than the video file, so I took the videoAsset duration
        // instead of the audioAsset duration
        try mutableCompositionAudioTrack[0].insertTimeRange(CMTimeRangeMake(start: CMTime.zero,
                                                                            duration: aVideoAssetTrack.timeRange.duration),
                                                            of: aAudioAssetTrack,
                                                            at: CMTime.zero)
        // adding the audio of the video (if it exists) to the final composition
        if let aAudioOfVideoAssetTrack = aAudioOfVideoAssetTrack {
            try mutableCompositionAudioOfVideoTrack[0].insertTimeRange(CMTimeRangeMake(start: CMTime.zero,
                                                                                       duration: aVideoAssetTrack.timeRange.duration),
                                                                       of: aAudioOfVideoAssetTrack,
                                                                       at: CMTime.zero)
        }
    } catch {
        print(error.localizedDescription)
    }
    return mixComposition
}
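If all you need is the 90° rotation from the question title, here is a condensed sketch of the same preferredTransform idea (my condensation, not the answerer's code; videoAssetTrack and compositionVideoTrack are placeholders for your source track and composition track):

// Rotate 90° clockwise: (x, y) -> (-y, x), then shift right by the
// source height so the frame stays inside the visible rectangle.
let rotation = CGAffineTransform(rotationAngle: .pi / 2)
let shift = CGAffineTransform(translationX: videoAssetTrack.naturalSize.height, y: 0)
compositionVideoTrack.preferredTransform = rotation.concatenating(shift)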

Composing Video and Audio - Video's audio is gone

My question is: I am using the function below to compose a video and an audio file. I want to keep the video's original sound, but it goes away somehow, and I have no clue why.
I got this function from this answer
I tried to change the volumes right after appending the AVMutableCompositionTracks, but it did not work.
For instance;
mutableVideoCompositionTrack.preferredVolume = 1.0
mutableAudioCompositionTrack.preferredVolume = 0.05
But still, all you can hear is the audio file.
The function;
private func mergeAudioAndVideo(audioUrl: URL, videoUrl: URL, completion: @escaping (Bool) -> Void) {
    let mixComposition = AVMutableComposition()
    var mutableCompositionVideoTrack: [AVMutableCompositionTrack] = []
    var mutableCompositionAudioTrack: [AVMutableCompositionTrack] = []
    let totalVideoCompositionInstruction = AVMutableVideoCompositionInstruction()
    let videoAsset = AVAsset(url: videoUrl)
    let audioAsset = AVAsset(url: audioUrl)
    mutableCompositionVideoTrack.append(mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid))
    mutableCompositionAudioTrack.append(mixComposition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid))
    mutableCompositionAudioTrack[0].preferredVolume = 0.05
    mutableCompositionVideoTrack[0].preferredVolume = 1.0
    let videoAssetTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0]
    let audioAssetTrack = audioAsset.tracks(withMediaType: AVMediaTypeAudio)[0]
    do {
        try mutableCompositionVideoTrack[0].insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAssetTrack.timeRange.duration), of: videoAssetTrack, at: kCMTimeZero)
        try mutableCompositionAudioTrack[0].insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAssetTrack.timeRange.duration), of: audioAssetTrack, at: kCMTimeZero)
    } catch {
        print("ERROR#1")
    }
    totalVideoCompositionInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, videoAssetTrack.timeRange.duration)
    let mutableVideoComposition = AVMutableVideoComposition()
    mutableVideoComposition.frameDuration = CMTimeMake(1, 30)
    mutableVideoComposition.renderSize = CGSize(width: 1280, height: 720)
    // exporting
    savePathUrl = try! FileManager.default.url(for: FileManager.SearchPathDirectory.documentDirectory, in: FileManager.SearchPathDomainMask.userDomainMask, appropriateFor: nil, create: true).appendingPathComponent("merged").appendingPathExtension("mov")
    let assetExport = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)!
    assetExport.outputFileType = AVFileTypeMPEG4
    assetExport.outputURL = savePathUrl
    assetExport.shouldOptimizeForNetworkUse = true
    do {
        try FileManager.default.removeItem(at: savePathUrl)
    } catch {
        print(error)
    }
    assetExport.exportAsynchronously {
        switch assetExport.status {
        case .completed:
            print("completed")
            completion(true)
        default:
            print("failed \(assetExport.error!)")
            completion(false)
        }
    }
}
You can adjust the volume for the video and the audio separately, @Faruk. Here is a little bit of code for that.
// Extract audio from the video and the music
let audioMix: AVMutableAudioMix = AVMutableAudioMix()
var audioMixParam: [AVMutableAudioMixInputParameters] = []
let assetVideoTrack: AVAssetTrack = assetVideo.tracksWithMediaType(AVMediaTypeAudio)[0]
let assetMusicTrack: AVAssetTrack = assetMusic.tracksWithMediaType(AVMediaTypeAudio)[0]
let videoParam: AVMutableAudioMixInputParameters = AVMutableAudioMixInputParameters(track: assetVideoTrack)
videoParam.trackID = compositionAudioVideo.trackID
let musicParam: AVMutableAudioMixInputParameters = AVMutableAudioMixInputParameters(track: assetMusicTrack)
musicParam.trackID = compositionAudioMusic.trackID
// Set the final volume of the audio record and the music
videoParam.setVolume(volumeVideo, atTime: kCMTimeZero)
musicParam.setVolume(volumeAudio, atTime: kCMTimeZero)
// Add the settings
audioMixParam.append(musicParam)
audioMixParam.append(videoParam)
// Add the audio to the final record
// First: the audio of the record, and second: the music
do {
    try compositionAudioVideo.insertTimeRange(CMTimeRangeMake(kCMTimeZero, assetVideo.duration), ofTrack: assetVideoTrack, atTime: kCMTimeZero)
} catch _ {
    assertionFailure()
}
do {
    try compositionAudioMusic.insertTimeRange(CMTimeRangeMake(CMTimeMake(Int64(startAudioTime * 10000), 10000), assetVideo.duration), ofTrack: assetMusicTrack, atTime: kCMTimeZero)
} catch _ {
    assertionFailure()
}
// Add the parameters
audioMix.inputParameters = audioMixParam
let completeMovie = "\(docsDir)/\(randomString(5)).mp4"
let completeMovieUrl = NSURL(fileURLWithPath: completeMovie)
let exporter: AVAssetExportSession = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality)!
exporter.outputURL = completeMovieUrl
exporter.outputFileType = AVFileTypeMPEG4
exporter.audioMix = audioMix
exporter.exportAsynchronouslyWithCompletionHandler({
    switch exporter.status {
    case AVAssetExportSessionStatus.Completed:
        print("success with output url \(completeMovieUrl)")
    case AVAssetExportSessionStatus.Failed:
        print("failed \(String(exporter.error))")
    case AVAssetExportSessionStatus.Cancelled:
        print("cancelled \(String(exporter.error))")
    default:
        print("complete")
    }
})
Here's the combined code of all the answers. It took me a while to decode those answers, so I decided to add it here for any future users:
Swift 5:
enum MixError: Error {
    case TimeRangeFailure
    case ExportFailure
}

var selectedVideoLevel: Float = 1.0
var selectedMusicLevel: Float = 1.0

func mix(videoUrl: URL, musicUrl: URL, completion: ((Result<URL, Error>) -> Void)?) {
    let videoAsset = AVAsset(url: videoUrl)
    let musicAsset = AVAsset(url: musicUrl)
    let audioVideoComposition = AVMutableComposition()
    let audioMix = AVMutableAudioMix()
    var mixParameters = [AVMutableAudioMixInputParameters]()
    let videoCompositionTrack = audioVideoComposition
        .addMutableTrack(withMediaType: .video, preferredTrackID: .init())!
    let audioCompositionTrack = audioVideoComposition
        .addMutableTrack(withMediaType: .audio, preferredTrackID: .init())!
    let musicCompositionTrack = audioVideoComposition
        .addMutableTrack(withMediaType: .audio, preferredTrackID: .init())!
    let videoAssetTrack = videoAsset.tracks(withMediaType: .video)[0]
    let audioAssetTrack = videoAsset.tracks(withMediaType: .audio).first
    let musicAssetTrack = musicAsset.tracks(withMediaType: .audio)[0]
    let audioParameters = AVMutableAudioMixInputParameters(track: audioAssetTrack)
    audioParameters.trackID = audioCompositionTrack.trackID
    let musicParameters = AVMutableAudioMixInputParameters(track: musicAssetTrack)
    musicParameters.trackID = musicCompositionTrack.trackID
    audioParameters.setVolume(selectedVideoLevel, at: .zero)
    musicParameters.setVolume(selectedMusicLevel, at: .zero)
    mixParameters.append(audioParameters)
    mixParameters.append(musicParameters)
    audioMix.inputParameters = mixParameters
    /// prevents video from unnecessary rotations
    videoCompositionTrack.preferredTransform = videoAssetTrack.preferredTransform
    do {
        let timeRange = CMTimeRange(start: .zero, duration: videoAsset.duration)
        try videoCompositionTrack.insertTimeRange(timeRange, of: videoAssetTrack, at: .zero)
        if let audioAssetTrack = audioAssetTrack {
            try audioCompositionTrack.insertTimeRange(timeRange, of: audioAssetTrack, at: .zero)
        }
        try musicCompositionTrack.insertTimeRange(timeRange, of: musicAssetTrack, at: .zero)
    } catch {
        completion?(.failure(MixError.TimeRangeFailure))
        return
    }
    let exportUrl = FileManager.default
        .urls(for: .applicationSupportDirectory, in: .userDomainMask).first?
        .appendingPathComponent("\(Date().timeIntervalSince1970)-video.mp4")
    let exportSession = AVAssetExportSession(
        asset: audioVideoComposition,
        presetName: AVAssetExportPresetHighestQuality
    )
    exportSession?.audioMix = audioMix
    exportSession?.outputFileType = .m4v
    exportSession?.outputURL = exportUrl
    exportSession?.exportAsynchronously(completionHandler: {
        guard let status = exportSession?.status else { return }
        switch status {
        case .completed:
            completion?(.success(exportUrl!))
        case .failed:
            completion?(.failure(MixError.ExportFailure))
        default:
            print(status)
        }
    })
}
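A quick usage sketch (clipURL and songURL are placeholders for your own file URLs):

mix(videoUrl: clipURL, musicUrl: songURL) { result in
    switch result {
    case .success(let url):
        print("exported to \(url)")
    case .failure(let error):
        print("mix failed: \(error)")
    }
}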
I figured it out. It seems an AVAsset that loads a video holds the audio and the video separately, so you can reach them by writing:
videoAsset.tracks(withMediaType: AVMediaTypeAudio)[0] // audio of a video
videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0] // video of a video (without sound)
So I added these lines to the code and it worked!
var mutableCompositionBackTrack : [AVMutableCompositionTrack] = []
mutableCompositionBackTrack.append(mixComposition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid))
try mutableCompositionBackTrack[0].insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAssetTrack.timeRange.duration), of: backAssetTrack, at: kCMTimeZero)
There is still a missing point that I do not know how to handle, and that is setting the volumes of these audio assets. I will update this answer as soon as I figure it out.
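A minimal sketch of how those volumes could be set, borrowing the AVMutableAudioMix approach from the answer above (the track names match the question's code; the volume values are just examples):

let audioMix = AVMutableAudioMix()

// One parameters object per composition audio track.
let videoSoundParams = AVMutableAudioMixInputParameters(track: mutableCompositionBackTrack[0])
videoSoundParams.setVolume(1.0, at: kCMTimeZero) // keep the video's own sound

let musicParams = AVMutableAudioMixInputParameters(track: mutableCompositionAudioTrack[0])
musicParams.setVolume(0.05, at: kCMTimeZero) // duck the added audio file

audioMix.inputParameters = [videoSoundParams, musicParams]
assetExport.audioMix = audioMix // assign before calling exportAsynchronously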