copyNextSampleBuffer hanging for AVAssetReaderTrackOutput - swift

I am trying to create a video compression tool in Swift 5 for iOS 16 that simply takes a video file URL, compresses the video, and returns the new URL.
I have been following a few tutorials online, but they all use functions that have since been deprecated, so I have put together the code below, refactored from various sources:
import Foundation
import AVFoundation

class VideoCompressorModel: ObservableObject {
    let bitrate = 2_500_000 // target average bitrate in bits per second (~2.5 Mbps)

    func compressFile(urlToCompress: URL, outputURL: URL, completionHandler: @escaping (URL?) -> Void) async {
        let asset = AVAsset(url: urlToCompress)

        // create asset reader
        var assetReader: AVAssetReader?
        do {
            assetReader = try AVAssetReader(asset: asset)
        } catch {
            assetReader = nil
        }
        guard let reader = assetReader else {
            completionHandler(nil)
            return
        }

        var videoTrack: AVAssetTrack?
        var audioTrack: AVAssetTrack?
        do {
            videoTrack = try await asset.loadTracks(withMediaType: AVMediaType.video).first
            audioTrack = try await asset.loadTracks(withMediaType: AVMediaType.audio).first
        } catch {
            completionHandler(nil)
            return
        }
        guard let videoTrack, let audioTrack else {
            completionHandler(nil)
            return
        }

        let videoReaderSettings: [String: Any] = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32ARGB]

        // ADJUST BIT RATE OF VIDEO HERE
        var videoSettings: [String: Any]?
        do {
            videoSettings = await [
                AVVideoCompressionPropertiesKey: [AVVideoAverageBitRateKey: self.bitrate],
                AVVideoCodecKey: AVVideoCodecType.h264,
                AVVideoHeightKey: try videoTrack.load(.naturalSize).height,
                AVVideoWidthKey: try videoTrack.load(.naturalSize).width
            ]
        } catch {
            completionHandler(nil)
            return
        }
        guard let videoSettings else {
            completionHandler(nil)
            return
        }

        let audioSettings: [String: Any] = [
            AVSampleRateKey: 44100,
            AVFormatIDKey: kAudioFormatLinearPCM
        ]

        let assetReaderVideoOutput = AVAssetReaderTrackOutput(track: videoTrack, outputSettings: videoReaderSettings)
        let assetReaderAudioOutput = AVAssetReaderTrackOutput(track: audioTrack, outputSettings: audioSettings)

        if reader.canAdd(assetReaderVideoOutput) {
            reader.add(assetReaderVideoOutput)
        } else {
            completionHandler(nil)
            return
        }
        if reader.canAdd(assetReaderAudioOutput) {
            reader.add(assetReaderAudioOutput)
        } else {
            completionHandler(nil)
            return
        }

        let audioInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: nil)
        let videoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoSettings)
        do {
            videoInput.transform = try await videoTrack.load(.preferredTransform)
        } catch {
            completionHandler(nil)
            return
        }

        // we need to add samples to the video input
        let videoInputQueue = DispatchQueue(label: "videoQueue")
        let audioInputQueue = DispatchQueue(label: "audioQueue")

        var assetWriter: AVAssetWriter?
        do {
            assetWriter = try AVAssetWriter(outputURL: outputURL, fileType: AVFileType.mov)
        } catch {
            assetWriter = nil
        }
        guard let assetWriter else {
            completionHandler(nil)
            return
        }

        assetWriter.shouldOptimizeForNetworkUse = true
        assetWriter.add(videoInput)
        assetWriter.add(audioInput)
        assetWriter.startWriting()
        reader.startReading()
        assetWriter.startSession(atSourceTime: CMTime.zero)

        let closeWriterBothDone: () -> Void = {
            assetWriter.finishWriting(completionHandler: {
                completionHandler(assetWriter.outputURL)
            })
            reader.cancelReading()
        }

        print("STARTING ")

        let closeWriterAudioDone: () -> Void = {
            print("FINISHED AUDIO")
            videoInput.requestMediaDataWhenReady(on: videoInputQueue) {
                while videoInput.isReadyForMoreMediaData {
                    let sample = assetReaderVideoOutput.copyNextSampleBuffer()
                    if sample != nil {
                        videoInput.append(sample!)
                    } else {
                        videoInput.markAsFinished()
                        DispatchQueue.main.async {
                            closeWriterBothDone()
                        }
                        break
                    }
                }
            }
        }

        audioInput.requestMediaDataWhenReady(on: audioInputQueue) {
            while audioInput.isReadyForMoreMediaData {
                let sample = assetReaderAudioOutput.copyNextSampleBuffer()
                print("hi")
                if sample != nil {
                    audioInput.append(sample!)
                } else {
                    audioInput.markAsFinished()
                    DispatchQueue.main.async {
                        closeWriterAudioDone()
                    }
                    break
                }
            }
        }
    }
}
The issue is that copyNextSampleBuffer for the audio track output hangs after a few calls. I can see this from the print("hi"), which only fires a handful of times before the code blocks.
I have tried changing the audio options passed to AVAssetReaderTrackOutput, which I originally had simply as nil, and this only increased the number of times copyNextSampleBuffer gets called before it freezes.
Perhaps there is a better way overall to compress video in newer versions of Swift, as this seems somewhat inelegant? If not, are there any bugs in my code that are causing it to hang?
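One likely culprit, for reference: the two outputs of a single AVAssetReader have to be drained roughly in step, because the reader buffers demuxed samples for every output it owns. In the code above the video loop only starts after the audio loop has finished, so once the reader's queue for the unread video track fills up, copyNextSampleBuffer on the audio output can block. A minimal sketch of starting both loops immediately and finishing via a DispatchGroup (the group and its handling are illustrative, not part of the original code):
// Start draining both track outputs right away so neither backs up the reader.
let finishGroup = DispatchGroup()

finishGroup.enter()
videoInput.requestMediaDataWhenReady(on: videoInputQueue) {
    while videoInput.isReadyForMoreMediaData {
        if let sample = assetReaderVideoOutput.copyNextSampleBuffer() {
            videoInput.append(sample)
        } else {
            videoInput.markAsFinished()
            finishGroup.leave()
            break
        }
    }
}

finishGroup.enter()
audioInput.requestMediaDataWhenReady(on: audioInputQueue) {
    while audioInput.isReadyForMoreMediaData {
        if let sample = assetReaderAudioOutput.copyNextSampleBuffer() {
            audioInput.append(sample)
        } else {
            audioInput.markAsFinished()
            finishGroup.leave()
            break
        }
    }
}

// Once both tracks are done, close the writer and hand back the URL.
finishGroup.notify(queue: .main) {
    reader.cancelReading()
    assetWriter.finishWriting {
        completionHandler(assetWriter.outputURL)
    }
}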

Related

Swift AVFoundation instructions don't set the opacity

I have a function that merges videos. All videos merge properly and the first two videos will play perfectly but then only the audio for the third video plays. I am assuming the video is there, but it is just blocked by the second video. I am confused though because I am using instructions to set the opacity of each asset to 0 when the video is done, but it doesn't work. Also even if I don't set any instructions, the first video disappears to allow the second video to play, but the second video never disappears. What is going on!?
func mergeVideo(completion: @escaping (_ url: URL?, _ error: Error?) -> Void) {
let mixComposition = AVMutableComposition()
let videoComposition = AVMutableVideoComposition()
guard let documentDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first else {
completion(nil, nil)
return
}
let outputURL = documentDirectory.appendingPathComponent("\(id).mov")
do {
if FileManager.default.fileExists(atPath: outputURL.path) {
try FileManager.default.removeItem(at: outputURL)
}
} catch {
print(error.localizedDescription)
}
// If there is only one video, save export time.
if let video = videos.first, videos.count == 1 {
do {
if let url = URL(string: video.videoURL) {
try FileManager().copyItem(at: url, to: outputURL)
completion(outputURL, nil)
mergedVideoURL = outputURL.lastPathComponent
}
} catch let error {
completion(nil, error)
}
return
}
var currentTime = CMTime.zero
let renderSize = CGSize(width: 1280.0, height: 720.0)
let mainInstruction = AVMutableVideoCompositionInstruction()
videos.enumerated().forEach { index, video in
if let vidURL = URL(string: video.videoURL)?.lastPathComponent {
let url = documentDirectory.appendingPathComponent(vidURL)
let asset = AVAsset(url: url)
guard let assetTrack = asset.tracks.first else { return }
mainInstruction.timeRange = CMTimeRangeMake(start: .zero, duration: CMTimeAdd(mixComposition.duration, asset.duration))
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: assetTrack)
instruction.setOpacity(0.0, at: asset.duration)
mainInstruction.layerInstructions.append(instruction)
do {
let timeRange = CMTimeRangeMake(start: .zero, duration: asset.duration)
try mixComposition.insertTimeRange(timeRange, of: asset, at: currentTime)
currentTime = CMTimeAdd(currentTime, asset.duration)
} catch let error {
completion(nil, error)
}
}
}//forEach
videoComposition.instructions = [mainInstruction]
videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
videoComposition.renderSize = renderSize
guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetPassthrough) else {
completion(nil, nil)
return
}
exporter.outputURL = outputURL
exporter.outputFileType = .mov
// Pass Video Composition to the Exporter.
exporter.videoComposition = videoComposition
exporter.exportAsynchronously {
DispatchQueue.main.async {
switch exporter.status {
case .completed:
completion(exporter.outputURL, nil)
case .failed:
completion(exporter.outputURL, exporter.error)
case .cancelled:
completion(exporter.outputURL, exporter.error)
case .unknown:
completion(exporter.outputURL, exporter.error)
case .waiting:
print("waiting")
case .exporting:
print("exporting")
@unknown default:
completion(exporter.outputURL, exporter.error)
}
}
}
}
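As an aside on the instruction timing: each setOpacity(0.0, at:) above fires at asset.duration, i.e. relative to the start of the whole timeline rather than at the end of that clip's slot, and a passthrough preset generally won't apply a videoComposition at all. A hedged sketch of the usual pattern, building one composition video track per clip and switching it off at the clip's cumulative end time (names such as assets, compositionTrack and cursor are illustrative, not from the question):
var cursor = CMTime.zero
for asset in assets {   // `assets` stands in for the loaded AVAssets
    guard
        let assetTrack = asset.tracks(withMediaType: .video).first,
        let compositionTrack = mixComposition.addMutableTrack(withMediaType: .video,
                                                              preferredTrackID: kCMPersistentTrackID_Invalid)
    else { continue }

    let range = CMTimeRange(start: .zero, duration: asset.duration)
    try? compositionTrack.insertTimeRange(range, of: assetTrack, at: cursor)

    // One layer instruction per composition track, hidden once its clip ends.
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: compositionTrack)
    let clipEnd = CMTimeAdd(cursor, asset.duration)
    instruction.setOpacity(0.0, at: clipEnd)
    mainInstruction.layerInstructions.append(instruction)

    cursor = clipEnd
}
mainInstruction.timeRange = CMTimeRange(start: .zero, duration: cursor)
// Use a re-encoding preset (e.g. AVAssetExportPresetHighestQuality) so the
// videoComposition is actually applied.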

AVAssetWriter recorded audio no sound

My app can record the audio from the chat and save it to a file. I recorded some music on the app screen, but when I play back the audio.m4a file there is no sound. The file shows as "Apple MPEG-4 audio" and is 12 KB in size. Did I configure the settings wrong? Thanks in advance.
Edit: I added the stop recording function.
var assetWriter: AVAssetWriter?
var input: AVAssetWriterInput?
var channelLayout = AudioChannelLayout()
func record() {
guard let docURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first else {
return
}
let inputURL = docURL.appendingPathComponent("audio.m4a")
do {
try assetWriter = AVAssetWriter(outputURL: inputURL, fileType: .m4a)
} catch {
print("error: \(error)")
assetWriter = nil
return
}
guard let assetWriter = assetWriter else {
return
}
channelLayout.mChannelLayoutTag = kAudioChannelLayoutTag_MPEG_5_1_D
let audioSettings: [String : Any] = [
AVNumberOfChannelsKey: 6,
AVFormatIDKey: kAudioFormatMPEG4AAC_HE,
AVSampleRateKey: 44100,
AVEncoderBitRateKey: 128000,
AVChannelLayoutKey: NSData(bytes: &channelLayout, length: MemoryLayout.size(ofValue: channelLayout)),
]
input = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings)
guard let audioInput = input else {
print("Failed to find input.")
return
}
audioInput.expectsMediaDataInRealTime = true
if assetWriter.canAdd(audioInput) {
assetWriter.add(audioInput)
}
RPScreenRecorder.shared().startCapture(handler: { (sample, bufferType, error) in
guard error == nil else {
print("Failed to capture with error: \(String(describing: error))")
return
}
if bufferType == .audioApp {
if assetWriter.status == AVAssetWriter.Status.unknown {
if assetWriter.startWriting() {
assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sample))
}
}
if assetWriter.status == AVAssetWriter.Status.writing {
if audioInput.isReadyForMoreMediaData == true {
if audioInput.append(sample) == false {
}
}
}
}
})
}
func stopRecord() {
RPScreenRecorder.shared().stopCapture{ (error) in
self.input?.markAsFinished()
if error == nil {
self.assetWriter?.finishWriting {
print("finish writing")
}
} else {
print(error as Any)
}
}
}
In light of your comments, you definitely don't need six channel audio. Try these simpler mono audio settings.
let audioSettings: [String : Any] = [
AVNumberOfChannelsKey: 1,
AVFormatIDKey: kAudioFormatMPEG4AAC,
AVSampleRateKey: 44100,
AVEncoderBitRateKey: 128000,
]
You don't say whether this is on iOS or macOS. There is a problem on macOS, because as of 11.2.1 no .audioApp buffers are captured. If you still want microphone audio, you can enable that:
let recorder = RPScreenRecorder.shared()
recorder.isMicrophoneEnabled = true

recorder.startCapture(handler: { (sample, bufferType, error) in
    if bufferType == .audioMic {
        // etc
    }
})
Don't bother checking the writer status; just append buffers when you can:
if audioInput.isReadyForMoreMediaData {
    if !audioInput.append(sample) {
        // do something
    }
}
PREVIOUSLY
You need to call assetWriter.finishWriting at some point.
It's interesting that you have 6 channel input. Are you using a special device or some kind of virtual device?
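For completeness, a minimal sketch of a stop routine along those lines, assuming `input` is the class-level AVAssetWriterInput shown in the question:
func stopRecord() {
    RPScreenRecorder.shared().stopCapture { error in
        if let error = error {
            print(error)
            return
        }
        // Flush the remaining samples and close the file.
        self.input?.markAsFinished()
        self.assetWriter?.finishWriting {
            print("finish writing")
        }
    }
}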

Unable to play video when app is rebuilt - swift

The video does not play after the app is rebuilt (the file path URL is saved into Core Data); recording uses AVCaptureSession.
The file path does not change before and after the rebuild:
file:///private/var/mobile/Containers/Data/Application/3DA93FBC-9A20-40B4-A017-B3D5C7768301/tmp/63F6CEED-3202-4F5F-999B-5F138D73635D.mp4
I have tried everything I could find and nothing works.
Here is my code for recording the video:
func setupPreview() {
// Configure previewLayer
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer?.frame = shapeLayer.bounds
previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
shapeLayer.layer.addSublayer(previewLayer!)
}
func setupSession() -> Bool {
captureSession.sessionPreset = AVCaptureSession.Preset.high
// Setup Camera
let camera = AVCaptureDevice.default(for: AVMediaType.video)!
do {
let input = try AVCaptureDeviceInput(device: camera)
if captureSession.canAddInput(input) {
captureSession.addInput(input)
activeInput = input
}
} catch {
print("Error setting device video input: \(error)")
return false
}
// Setup Microphone
let microphone = AVCaptureDevice.default(for: AVMediaType.audio)!
do {
let micInput = try AVCaptureDeviceInput(device: microphone)
if captureSession.canAddInput(micInput) {
captureSession.addInput(micInput)
}
} catch {
print("Error setting device audio input: \(error)")
return false
}
// Movie output
if captureSession.canAddOutput(movieOutput) {
captureSession.addOutput(movieOutput)
}
return true
}
func startSession() {
if !captureSession.isRunning {
videoQueue().async {
self.captureSession.startRunning()
}
}
}
func stopSession() {
if captureSession.isRunning {
videoQueue().async {
self.captureSession.stopRunning()
}
}
}
func videoQueue() -> DispatchQueue {
return DispatchQueue.main
}
func currentVideoOrientation() -> AVCaptureVideoOrientation {
var orientation: AVCaptureVideoOrientation
switch UIDevice.current.orientation {
case .portrait:
orientation = AVCaptureVideoOrientation.portrait
case .landscapeRight:
orientation = AVCaptureVideoOrientation.landscapeLeft
case .portraitUpsideDown:
orientation = AVCaptureVideoOrientation.portraitUpsideDown
default:
orientation = AVCaptureVideoOrientation.landscapeRight
}
return orientation
}
func startRecording() {
if movieOutput.isRecording == false {
save.setTitle("stop", for: UIControl.State.normal)
let connection = movieOutput.connection(with: AVMediaType.video)
if (connection?.isVideoOrientationSupported)! {
connection?.videoOrientation = currentVideoOrientation()
}
if (connection?.isVideoStabilizationSupported)! {
connection?.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.auto
}
let device = activeInput.device
if (device.isSmoothAutoFocusSupported) {
do {
try device.lockForConfiguration()
device.isSmoothAutoFocusEnabled = false
device.unlockForConfiguration()
} catch {
print("Error setting configuration: \(error)")
}
}
//EDIT2: And I forgot this
outputURL = tempURL()
movieOutput.startRecording(to: outputURL, recordingDelegate: self)
}
else {
stopRecording()
}
}
func tempURL() -> URL? {
let directory = NSTemporaryDirectory() as NSString
let path = directory.appendingPathComponent(NSUUID().uuidString + ".mp4")
path22 = path
let directoryURL: URL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
let folderPath: URL = directoryURL.appendingPathComponent("Downloads", isDirectory: true)
let fileURL: URL = folderPath.appendingPathComponent(path)
return URL(fileURLWithPath: path)
}
func stopRecording() {
if movieOutput.isRecording == true {
movieOutput.stopRecording()
}
}
Here is the code saving into Core Data:
let managedObject = self.managedObjectContext
entity = NSEntityDescription.entity(forEntityName: "MediaData", in: managedObject!)
let personMO = NSManagedObject(entity: entity, insertInto: managedObject)
personMO.setValue("\(self.videoURL!)", forKey: "videosS")
personMO.setValue(dataImage, forKey: "thumbnails")
print(personMO)
do
{
try managedObject?.save()
print("video saved")
}
catch
{
print("Catch Erroe : Failed To
}
let appdel = UIApplication.shared.delegate as! AppDelegate
appdel.avplayer = AVPlayer(url: videoURL!)
print(videoURL!)
let playerLayer = AVPlayerLayer(player: appdel.avplayer)
playerLayer.frame = self.view.bounds
self.view.layer.addSublayer(playerLayer)
appdel.avplayer?.play()
You must never save a full filepath into CoreData or anywhere else. File paths are not persistent. Your app is sandboxed. The sandbox path can change at any time, especially between launches and installations.
Instead, save the file name and reconstruct the path each time you need it. Just as you are calling FileManager.default.urls(for: .documentDirectory...) to construct the file path initially, so you must call it every time you want to access this file.
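A minimal sketch of that approach, using the names from the question (and assuming the movie file has been moved into the Documents directory rather than left in tmp, which is also purged between runs):
// Store only the file name in Core Data, not the full sandbox path.
personMO.setValue(videoURL!.lastPathComponent, forKey: "videosS")

// Rebuild a valid URL from the current Documents directory whenever the file is needed.
func resolvedVideoURL(forFileName fileName: String) -> URL {
    let documents = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
    return documents.appendingPathComponent(fileName)
}

// e.g. appdel.avplayer = AVPlayer(url: resolvedVideoURL(forFileName: savedName))
// where `savedName` (illustrative) is the string read back from Core Data.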

Recording video and audio with AVAssetWriter in Swift

I'm trying to add the device's microphone audio to a video recording from the device's camera. The video is filtered with a CIFilter and works as expected. My problem is that the mic audio is not attached to the video once saved.
I have tried setting the audio settings manually, like this:
let audioSettings: [String : Any] = [
    AVFormatIDKey: kAudioFormatMPEG4AAC,
    AVNumberOfChannelsKey: 1,
    AVSampleRateKey: 44100,
    AVEncoderBitRateKey: 64000
]
but using the recommendedAudioSettingsForAssetWriter method seems like the correct approach, since the video recording already works with recommendedVideoSettingsForAssetWriter.
Can anyone tell me how to achieve this or point me in the right direction?
My code so far:
import UIKit
import AVFoundation
class VideoViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {
@IBOutlet weak var imageView: UIImageView!
lazy var cameraDevice: AVCaptureDevice? = {
return AVCaptureDevice.default(for: .video)
}()
lazy var micDevice: AVCaptureDevice? = {
return AVCaptureDevice.default(for: .audio)
}()
var captureSession = AVCaptureSession()
var outputURL: URL!
var orientation: AVCaptureVideoOrientation = .landscapeRight
var filterObject = FilterObject()
var assetWriter: AVAssetWriter?
var assetWriterInput: AVAssetWriterInput?
var pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor?
var fileName = ""
var recordingState = RecordingState.idle
var time: Double = 0
let videoOutput = AVCaptureVideoDataOutput()
let audioOutput = AVCaptureAudioDataOutput()
let context = CIContext()
override func viewDidLoad() {
super.viewDidLoad()
setupCameraDevice()
setupAudioDevice()
setupInputOutput()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
setUpAuthStatus()
}
@IBAction func recordPressed(_ sender: UIButton) {
switch recordingState {
case .idle:
recordingState = .start
case .capturing:
recordingState = .end
default:
break
}
}
func setUpAuthStatus() {
if AVCaptureDevice.authorizationStatus(for: AVMediaType.video) != .authorized {
AVCaptureDevice.requestAccess(for: AVMediaType.video, completionHandler: { (authorized) in
DispatchQueue.main.async {
if authorized {
self.setupInputOutput()
}
}
})
}
if AVCaptureDevice.authorizationStatus(for: AVMediaType.audio) != .authorized {
AVCaptureDevice.requestAccess(for: AVMediaType.audio, completionHandler: { (authorized) in
DispatchQueue.main.async {
if authorized {
self.setupInputOutput()
}
}
})
}
}
func setupCameraDevice() {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .unspecified)
let devices = deviceDiscoverySession.devices
for device in devices {
if device.position == .back {
cameraDevice = device
}
}
}
func setupAudioDevice() {
let audioDeviceDisoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInMicrophone], mediaType: .audio, position: .unspecified)
let devices = audioDeviceDisoverySession.devices
micDevice = devices[0]
}
func setupInputOutput() {
do {
guard let cameraDevice = cameraDevice else { return }
let captureDeviceInput = try AVCaptureDeviceInput(device: cameraDevice)
guard let micDevice = micDevice else { return }
let micDeviceInput = try AVCaptureDeviceInput(device: micDevice)
captureSession.sessionPreset = AVCaptureSession.Preset.hd1920x1080
if captureSession.canAddInput(captureDeviceInput) {
captureSession.addInput(captureDeviceInput)
}
if captureSession.canAddInput(micDeviceInput) {
captureSession.addInput(micDeviceInput)
}
let queue = DispatchQueue(label: "com.apple.sample.capturepipeline.video", attributes: [])
if captureSession.canAddOutput(videoOutput) {
videoOutput.setSampleBufferDelegate(self, queue: queue)
captureSession.addOutput(videoOutput)
}
if captureSession.canAddOutput(audioOutput) {
audioOutput.setSampleBufferDelegate(self, queue: queue)
captureSession.addOutput(audioOutput)
}
captureSession.commitConfiguration()
captureSession.startRunning()
} catch {
print(error)
}
}
func captureOutput(_ output: AVCaptureOutput,
didOutput sampleBuffer: CMSampleBuffer,
from connection: AVCaptureConnection) {
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
audioOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {return}
let cameraImage = CIImage(cvImageBuffer: imageBuffer)
guard let name = filterObject.name else {return}
let effect = FilterType.genericFilter(name: name, cameraImage: cameraImage)
effect.setValue(cameraImage, forKey: kCIInputImageKey)
TableData.setFilterValues(withFilterName: name, effect: effect, values: [value1, value2])
guard let outputImage = effect.outputImage else { return }
context.render(outputImage, to: imageBuffer)
guard let cgImage = self.context.createCGImage(outputImage, from: cameraImage.extent) else { return }
DispatchQueue.main.async {
let filteredImage = UIImage(cgImage: cgImage)
self.imageView.image = filteredImage
}
let timestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer).seconds
switch recordingState {
case .start:
fileName = UUID().uuidString
let videoPath = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!.appendingPathComponent("\(fileName).mov")
let writer = try! AVAssetWriter(outputURL: videoPath, fileType: .mov)
let videoSettings = videoOutput.recommendedVideoSettingsForAssetWriter(writingTo: .mov)
let videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
videoInput.mediaTimeScale = CMTimeScale(bitPattern: 600)
videoInput.expectsMediaDataInRealTime = true
let audioSettings = audioOutput.recommendedAudioSettingsForAssetWriter(writingTo: .m4a)
let audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings as? [String : Any])
audioInput.expectsMediaDataInRealTime = true
//videoInput.transform = CGAffineTransform(rotationAngle: .pi/2)
let pixelAdapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoInput, sourcePixelBufferAttributes: nil)
if writer.canAdd(videoInput) {
writer.add(videoInput)
}
if writer.canAdd(audioInput) {
writer.add(audioInput)
}
writer.startWriting()
writer.startSession(atSourceTime: .zero)
assetWriter = writer
assetWriterInput = videoInput
pixelBufferAdaptor = pixelAdapter
recordingState = .capturing
time = timestamp
case .capturing:
if assetWriterInput?.isReadyForMoreMediaData == true {
let newTime = CMTime(seconds: timestamp - time, preferredTimescale: CMTimeScale(600))
pixelBufferAdaptor?.append(imageBuffer, withPresentationTime: newTime)
}
break
case .end:
guard assetWriterInput?.isReadyForMoreMediaData == true, assetWriter!.status != .failed else { break }
let url = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!.appendingPathComponent("\(fileName).mov")
assetWriterInput?.markAsFinished()
assetWriter?.finishWriting { [weak self] in
self?.recordingState = .idle
self?.assetWriter = nil
self?.assetWriterInput = nil
DispatchQueue.main.async {
let activity = UIActivityViewController(activityItems: [url], applicationActivities: nil)
self?.present(activity, animated: true, completion: nil)
}
}
default:
break
}
}
}
Your audio settings do not look correct. The AVSampleRateKey should come from the sample rate in the format description of the first audio sample buffer that comes in, not a hard-coded 44100, and the AVEncoderBitRateKey should maybe be set to something like Int(48_000).
To get the sample rate, first call
let fmt = CMSampleBufferGetFormatDescription(sampleBuffer)
let asbd = CMAudioFormatDescriptionGetStreamBasicDescription(fmt!)
and then the sample rate will be asbd?.pointee.mSampleRate, and that is the value to set for AVSampleRateKey in the audio settings (I think).
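Putting that together, a sketch of building the writer's audio settings from the first incoming audio buffer (done once, before creating the audio AVAssetWriterInput; the exact bitrate is illustrative):
// Derive the sample rate from the incoming buffer's stream description.
guard let fmt = CMSampleBufferGetFormatDescription(sampleBuffer),
      let asbd = CMAudioFormatDescriptionGetStreamBasicDescription(fmt)?.pointee else { return }

let audioSettings: [String: Any] = [
    AVFormatIDKey: kAudioFormatMPEG4AAC,
    AVNumberOfChannelsKey: 1,
    AVSampleRateKey: asbd.mSampleRate,   // whatever the microphone actually delivers
    AVEncoderBitRateKey: 64_000          // illustrative value
]
let audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings)
audioInput.expectsMediaDataInRealTime = true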

After compressing my audio file, why can I not play the file?

The audio file will not play after reducing it using AVAssetReader/AVAssetWriter.
At the moment, the whole function executes fine, with no errors thrown.
For some reason, when I go into the simulator's documents directory via the terminal, the audio file will not play through iTunes, and QuickTime refuses to open it with the error "QuickTime Player can't open "test1.m4a"".
Does anyone specialise in this area and understand why this isn't working?
protocol FileConverterDelegate {
func fileConversionCompleted()
}
class WKAudioTools: NSObject {
var delegate: FileConverterDelegate?
var url: URL?
var assetReader: AVAssetReader?
var assetWriter: AVAssetWriter?
func convertAudio() {
let documentDirectory = try! FileManager.default.url(for: .documentDirectory, in: .userDomainMask, appropriateFor: nil, create: true)
let exportURL = documentDirectory.appendingPathComponent(Assets.soundName1).appendingPathExtension("m4a")
url = Bundle.main.url(forResource: Assets.soundName1, withExtension: Assets.mp3)
guard let assetURL = url else { return }
let asset = AVAsset(url: assetURL)
//reader
do {
assetReader = try AVAssetReader(asset: asset)
} catch let error {
print("Error with reading >> \(error.localizedDescription)")
}
let assetReaderOutput = AVAssetReaderAudioMixOutput(audioTracks: asset.tracks, audioSettings: nil)
//let assetReaderOutput = AVAssetReaderTrackOutput(track: track!, outputSettings: nil)
guard let assetReader = assetReader else {
print("reader is nil")
return
}
if assetReader.canAdd(assetReaderOutput) == false {
print("Can't add output to the reader ☹️")
return
}
assetReader.add(assetReaderOutput)
// writer
do {
assetWriter = try AVAssetWriter(outputURL: exportURL, fileType: .m4a)
} catch let error {
print("Error with writing >> \(error.localizedDescription)")
}
var channelLayout = AudioChannelLayout()
memset(&channelLayout, 0, MemoryLayout.size(ofValue: channelLayout))
channelLayout.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo
// use different values to affect the downsampling/compression
let outputSettings: [String: Any] = [AVFormatIDKey: kAudioFormatMPEG4AAC,
AVSampleRateKey: 44100.0,
AVNumberOfChannelsKey: 2,
AVEncoderBitRateKey: 128000,
AVChannelLayoutKey: NSData(bytes: &channelLayout, length: MemoryLayout.size(ofValue: channelLayout))]
let assetWriterInput = AVAssetWriterInput(mediaType: .audio, outputSettings: outputSettings)
guard let assetWriter = assetWriter else { return }
if assetWriter.canAdd(assetWriterInput) == false {
print("Can't add asset writer input ☹️")
return
}
assetWriter.add(assetWriterInput)
assetWriterInput.expectsMediaDataInRealTime = false
// MARK: - File conversion
assetWriter.startWriting()
assetReader.startReading()
let audioTrack = asset.tracks[0]
let startTime = CMTime(seconds: 0, preferredTimescale: audioTrack.naturalTimeScale)
assetWriter.startSession(atSourceTime: startTime)
// We need to do this on another thread, so let's set up a dispatch group...
var convertedByteCount = 0
let dispatchGroup = DispatchGroup()
let mediaInputQueue = DispatchQueue(label: "mediaInputQueue")
//... and go
dispatchGroup.enter()
assetWriterInput.requestMediaDataWhenReady(on: mediaInputQueue) {
while assetWriterInput.isReadyForMoreMediaData {
let nextBuffer = assetReaderOutput.copyNextSampleBuffer()
if nextBuffer != nil {
assetWriterInput.append(nextBuffer!) // FIXME: Handle this safely
convertedByteCount += CMSampleBufferGetTotalSampleSize(nextBuffer!)
} else {
// done!
assetWriterInput.markAsFinished()
assetReader.cancelReading()
dispatchGroup.leave()
DispatchQueue.main.async {
// Notify delegate that conversion is complete
self.delegate?.fileConversionCompleted()
print("Process complete 🎧")
if assetWriter.status == .failed {
print("Writing asset failed ☹️ Error: ", assetWriter.error)
}
}
break
}
}
}
}
}
You need to call finishWriting on your AVAssetWriter to get the output completely written:
assetWriter.finishWriting {
    DispatchQueue.main.async {
        // Notify delegate that conversion is complete
        self.delegate?.fileConversionCompleted()
        print("Process complete 🎧")
        if assetWriter.status == .failed {
            print("Writing asset failed ☹️ Error: ", assetWriter.error)
        }
    }
}
If exportURL exists before you start the conversion, you should remove it, otherwise the conversion will fail:
try! FileManager.default.removeItem(at: exportURL)
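If you would rather not crash when the file is not there, guard the removal:
if FileManager.default.fileExists(atPath: exportURL.path) {
    try? FileManager.default.removeItem(at: exportURL)
}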
As @matt points out, why the buffer stuff when you could do the conversion more simply with an AVAssetExportSession, and also why convert one of your own assets when you could distribute it already in the desired format?
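For reference, a minimal sketch of the AVAssetExportSession route (the preset choice and error handling are illustrative):
// Convert the bundled asset to .m4a with an export session instead of a reader/writer loop.
let session = AVAssetExportSession(asset: asset, presetName: AVAssetExportPresetAppleM4A)
session?.outputURL = exportURL
session?.outputFileType = .m4a
session?.exportAsynchronously {
    if session?.status == .completed {
        print("Process complete 🎧")
    } else {
        print("Export failed:", session?.error ?? "unknown error")
    }
}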