Unable to play video when app is rebuilt (Swift)

The video does not play after the app is rebuilt (the file path URL is saved into Core Data); recording is done with AVCaptureSession.
The file path is unchanged before and after the rebuild:
file:///private/var/mobile/Containers/Data/Application/3DA93FBC-9A20-40B4-A017-B3D5C7768301/tmp/63F6CEED-3202-4F5F-999B-5F138D73635D.mp4
I have tried everything I could find and nothing works.
Here is my code for recording the video:
func setupPreview() {
// Configure previewLayer
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer?.frame = shapeLayer.bounds
previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
shapeLayer.layer.addSublayer(previewLayer!)
}
func setupSession() -> Bool {
captureSession.sessionPreset = AVCaptureSession.Preset.high
// Setup Camera
let camera = AVCaptureDevice.default(for: AVMediaType.video)!
do {
let input = try AVCaptureDeviceInput(device: camera)
if captureSession.canAddInput(input) {
captureSession.addInput(input)
activeInput = input
}
} catch {
print("Error setting device video input: \(error)")
return false
}
// Setup Microphone
let microphone = AVCaptureDevice.default(for: AVMediaType.audio)!
do {
let micInput = try AVCaptureDeviceInput(device: microphone)
if captureSession.canAddInput(micInput) {
captureSession.addInput(micInput)
}
} catch {
print("Error setting device audio input: \(error)")
return false
}
// Movie output
if captureSession.canAddOutput(movieOutput) {
captureSession.addOutput(movieOutput)
}
return true
}
func startSession() {
if !captureSession.isRunning {
videoQueue().async {
self.captureSession.startRunning()
}
}
}
func stopSession() {
if captureSession.isRunning {
videoQueue().async {
self.captureSession.stopRunning()
}
}
}
func videoQueue() -> DispatchQueue {
return DispatchQueue.main
}
func currentVideoOrientation() -> AVCaptureVideoOrientation {
var orientation: AVCaptureVideoOrientation
switch UIDevice.current.orientation {
case .portrait:
orientation = AVCaptureVideoOrientation.portrait
case .landscapeRight:
orientation = AVCaptureVideoOrientation.landscapeLeft
case .portraitUpsideDown:
orientation = AVCaptureVideoOrientation.portraitUpsideDown
default:
orientation = AVCaptureVideoOrientation.landscapeRight
}
return orientation
}
func startRecording() {
if movieOutput.isRecording == false {
save.setTitle("stop", for: UIControl.State.normal)
let connection = movieOutput.connection(with: AVMediaType.video)
if (connection?.isVideoOrientationSupported)! {
connection?.videoOrientation = currentVideoOrientation()
}
if (connection?.isVideoStabilizationSupported)! {
connection?.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.auto
}
let device = activeInput.device
if (device.isSmoothAutoFocusSupported) {
do {
try device.lockForConfiguration()
device.isSmoothAutoFocusEnabled = false
device.unlockForConfiguration()
} catch {
print("Error setting configuration: \(error)")
}
}
//EDIT2: And I forgot this
outputURL = tempURL()
movieOutput.startRecording(to: outputURL, recordingDelegate: self)
}
else {
stopRecording()
}
}
func tempURL() -> URL? {
let directory = NSTemporaryDirectory() as NSString
let path = directory.appendingPathComponent(NSUUID().uuidString + ".mp4")
path22 = path
let directoryURL: URL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
let folderPath: URL = directoryURL.appendingPathComponent("Downloads", isDirectory: true)
let fileURL: URL = folderPath.appendingPathComponent(path)
return URL(fileURLWithPath: path)
}
func stopRecording() {
if movieOutput.isRecording == true {
movieOutput.stopRecording()
}
}
Here is how I save into Core Data:
let managedObject = self.managedObjectContext
entity = NSEntityDescription.entity(forEntityName: "MediaData", in: managedObject!)
let personMO = NSManagedObject(entity: entity, insertInto: managedObject)
personMO.setValue("\(self.videoURL!)", forKey: "videosS")
personMO.setValue(dataImage, forKey: "thumbnails")
print(personMO)
do
{
try managedObject?.save()
print("video saved")
}
catch
{
print("Catch Error: failed to save")
}
let appdel = UIApplication.shared.delegate as! AppDelegate
appdel.avplayer = AVPlayer(url: videoURL!)
print(videoURL!)
let playerLayer = AVPlayerLayer(player: appdel.avplayer)
playerLayer.frame = self.view.bounds
self.view.layer.addSublayer(playerLayer)
appdel.avplayer?.play()

You must never save a full filepath into CoreData or anywhere else. File paths are not persistent. Your app is sandboxed. The sandbox path can change at any time, especially between launches and installations.
Instead, save the file name and reconstruct the path each time you need it. Just as you are calling FileManager.default.urls(for: .documentDirectory...) to construct the file path initially, so you must call it every time you want to access this file.
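A minimal sketch of that idea (my own illustration, not from the question; it assumes the recording is moved out of tmp into the Documents directory, and it reuses the videosS attribute and personMO object from the code above; savedVideoURL is a hypothetical helper name):
// After recording finishes: move the file out of tmp and keep only its name.
let fileName = outputURL.lastPathComponent
let documents = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
let destination = documents.appendingPathComponent(fileName)
try? FileManager.default.moveItem(at: outputURL, to: destination)
personMO.setValue(fileName, forKey: "videosS") // store the name, not the full path

// Whenever the video is needed again, rebuild the URL inside the current sandbox.
func savedVideoURL(forName name: String) -> URL {
    let documents = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
    return documents.appendingPathComponent(name)
}
Playback then becomes AVPlayer(url: savedVideoURL(forName: storedName)), where storedName is whatever was read back from Core Data.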

Related

copyNextSampleBuffer hanging for AVAssetReaderTrackOutput

I am trying to create a video compression tool which simply takes in a video file URL, compresses it, and returns the new URL, in Swift 5 for iOS 16.
I have been following a few tutorials online, but they all use functions which have since been deprecated, so I have put together this code, which is a refactoring from various sources:
import Foundation
import AVFoundation
class VideoCompressorModel: ObservableObject {
let bitrate = 2_500_000 // bits per second (2.5 Mbps)
func compressFile(urlToCompress: URL, outputURL: URL, completionHandler: @escaping (URL?) -> Void) async {
let asset = AVAsset(url: urlToCompress);
//create asset reader
var assetReader: AVAssetReader?
do{
assetReader = try AVAssetReader(asset: asset)
} catch{
assetReader = nil
}
guard let reader = assetReader else{
completionHandler(nil)
return
}
var videoTrack: AVAssetTrack?
var audioTrack: AVAssetTrack?
do {
videoTrack = try await asset.loadTracks(withMediaType: AVMediaType.video).first
audioTrack = try await asset.loadTracks(withMediaType: AVMediaType.audio).first
} catch {
completionHandler(nil)
return
}
guard let videoTrack, let audioTrack else {
completionHandler(nil)
return
}
let videoReaderSettings: [String:Any] = [kCVPixelBufferPixelFormatTypeKey as String:kCVPixelFormatType_32ARGB ]
// ADJUST BIT RATE OF VIDEO HERE
var videoSettings: [String:Any]?
do {
videoSettings = await [
AVVideoCompressionPropertiesKey: [AVVideoAverageBitRateKey:self.bitrate],
AVVideoCodecKey: AVVideoCodecType.h264,
AVVideoHeightKey: try videoTrack.load(.naturalSize).height,
AVVideoWidthKey: try videoTrack.load(.naturalSize).width
]
} catch {
completionHandler(nil)
return
}
guard let videoSettings else {
completionHandler(nil)
return
}
let audioSettings = [
AVSampleRateKey: 44100,
AVFormatIDKey: kAudioFormatLinearPCM
]
let assetReaderVideoOutput = AVAssetReaderTrackOutput(track: videoTrack, outputSettings: videoReaderSettings)
let assetReaderAudioOutput = AVAssetReaderTrackOutput(track: audioTrack, outputSettings: audioSettings)
if reader.canAdd(assetReaderVideoOutput){
reader.add(assetReaderVideoOutput)
}else{
completionHandler(nil)
return
}
if reader.canAdd(assetReaderAudioOutput){
reader.add(assetReaderAudioOutput)
} else{
completionHandler(nil)
return
}
let audioInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: nil)
let videoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoSettings)
do {
videoInput.transform = try await videoTrack.load(.preferredTransform)
} catch{
completionHandler(nil)
return
}
//we need to add samples to the video input
let videoInputQueue = DispatchQueue(label: "videoQueue")
let audioInputQueue = DispatchQueue(label: "audioQueue")
var assetWriter: AVAssetWriter?
do{
assetWriter = try AVAssetWriter(outputURL: outputURL, fileType: AVFileType.mov)
} catch {
assetWriter = nil
}
guard let assetWriter else{
completionHandler(nil)
return
}
assetWriter.shouldOptimizeForNetworkUse = true
assetWriter.add(videoInput)
assetWriter.add(audioInput)
assetWriter.startWriting()
reader.startReading()
assetWriter.startSession(atSourceTime: CMTime.zero)
let closeWriterBothDone:()->Void = {
assetWriter.finishWriting(completionHandler: {
completionHandler((assetWriter.outputURL))
})
reader.cancelReading()
}
print("STARTING ")
let closeWriterAudioDone:()->Void = {
print("FINISHED AUDIO")
videoInput.requestMediaDataWhenReady(on: videoInputQueue) {
while(videoInput.isReadyForMoreMediaData){
let sample = assetReaderVideoOutput.copyNextSampleBuffer()
if (sample != nil){
videoInput.append(sample!)
} else{
videoInput.markAsFinished()
DispatchQueue.main.async {
closeWriterBothDone()
}
break;
}
}
}
}
audioInput.requestMediaDataWhenReady(on: audioInputQueue) {
while(audioInput.isReadyForMoreMediaData){
let sample = assetReaderAudioOutput.copyNextSampleBuffer()
print("hi")
if (sample != nil){
audioInput.append(sample!)
} else{
audioInput.markAsFinished()
DispatchQueue.main.async {
closeWriterAudioDone()
}
break;
}
}
}
}
}
The issue that occurs is that copyNextSampleBuffer for the audio track output hangs after a few calls. I test this simply by having a print("hi") which only gets called a handful of times before the code blocks.
I have tried changing the audio options provided to AVAssetReaderTrackOutput, which I originally had simply as nil, and this only had the effect of increasing the number of times copyNextSampleBuffer gets called before it freezes.
Perhaps there is a better way overall to compress video in newer versions of Swift, as this seems somewhat inelegant? If not, are there any bugs in my code that are causing it to hang?
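One plausible reading of the code above (an assumption on my part, not a confirmed diagnosis): the video output is only drained after the audio loop finishes, because videoInput.requestMediaDataWhenReady is wrapped inside closeWriterAudioDone, and an AVAssetReader can stall when one of its added outputs is never read, which would leave copyNextSampleBuffer on the audio output blocked. A sketch that drains both outputs concurrently, reusing the names from the question, might look like this:
let group = DispatchGroup()

group.enter()
videoInput.requestMediaDataWhenReady(on: videoInputQueue) {
    while videoInput.isReadyForMoreMediaData {
        if let sample = assetReaderVideoOutput.copyNextSampleBuffer() {
            videoInput.append(sample)
        } else {
            videoInput.markAsFinished() // no more video samples
            group.leave()
            break
        }
    }
}

group.enter()
audioInput.requestMediaDataWhenReady(on: audioInputQueue) {
    while audioInput.isReadyForMoreMediaData {
        if let sample = assetReaderAudioOutput.copyNextSampleBuffer() {
            audioInput.append(sample)
        } else {
            audioInput.markAsFinished() // no more audio samples
            group.leave()
            break
        }
    }
}

// Finish writing only after both inputs are done.
group.notify(queue: .main) {
    reader.cancelReading()
    assetWriter.finishWriting {
        completionHandler(assetWriter.outputURL)
    }
}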

Why does adding an AVCapturePhotoOutput to my AVCaptureSession cause the AVCaptureVideoPreviewLayer to break on iPhone X?

After configuring my video layer as such:
public class VideoLayerView: UIView {
override public class var layerClass: Swift.AnyClass {
return AVCaptureVideoPreviewLayer.self
}
public override func awakeFromNib() {
super.awakeFromNib()
self.clipsToBounds = true
}
public func configureCaptureLayer(session: AVCaptureSession?) {
guard let captureLayer = self.layer as? AVCaptureVideoPreviewLayer else { return }
captureLayer.session = session
captureLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
}
}
And setting up the session:
func initializeCamera() {
guard !captureSession.isRunning else {
print("Capture session already running")
return
}
guard Permissions.shared.isCameraAuthorized else {
print("Requesting Camera Permission")
Permissions.shared.requestCamera { _ in
DispatchQueue.main.async {
self.initializeCamera()
}
}
return
}
captureSession.beginConfiguration()
captureSession.sessionPreset = .photo
if let captureDevice = self.captureDevice {
if let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice), captureSession.canAddInput(captureDeviceInput) {
captureSession.addInput(captureDeviceInput)
} else {
print("Failed to add capture device input.")
}
}
photoOutput.maxPhotoQualityPrioritization = .quality
if captureSession.canAddOutput(photoOutput) {
photoOutput.isHighResolutionCaptureEnabled = true
captureSession.addOutput(photoOutput)
} else {
print("Failed to add photo output")
}
captureSession.commitConfiguration()
if let connection = photoOutput.connection(with: .video) {
connection.preferredVideoStabilizationMode = .standard
}
videolayerView.configureCaptureLayer(session: captureSession)
sessionQueue.async { [weak self] in
self?.captureSession.startRunning()
}
}
the preview layer displays black on my iPhone X. However, it continues to function on other test devices. Removing:
if captureSession.canAddOutput(photoOutput) {
photoOutput.isHighResolutionCaptureEnabled = true
captureSession.addOutput(photoOutput)
} else {
print("Failed to add photo output")
}
resolves the issue, but this code is necessary to capture photos. Why is this not working?
This is due to a camera state bug in iOS caused by code written earlier (and since removed) that incorrectly configured the camera. Restart your device and the camera will resume functioning.

Recording video and audio with AVAssetWriter in Swift

I'm trying to add the device's microphone audio to a video recording from the device's camera. The video is filtered with a CIFilter and works as expected. My problem is that the mic audio is not attached to the video once it is saved.
I have tried setting the audio settings manually like this:
let audioSettings : [String : Any] = [
AVFormatIDKey : kAudioFormatMPEG4AAC,
AVNumberOfChannelsKey: 1,
AVSampleRateKey : 44100,
AVEncoderBitRateKey : 64000
]
but using the recommendedAudioSettingsForAssetWriter method seems like the correct approach, since the video recording works with the corresponding recommendedVideoSettingsForAssetWriter method.
Can anyone tell me how to achieve this or point me in the right direction?
My code so far:
import UIKit
import AVFoundation
class VideoViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {
@IBOutlet weak var imageView: UIImageView!
lazy var cameraDevice: AVCaptureDevice? = {
return AVCaptureDevice.default(for: .video)
}()
lazy var micDevice: AVCaptureDevice? = {
return AVCaptureDevice.default(for: .audio)
}()
var captureSession = AVCaptureSession()
var outputURL: URL!
var orientation: AVCaptureVideoOrientation = .landscapeRight
var filterObject = FilterObject()
var assetWriter: AVAssetWriter?
var assetWriterInput: AVAssetWriterInput?
var pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor?
var fileName = ""
var recordingState = RecordingState.idle
var time: Double = 0
let videoOutput = AVCaptureVideoDataOutput()
let audioOutput = AVCaptureAudioDataOutput()
let context = CIContext()
override func viewDidLoad() {
super.viewDidLoad()
setupCameraDevice()
setupAudioDevice()
setupInputOutput()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
setUpAuthStatus()
}
@IBAction func recordPressed(_ sender: UIButton) {
switch recordingState {
case .idle:
recordingState = .start
case .capturing:
recordingState = .end
default:
break
}
}
func setUpAuthStatus() {
if AVCaptureDevice.authorizationStatus(for: AVMediaType.video) != .authorized {
AVCaptureDevice.requestAccess(for: AVMediaType.video, completionHandler: { (authorized) in
DispatchQueue.main.async {
if authorized {
self.setupInputOutput()
}
}
})
}
if AVCaptureDevice.authorizationStatus(for: AVMediaType.audio) != .authorized {
AVCaptureDevice.requestAccess(for: AVMediaType.audio, completionHandler: { (authorized) in
DispatchQueue.main.async {
if authorized {
self.setupInputOutput()
}
}
})
}
}
func setupCameraDevice() {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .unspecified)
let devices = deviceDiscoverySession.devices
for device in devices {
if device.position == .back {
cameraDevice = device
}
}
}
func setupAudioDevice() {
let audioDeviceDisoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInMicrophone], mediaType: .audio, position: .unspecified)
let devices = audioDeviceDisoverySession.devices
micDevice = devices[0]
}
func setupInputOutput() {
do {
guard let cameraDevice = cameraDevice else { return }
let captureDeviceInput = try AVCaptureDeviceInput(device: cameraDevice)
guard let micDevice = micDevice else { return }
let micDeviceInput = try AVCaptureDeviceInput(device: micDevice)
captureSession.sessionPreset = AVCaptureSession.Preset.hd1920x1080
if captureSession.canAddInput(captureDeviceInput) {
captureSession.addInput(captureDeviceInput)
}
if captureSession.canAddInput(micDeviceInput) {
captureSession.addInput(micDeviceInput)
}
let queue = DispatchQueue(label: "com.apple.sample.capturepipeline.video", attributes: [])
if captureSession.canAddOutput(videoOutput) {
videoOutput.setSampleBufferDelegate(self, queue: queue)
captureSession.addOutput(videoOutput)
}
if captureSession.canAddOutput(audioOutput) {
audioOutput.setSampleBufferDelegate(self, queue: queue)
captureSession.addOutput(audioOutput)
}
captureSession.commitConfiguration()
captureSession.startRunning()
} catch {
print(error)
}
}
func captureOutput(_ output: AVCaptureOutput,
didOutput sampleBuffer: CMSampleBuffer,
from connection: AVCaptureConnection) {
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
audioOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {return}
let cameraImage = CIImage(cvImageBuffer: imageBuffer)
guard let name = filterObject.name else {return}
let effect = FilterType.genericFilter(name: name, cameraImage: cameraImage)
effect.setValue(cameraImage, forKey: kCIInputImageKey)
TableData.setFilterValues(withFilterName: name, effect: effect, values: [value1, value2])
guard let outputImage = effect.outputImage else { return }
context.render(outputImage, to: imageBuffer)
guard let cgImage = self.context.createCGImage(outputImage, from: cameraImage.extent) else { return }
DispatchQueue.main.async {
let filteredImage = UIImage(cgImage: cgImage)
self.imageView.image = filteredImage
}
let timestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer).seconds
switch recordingState {
case .start:
fileName = UUID().uuidString
let videoPath = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!.appendingPathComponent("\(fileName).mov")
let writer = try! AVAssetWriter(outputURL: videoPath, fileType: .mov)
let videoSettings = videoOutput.recommendedVideoSettingsForAssetWriter(writingTo: .mov)
let videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
videoInput.mediaTimeScale = CMTimeScale(bitPattern: 600)
videoInput.expectsMediaDataInRealTime = true
let audioSettings = audioOutput.recommendedAudioSettingsForAssetWriter(writingTo: .m4a)
let audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings as? [String : Any])
audioInput.expectsMediaDataInRealTime = true
//videoInput.transform = CGAffineTransform(rotationAngle: .pi/2)
let pixelAdapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoInput, sourcePixelBufferAttributes: nil)
if writer.canAdd(videoInput) {
writer.add(videoInput)
}
if writer.canAdd(audioInput) {
writer.add(audioInput)
}
writer.startWriting()
writer.startSession(atSourceTime: .zero)
assetWriter = writer
assetWriterInput = videoInput
pixelBufferAdaptor = pixelAdapter
recordingState = .capturing
time = timestamp
case .capturing:
if assetWriterInput?.isReadyForMoreMediaData == true {
let newTime = CMTime(seconds: timestamp - time, preferredTimescale: CMTimeScale(600))
pixelBufferAdaptor?.append(imageBuffer, withPresentationTime: newTime)
}
break
case .end:
guard assetWriterInput?.isReadyForMoreMediaData == true, assetWriter!.status != .failed else { break }
let url = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!.appendingPathComponent("\(fileName).mov")
assetWriterInput?.markAsFinished()
assetWriter?.finishWriting { [weak self] in
self?.recordingState = .idle
self?.assetWriter = nil
self?.assetWriterInput = nil
DispatchQueue.main.async {
let activity = UIActivityViewController(activityItems: [url], applicationActivities: nil)
self?.present(activity, animated: true, completion: nil)
}
}
default:
break
}
}
}
Your audio settings do not look correct. The AVSampleRateKey should come from the sample rate in the format description of the first audio sample buffer that comes in. Your value of 44100 belongs with the AVEncoderBitRateKey instead, and that should maybe be set to something like AVEncoderBitRateKey: Int(48_000).
To get the sample rate, first call:
let fmt = CMSampleBufferGetFormatDescription(sampleBuffer)
let asbd = CMAudioFormatDescriptionGetStreamBasicDescription(fmt!)
The sample rate will then be asbd?.pointee.mSampleRate, and that should be set as the AVSampleRateKey value in the audio settings (I think).
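Put together, a sketch of building the writer's audio settings from the first audio sample buffer you receive (an illustration only: sampleBuffer here is assumed to be an audio buffer, which in the posted code would have to be detected before the early return on CMSampleBufferGetImageBuffer, and the 64000 bitrate is just the value from the question's manual settings):
guard let fmt = CMSampleBufferGetFormatDescription(sampleBuffer),
      let asbd = CMAudioFormatDescriptionGetStreamBasicDescription(fmt) else { return }

let audioSettings: [String: Any] = [
    AVFormatIDKey: kAudioFormatMPEG4AAC,
    AVNumberOfChannelsKey: Int(asbd.pointee.mChannelsPerFrame),
    AVSampleRateKey: asbd.pointee.mSampleRate, // taken from the incoming buffer
    AVEncoderBitRateKey: 64000
]
let audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings)
audioInput.expectsMediaDataInRealTime = true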

After compressing my audio file, why can I not play the file?

The audio file will not play after reducing it using AVAssetReader/AVAssetWriter.
At the moment, the whole function executes fine, with no errors thrown.
For some reason, when I go into the simulator's documents directory via Terminal, the audio file will not play through iTunes, and QuickTime shows an error when trying to open it: "QuickTime Player can't open "test1.m4a"".
Does anyone specialise in this area and understand why this isn't working?
protocol FileConverterDelegate {
func fileConversionCompleted()
}
class WKAudioTools: NSObject {
var delegate: FileConverterDelegate?
var url: URL?
var assetReader: AVAssetReader?
var assetWriter: AVAssetWriter?
func convertAudio() {
let documentDirectory = try! FileManager.default.url(for: .documentDirectory, in: .userDomainMask, appropriateFor: nil, create: true)
let exportURL = documentDirectory.appendingPathComponent(Assets.soundName1).appendingPathExtension("m4a")
url = Bundle.main.url(forResource: Assets.soundName1, withExtension: Assets.mp3)
guard let assetURL = url else { return }
let asset = AVAsset(url: assetURL)
//reader
do {
assetReader = try AVAssetReader(asset: asset)
} catch let error {
print("Error with reading >> \(error.localizedDescription)")
}
let assetReaderOutput = AVAssetReaderAudioMixOutput(audioTracks: asset.tracks, audioSettings: nil)
//let assetReaderOutput = AVAssetReaderTrackOutput(track: track!, outputSettings: nil)
guard let assetReader = assetReader else {
print("reader is nil")
return
}
if assetReader.canAdd(assetReaderOutput) == false {
print("Can't add output to the reader ☹️")
return
}
assetReader.add(assetReaderOutput)
// writer
do {
assetWriter = try AVAssetWriter(outputURL: exportURL, fileType: .m4a)
} catch let error {
print("Error with writing >> \(error.localizedDescription)")
}
var channelLayout = AudioChannelLayout()
memset(&channelLayout, 0, MemoryLayout.size(ofValue: channelLayout))
channelLayout.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo
// use different values to affect the downsampling/compression
let outputSettings: [String: Any] = [AVFormatIDKey: kAudioFormatMPEG4AAC,
AVSampleRateKey: 44100.0,
AVNumberOfChannelsKey: 2,
AVEncoderBitRateKey: 128000,
AVChannelLayoutKey: NSData(bytes: &channelLayout, length: MemoryLayout.size(ofValue: channelLayout))]
let assetWriterInput = AVAssetWriterInput(mediaType: .audio, outputSettings: outputSettings)
guard let assetWriter = assetWriter else { return }
if assetWriter.canAdd(assetWriterInput) == false {
print("Can't add asset writer input ☹️")
return
}
assetWriter.add(assetWriterInput)
assetWriterInput.expectsMediaDataInRealTime = false
// MARK: - File conversion
assetWriter.startWriting()
assetReader.startReading()
let audioTrack = asset.tracks[0]
let startTime = CMTime(seconds: 0, preferredTimescale: audioTrack.naturalTimeScale)
assetWriter.startSession(atSourceTime: startTime)
// We need to do this on another thread, so let's set up a dispatch group...
var convertedByteCount = 0
let dispatchGroup = DispatchGroup()
let mediaInputQueue = DispatchQueue(label: "mediaInputQueue")
//... and go
dispatchGroup.enter()
assetWriterInput.requestMediaDataWhenReady(on: mediaInputQueue) {
while assetWriterInput.isReadyForMoreMediaData {
let nextBuffer = assetReaderOutput.copyNextSampleBuffer()
if nextBuffer != nil {
assetWriterInput.append(nextBuffer!) // FIXME: Handle this safely
convertedByteCount += CMSampleBufferGetTotalSampleSize(nextBuffer!)
} else {
// done!
assetWriterInput.markAsFinished()
assetReader.cancelReading()
dispatchGroup.leave()
DispatchQueue.main.async {
// Notify delegate that conversion is complete
self.delegate?.fileConversionCompleted()
print("Process complete 🎧")
if assetWriter.status == .failed {
print("Writing asset failed ☹️ Error: ", assetWriter.error)
}
}
break
}
}
}
}
}
You need to call finishWriting on your AVAssetWriter to get the output completely written:
assetWriter.finishWriting {
DispatchQueue.main.async {
// Notify delegate that conversion is complete
self.delegate?.fileConversionCompleted()
print("Process complete 🎧")
if assetWriter.status == .failed {
print("Writing asset failed ☹️ Error: ", assetWriter.error)
}
}
}
If exportURL exists before you start the conversion, you should remove it, otherwise the conversion will fail:
try! FileManager.default.removeItem(at: exportURL)
As @matt points out, why do the buffer-level work when you could do the conversion more simply with an AVAssetExportSession, and why convert one of your own assets at all when you could distribute it already in the desired format?
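For reference, the AVAssetExportSession route might look roughly like this (a sketch under the assumption that the Apple M4A preset suits your source; exportToM4A is a hypothetical helper name):
func exportToM4A(from assetURL: URL, to exportURL: URL, completion: @escaping (Bool) -> Void) {
    let asset = AVAsset(url: assetURL)
    // Remove any previous output first, otherwise the export will fail.
    try? FileManager.default.removeItem(at: exportURL)
    guard let session = AVAssetExportSession(asset: asset, presetName: AVAssetExportPresetAppleM4A) else {
        completion(false)
        return
    }
    session.outputURL = exportURL
    session.outputFileType = .m4a
    session.exportAsynchronously {
        completion(session.status == .completed)
    }
}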

Couple of issues with a custom camera

I currently have a custom camera implemented in my application. I am running into two small issues.
1) When I switch between the camera views (front and back), the audio input dies and only video is recorded.
2) My method for deciding which camera (front or back) is which is deprecated, and I don't know exactly how to resolve it. The deprecated part is the API used to store the devices in variables; Xcode tells me: "Use AVCaptureDeviceDiscoverySession instead." The code is as follows:
let devices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo) as! [AVCaptureDevice]
// Get the front and back-facing camera for taking photos
for device in devices {
if device.position == AVCaptureDevicePosition.back {
backFacingCamera = device
} else if device.position == AVCaptureDevicePosition.front {
frontFacingCamera = device
}
}
currentDevice = backFacingCamera
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: currentDevice) else {
return
}
As for the general camera recording, here is the code:
My variables:
let captureSession = AVCaptureSession()
var currentDevice:AVCaptureDevice?
var backFacingCamera: AVCaptureDevice?
var frontFacingCamera: AVCaptureDevice?
var videoFileOutput : AVCaptureMovieFileOutput?
var cameraPreviewLayer : AVCaptureVideoPreviewLayer?
#IBOutlet weak var recordingView: UIView!
switching cameras:
var device = AVCaptureDevice.defaultDevice(withDeviceType: .builtInWideAngleCamera, mediaType: AVMediaTypeVideo, position: .back)
func switchCameras() {
captureSession.beginConfiguration()
// Change the device based on the current camera
let newDevice = (currentDevice?.position == AVCaptureDevicePosition.back) ? frontFacingCamera : backFacingCamera
// Remove all inputs from the session
for input in captureSession.inputs {
captureSession.removeInput(input as! AVCaptureDeviceInput)
}
// Change to the new input
let cameraInput:AVCaptureDeviceInput
do {
cameraInput = try AVCaptureDeviceInput(device: newDevice)
} catch {
print(error)
return
}
if captureSession.canAddInput(cameraInput) {
captureSession.addInput(cameraInput)
}
currentDevice = newDevice
captureSession.commitConfiguration()
if currentDevice?.position == .front {
flashButton.isHidden = true
flashButton.isEnabled = false
} else if currentDevice?.position == .back {
flashButton.isHidden = false
flashButton.isEnabled = true
}
}
And in my viewWillAppear:
mediaViewCapture.frame = CGRect(x: self.view.frame.size.width * 0, y: self.view.frame.size.height * 0, width:self.view.frame.size.width, height: self.view.frame.size.height)
self.view.addSubview(mediaViewCapture)
captureSession.sessionPreset = AVCaptureSessionPresetHigh
let devices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo) as! [AVCaptureDevice]
// Get the front and back-facing camera for taking photos
for device in devices {
if device.position == AVCaptureDevicePosition.back {
backFacingCamera = device
} else if device.position == AVCaptureDevicePosition.front {
frontFacingCamera = device
}
}
currentDevice = backFacingCamera
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: currentDevice) else {
return
}
let audioInputDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeAudio)
do
{
let audioInput = try AVCaptureDeviceInput(device: audioInputDevice)
// Add Audio Input
if captureSession.canAddInput(audioInput)
{
captureSession.addInput(audioInput)
}
else
{
NSLog("Can't Add Audio Input")
}
}
catch let error
{
NSLog("Error Getting Input Device: \(error)")
}
videoFileOutput = AVCaptureMovieFileOutput()
captureSession.addInput(captureDeviceInput)
captureSession.addOutput(videoFileOutput)
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(cameraPreviewLayer!)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
cameraPreviewLayer?.frame = mediaViewCapture.layer.frame
captureSession.startRunning()
And finally my capture delegate method:
func capture(_ captureOutput: AVCaptureFileOutput!, didFinishRecordingToOutputFileAt outputFileURL: URL!, fromConnections connections: [Any]!, error: Error!) {
if error == nil {
turnFlashOff()
let videoVC = VideoPreviewVC()
videoVC.url = outputFileURL
self.navigationController?.pushViewController(videoVC, animated: false)
} else {
print("Error saving the video \(error)")
}
}
You can use AVCaptureDeviceDiscoverySession instead of the deprecated AVCaptureDevice.devices API. The code for it is as follows:
let deviceDiscovery = AVCaptureDeviceDiscoverySession(deviceTypes: [AVCaptureDeviceType.builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: .back)
let devices = deviceDiscovery?.devices
for device in devices! {
if device.hasMediaType(AVMediaTypeVideo) {
captureDevice = device
}
}
AVCaptureDeviceType has the following types: builtInMicrophone, builtInWideAngleCamera, builtInTelephotoCamera, builtInDualCamera and builtInDuoCamera.
The audio input issue when the camera is switched still needs to be looked into.
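On the audio point, one thing worth checking (an observation about the posted code, not a confirmed fix): switchCameras() removes every input from the session, including the microphone input, and then only re-adds a camera input. A sketch that removes just the video input instead, keeping the audio input alive across switches:
// Inside switchCameras(): swap only the video device input.
for input in captureSession.inputs {
    if let deviceInput = input as? AVCaptureDeviceInput, deviceInput.device.hasMediaType(AVMediaTypeVideo) {
        captureSession.removeInput(deviceInput)
    }
}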