How to adjust AVCaptureVideoPreviewLayer orientation to the device's current orientation? - swift

I am trying to adjust the AVCaptureVideoPreviewLayer orientation to the device's current orientation. I tried a few approaches I found on Stack Overflow, but none of them worked. How can I adjust the AVCaptureVideoPreviewLayer orientation to match the device's current orientation?
Here is my CameraManager.swift code:
// MARK: Camera View Model
class CameraViewModel: NSObject,ObservableObject,AVCaptureFileOutputRecordingDelegate{
@Published var session = AVCaptureSession()
@Published var alert = false
@Published var output = AVCaptureMovieFileOutput()
@Published var preview : AVCaptureVideoPreviewLayer!
// MARK: Video Recorder Properties
@Published var isRecording: Bool = false
@Published var recordedURLs: [URL] = []
@Published var previewURL: URL?
@Published var showPreview: Bool = false
// Top Progress Bar
@Published var recordedDuration: CGFloat = 0
// Timing
@Published var maxDuration: CGFloat = 10
func checkPermission(){
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .authorized:
setUp()
return
case .notDetermined:
AVCaptureDevice.requestAccess(for: .video) { (status) in
if status{
self.setUp()
}
}
case .denied:
self.alert.toggle()
return
default:
return
}
}
func setUp(){
do{
self.session.beginConfiguration()
let cameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front)
let videoInput = try AVCaptureDeviceInput(device: cameraDevice!)
let audioDevice = AVCaptureDevice.default(for: .audio)
let audioInput = try AVCaptureDeviceInput(device: audioDevice!)
// MARK: Audio Input
if self.session.canAddInput(videoInput) && self.session.canAddInput(audioInput){
self.session.addInput(videoInput)
self.session.addInput(audioInput)
}
if self.session.canAddOutput(self.output){
self.session.addOutput(self.output)
}
self.session.commitConfiguration()
}
catch{
print(error.localizedDescription)
}
}
func startRecording(){
// MARK: Temporary URL for recording Video
let tempURL = NSTemporaryDirectory() + "\(Date()).mov"
output.startRecording(to: URL(fileURLWithPath: tempURL), recordingDelegate: self)
isRecording = true
}
func stopRecording(){
output.stopRecording()
isRecording = false
}
func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {
if let error = error {
print(error.localizedDescription)
return
}
// CREATED SUCCESSFULLY
print(outputFileURL)
self.recordedURLs.append(outputFileURL)
if self.recordedURLs.count == 1{
self.previewURL = outputFileURL
return
}
// CONVERTING URLs TO ASSETS
let assets = recordedURLs.compactMap { url -> AVURLAsset in
return AVURLAsset(url: url)
}
self.previewURL = nil
// MERGING VIDEOS
mergeVideos(assets: assets) { exporter in
exporter.exportAsynchronously {
if exporter.status == .failed{
// HANDLE ERROR
print(exporter.error!)
}
else{
if let finalURL = exporter.outputURL{
print(finalURL)
DispatchQueue.main.async {
self.previewURL = finalURL
}
}
}
}
}
}
func mergeVideos(assets: [AVURLAsset], completion: @escaping (_ exporter: AVAssetExportSession)->()){
let compostion = AVMutableComposition()
var lastTime: CMTime = .zero
guard let videoTrack = compostion.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else{return}
guard let audioTrack = compostion.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else{return}
for asset in assets {
// Linking Audio and Video
do{
try videoTrack.insertTimeRange(CMTimeRange(start: .zero, duration: asset.duration), of: asset.tracks(withMediaType: .video)[0], at: lastTime)
// Safe Check if Video has Audio
if !asset.tracks(withMediaType: .audio).isEmpty{
try audioTrack.insertTimeRange(CMTimeRange(start: .zero, duration: asset.duration), of: asset.tracks(withMediaType: .audio)[0], at: lastTime)
}
}
catch{
// HANDLE ERROR
print(error.localizedDescription)
}
// Updating Last Time
lastTime = CMTimeAdd(lastTime, asset.duration)
}
// MARK: Temp Output URL
let tempURL = URL(fileURLWithPath: NSTemporaryDirectory() + "Reel-\(Date()).mp4")
// VIDEO IS ROTATED
// BRINGING BACK TO ORIGINAL TRANSFORM
let layerInstructions = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
// MARK: Transform
var transform = CGAffineTransform.identity
transform = transform.rotated(by: .pi / 0.5)
transform = transform.translatedBy(x: 0, y: 0)
layerInstructions.setTransform(transform, at: .zero)
let instructions = AVMutableVideoCompositionInstruction()
instructions.timeRange = CMTimeRange(start: .zero, duration: lastTime)
instructions.layerInstructions = [layerInstructions]
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = CGSize(width: videoTrack.naturalSize.height, height: videoTrack.naturalSize.width)
videoComposition.instructions = [instructions]
videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
guard let exporter = AVAssetExportSession(asset: compostion, presetName: AVAssetExportPresetHighestQuality) else{return}
exporter.outputFileType = .mp4
exporter.outputURL = tempURL
exporter.videoComposition = videoComposition
completion(exporter)
}
}
Here is my CameraPreview.swift code:
struct CameraPreview: UIViewRepresentable {
@EnvironmentObject var cameraModel : CameraViewModel
var size: CGSize
func makeUIView(context: Context) -> UIView {
let view = UIView()
cameraModel.preview = AVCaptureVideoPreviewLayer(session: cameraModel.session)
cameraModel.preview.frame.size = size
cameraModel.preview.videoGravity = .resizeAspectFill
view.layer.addSublayer(cameraModel.preview)
cameraModel.session.startRunning()
return view
}
func updateUIView(_ uiView: UIView, context: Context) {
}
}
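There is no answer in this excerpt, but a commonly suggested approach is to update the preview layer's connection whenever the device orientation changes. The sketch below is a minimal, untested example written against the CameraPreview/CameraViewModel code above; the helper names videoOrientation(for:) and updatePreviewOrientation(_:) are illustrative, not part of the original code.
import AVFoundation
import UIKit

// Maps the current device orientation to a capture video orientation.
// Note the landscape cases are swapped: device .landscapeLeft corresponds
// to video .landscapeRight and vice versa.
func videoOrientation(for deviceOrientation: UIDeviceOrientation) -> AVCaptureVideoOrientation? {
    switch deviceOrientation {
    case .portrait: return .portrait
    case .portraitUpsideDown: return .portraitUpsideDown
    case .landscapeLeft: return .landscapeRight
    case .landscapeRight: return .landscapeLeft
    default: return nil // .faceUp / .faceDown / .unknown: keep the current orientation
    }
}

// Applies the mapping to the preview layer's connection.
func updatePreviewOrientation(_ preview: AVCaptureVideoPreviewLayer) {
    guard let connection = preview.connection,
          connection.isVideoOrientationSupported,
          let orientation = videoOrientation(for: UIDevice.current.orientation) else { return }
    connection.videoOrientation = orientation
}
updatePreviewOrientation(_:) could be called from updateUIView(_:context:) or from an observer of UIDevice.orientationDidChangeNotification. On iOS 17 videoOrientation is deprecated in favor of videoRotationAngle, but the idea is the same.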

Related

Swift Merging audio and video files into one video

I wrote a program in Swift. I want to merge a video with an audio file, but I get this error:
"failed Error Domain=AVFoundationErrorDomain Code=-11838 "Operation Stopped" UserInfo=0x17da4230 {NSLocalizedDescription=Operation Stopped, NSLocalizedFailureReason=The operation is not supported for this media.}"
Code
func mergeVideoWithAudio(videoUrl: URL, audioUrl: URL, success: @escaping ((URL) -> Void), failure: @escaping ((Error?) -> Void)) {
let mixComposition: AVMutableComposition = AVMutableComposition()
var mutableCompositionVideoTrack: [AVMutableCompositionTrack] = []
var mutableCompositionAudioTrack: [AVMutableCompositionTrack] = []
let totalVideoCompositionInstruction : AVMutableVideoCompositionInstruction = AVMutableVideoCompositionInstruction()
let aVideoAsset: AVAsset = AVAsset(url: videoUrl)
let aAudioAsset: AVAsset = AVAsset(url: audioUrl)
if let videoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid), let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) {
mutableCompositionVideoTrack.append(videoTrack)
mutableCompositionAudioTrack.append(audioTrack)
if let aVideoAssetTrack: AVAssetTrack = aVideoAsset.tracks(withMediaType: .video).first, let aAudioAssetTrack: AVAssetTrack = aAudioAsset.tracks(withMediaType: .audio).first {
do {
try mutableCompositionVideoTrack.first?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack.timeRange.duration), of: aVideoAssetTrack, at: CMTime.zero)
try mutableCompositionAudioTrack.first?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack.timeRange.duration), of: aAudioAssetTrack, at: CMTime.zero)
videoTrack.preferredTransform = aVideoAssetTrack.preferredTransform
} catch{
print(error)
}
totalVideoCompositionInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero,duration: videoTrack.timeRange.duration)
}
}
let mutableVideoComposition: AVMutableVideoComposition = AVMutableVideoComposition()
mutableVideoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
mutableVideoComposition.renderSize = CGSize(width: 480, height: 640)
if let documentsPath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true).first {
let outputURL = URL(fileURLWithPath: documentsPath).appendingPathComponent("\("fileName").mp4")
do {
if FileManager.default.fileExists(atPath: outputURL.path) {
try FileManager.default.removeItem(at: outputURL)
}
} catch { }
// var outputURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0].appendingPathComponent("temp_video_data.mp4", isDirectory: false)
// outputURL = URL(fileURLWithPath: outputURL.path)
// let mediaItems = MPMediaQuery.songs().items
// let mediaCollection = MPMediaItemCollection(items: mediaItems ?? [])
// let player1 = MPMusicPlayerController.systemMusicPlayer
// player1.setQueue(with: mediaCollection)
// player1.play()
// guard let url = URL(string: url )else {return}
// let player = AVPlayer(url: url)
// let playerController = AVPlayerViewController()
// playerController.player = player
// playerController.allowsPictureInPicturePlayback = true
// playerController.delegate = self
// playerController.player?.play()
// present(playerController, animated: true, completion: nil)
// let player3 = AVPlayer(url: vidUrl!)
// let playerLayer3 = AVPlayerLayer(player: player3)
// playerLayer3.videoGravity = .resizeAspect
// playerLayer3.needsDisplayOnBoundsChange = true //
// playerLayer3.frame = self.videoImg.bounds // 1
//
// self.videoImg.layer.masksToBounds = true // 2
// self.videoImg.layer.addSublayer(playerLayer3)
// player3.play()
// let assetsLib = PHPhotoLibrary()
// assetsLib.
// assetsLib.writeVideoAtPath(toSavedPhotosAlbum: outputURL, completionBlock: nil)
print(outputURL, "outputURL.....")
if let exportSession = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) {
exportSession.outputURL = NSURL.fileURL(withPath: outputURL.path)
exportSession.outputFileType = AVFileType.mov
exportSession.shouldOptimizeForNetworkUse = true
// try to export the file and handle the status cases
exportSession.exportAsynchronously(completionHandler: {
switch exportSession.status {
case .failed:
if let _error = exportSession.error {
print("failedddd",_error)
failure(_error)
}
case .cancelled:
if let _error = exportSession.error {
print("cancellled")
failure(_error)
}
default:
print("finished")
success(outputURL)
}
})
} else {
print("failurrrrrrrr")
failure(nil)
}
}
}
Here is how the function is called:
vidUrl = file:///private/var/mobile/Containers/Data/PluginKitPlugin/C6BD4529-DF11-4A67-B56E-C3A841D86D75/tmp/trim.CC7EABA8-08EC-4C33-8E6A-E85A5EBCFB64.MOV
outputURL = file:///var/mobile/Containers/Data/Application/9115DCFE-4134-48AD-84BF-452AC0D33278/Documents/y2matecom-ChallaOfficialFullVideoKhanSaabAYMediaRecordsLatestPunjabiSongs2016.mp3
mergeVideoWithAudio(videoUrl: vidUrl!, audioUrl: outputURL!) { resultUrl in
print("sucessssssssss",resultUrl)
} failure: { error in
print(error?.localizedDescription,"hgjjhfghdg")
}
Note: the failure block is called every time, with "Operation Stopped".
My guess is that a media type such as mpeg4 is wrong. Where is the problem? What am I missing?
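This question also has no answer in the excerpt. Two things that are easy to rule out, offered only as hedged suggestions: the output URL uses an .mp4 extension while outputFileType is .mov, and the chosen file type may not be in the export session's supportedFileTypes for this particular composition. A minimal consistency check, with an illustrative file name, might look like this:
import AVFoundation

// Hedged sketch: keep the URL extension and outputFileType consistent, and
// confirm the file type is actually supported before exporting.
func configureExport(_ exportSession: AVAssetExportSession, documentsPath: String) -> Bool {
    let outputURL = URL(fileURLWithPath: documentsPath)
        .appendingPathComponent("merged.mov") // extension matches the file type below
    exportSession.outputURL = outputURL
    exportSession.outputFileType = .mov
    exportSession.shouldOptimizeForNetworkUse = true
    // -11838 ("Operation Stopped") is often reported when the requested output
    // file type is not supported for the asset/preset combination.
    return exportSession.supportedFileTypes.contains(.mov)
}
If the check fails, the preset/file-type combination, rather than the media itself, may be what the -11838 error is complaining about.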

AVURLAsset reverting to original orientation even though the video file is mirrored

I am trying to merge videos that are recorded to local files. I want to support both the front and back cameras, but I saw that recording with the front camera causes the video to flip, so when recording I used the following:
func startRecording(){
if isCameraButtonDisabled == false {
// MARK: Temporary URL for recording Video
let tempURL = NSTemporaryDirectory() + "\(Date()).mov"
if self.videoDeviceInput.device.position == .back {
output.startRecording(to: URL(fileURLWithPath: tempURL), recordingDelegate: self)
} else {
output.connection(with: AVMediaType.video)?.isVideoMirrored = true
output.startRecording(to: URL(fileURLWithPath: tempURL), recordingDelegate: self)
}
withAnimation(.spring(response: 0.5, dampingFraction: 0.5, blendDuration: 1)) {
isRecording = true
}
}
}
However, when I use only the outputFileURL, the front-facing video is mirrored as I intended. But after I convert each video into an AVURLAsset in order to combine the videos, the orientation changes back to the original (non-mirrored) version. I am very confused as to why this happens and how I can keep the mirrored version when joining videos with AVURLAsset. Here is the function that returns the video file, as well as the one that merges them.
func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {
if let error = error {
print(error.localizedDescription)
return
}
withAnimation {
self.recordedURLs.append(outputFileURL)
}
if self.recordedURLs.count == 1{
self.previewURL = outputFileURL
return
}
let assets = recordedURLs.compactMap { url -> AVURLAsset in
return AVURLAsset(url: url)
}
for asset in assets {
print(asset.preferredTransform)
}
self.previewURL = nil
mergeVideos(assets: assets) { exporter in
exporter.exportAsynchronously {
if exporter.status == .failed{
// HANDLE ERROR
print(exporter.error!)
}
else{
if let finalURL = exporter.outputURL{
print(finalURL)
DispatchQueue.main.async {
self.previewURL = finalURL
}
}
}
}
}
}
func mergeVideos(assets: [AVURLAsset], completion: @escaping (_ exporter: AVAssetExportSession)->()){
let compostion = AVMutableComposition()
var lastTime: CMTime = .zero
guard let videoTrack = compostion.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else{return}
guard let audioTrack = compostion.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else{return}
for asset in assets {
// Linking Audio and Video
do{
try videoTrack.insertTimeRange(CMTimeRange(start: .zero, duration: asset.duration), of: asset.tracks(withMediaType: .video)[0], at: lastTime)
// Safe Check if Video has Audio
if !asset.tracks(withMediaType: .audio).isEmpty{
try audioTrack.insertTimeRange(CMTimeRange(start: .zero, duration: asset.duration), of: asset.tracks(withMediaType: .audio)[0], at: lastTime)
}
}
catch{
print(error.localizedDescription)
}
lastTime = CMTimeAdd(lastTime, asset.duration)
}
// MARK: Temp Output URL
let tempURL = URL(fileURLWithPath: NSTemporaryDirectory() + "\(Date()).mp4")
let layerInstructions = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
var transform = CGAffineTransform.identity
transform = transform.rotated(by: 90 * (.pi / 180))//
transform = transform.translatedBy(x: 0, y: -videoTrack.naturalSize.height)//
layerInstructions.setTransform(transform, at: .zero)
let instructions = AVMutableVideoCompositionInstruction()
instructions.timeRange = CMTimeRange(start: .zero, duration: lastTime)
instructions.layerInstructions = [layerInstructions]
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = CGSize(width: videoTrack.naturalSize.height, height: videoTrack.naturalSize.width)
videoComposition.instructions = [instructions]
videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
guard let exporter = AVAssetExportSession(asset: compostion, presetName: AVAssetExportPresetHighestQuality) else{return}
exporter.outputFileType = .mp4
exporter.outputURL = tempURL
exporter.videoComposition = videoComposition
completion(exporter)
}
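No answer is included for this question either. One hedged reading of the code above: the mergeVideos transform is hard-coded, so whatever preferredTransform (including the mirroring written by the capture connection) each source file carries is discarded. The sketch below, with illustrative names, carries each clip's own transform into the layer instruction instead:
import AVFoundation

// Hedged sketch: apply each source clip's preferredTransform at the time the
// clip starts in the merged timeline, instead of one fixed rotation.
func layerInstruction(for compositionTrack: AVMutableCompositionTrack,
                      assets: [AVURLAsset]) -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: compositionTrack)
    var cursor = CMTime.zero
    for asset in assets {
        if let sourceTrack = asset.tracks(withMediaType: .video).first {
            // Additional translation may still be needed so the transformed
            // frame lands inside the render rectangle.
            instruction.setTransform(sourceTrack.preferredTransform, at: cursor)
        }
        cursor = CMTimeAdd(cursor, asset.duration)
    }
    return instruction
}
Whether this alone restores the mirroring depends on how the mirrored clips were actually written, so treat it as a starting point rather than a confirmed fix.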

Play video synthesized by AVMutableComposition in AVPlayer with error

I am creating an iOS video editing tool using AVMutableComposition.
If I combine multiple videos into a single video mutable track, it works fine. However, when I create a mutable track per video, compose the video, and play it in AVPlayer, only the sound plays and the picture freezes when moving on to the next clip. If I seek, it plays normally. What is the cause?
The following is the code in which this situation occurs.
static func mergeClips(
videos: [ExCAVideoLayer]?,
audios: [ExCAAudioLayer]? = nil
) -> AVPlayerItem? {
guard let videos = videos else {
return nil
}
let mixComposition = AVMutableComposition()
var layerInstructions = [AVMutableVideoCompositionLayerInstruction]()
do {
// Determine video aspect ratio
var aspectRatio: Double = 0.00001
for video in videos {
guard let url = video.videoUrl else {
continue
}
if let naturalSize = self.resolutionForLocalVideo(url: url) {
let naturalAspectRatio = naturalSize.height == 0 ? 1 : naturalSize.width / naturalSize.height
if aspectRatio < naturalAspectRatio {
aspectRatio = naturalAspectRatio
}
}
}
var frameWidth = UIScreen.main.bounds.width
var frameHeight = round(UIScreen.main.bounds.width / aspectRatio)
if frameWidth.truncatingRemainder(dividingBy: 2) > 0 {
frameWidth = frameWidth - 1
}
if frameHeight.truncatingRemainder(dividingBy: 2) > 0 {
frameHeight = frameHeight - 1
}
let frameSize = CGSize(width: frameWidth, height: frameHeight)
// Video composition
var currentDuration = mixComposition.duration
for video in videos {
guard let url = video.videoUrl else {
continue
}
let videoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)
let avAsset = AVAsset(url: url)
if let layerInstruction = self.videoCompositionInstruction(videoTrack, asset: avAsset, frameSize: frameSize) {
layerInstructions.append(layerInstruction)
}
let streamRange = CMTimeRangeMake(start: CMTime.zero, duration: CMTimeAdd(avAsset.duration, CMTimeMakeWithSeconds(video.gap, preferredTimescale: 1)))
if let streamVideoTrack = avAsset.tracks(withMediaType: .video).first {
try videoTrack?.insertTimeRange(streamRange, of: streamVideoTrack, at: currentDuration)
videoTrack?.preferredTransform = streamVideoTrack.preferredTransform
}
if let streamAudioTrack = avAsset.tracks(withMediaType: .audio).first {
try audioTrack?.insertTimeRange(streamRange, of: streamAudioTrack, at: currentDuration)
}
currentDuration = mixComposition.duration
}
// Audio composition
if let audios = audios {
for audio in audios {
guard let url = audio.audioUrl else {
continue
}
let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)
let avAsset = AVAsset(url: url)
let streamRange = CMTimeRangeMake(start: CMTime.zero, duration: avAsset.duration)
if let streamAudioTrack = avAsset.tracks(withMediaType: .audio).first {
try audioTrack?.insertTimeRange(streamRange, of: streamAudioTrack, at: CMTimeMakeWithSeconds(audio.startTime, preferredTimescale: 1))
}
}
}
//AVVideoComposition and layout
let videoComposition = AVMutableVideoComposition()
videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
videoComposition.renderSize = CGSize(width: frameSize.width, height: frameSize.height)
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRange(start: .zero, duration: mixComposition.duration)
mainInstruction.backgroundColor = UIColor.black.cgColor
mainInstruction.layerInstructions = layerInstructions
videoComposition.instructions = [mainInstruction]
let assetKeys = [
"playable",
"hasProtectedContent"
]
let avPlayItem = AVPlayerItem(asset: mixComposition, automaticallyLoadedAssetKeys: assetKeys)
avPlayItem.videoComposition = videoComposition
return avPlayItem
}
catch(let error) {
print("Could not create mixComposition \(error.localizedDescription)")
return nil
}
}
func PlayVideo() {
guard let avPlayerItem = VideoComposition.mergeClips(videos: self.projectRender?.videoItems) else {
return
}
self.avPlayer = AVPlayer(playerItem: avPlayerItem)
self.avPlayerLayer = AVPlayerLayer(player: self.avPlayer)
self.avPlayerLayer?.needsDisplayOnBoundsChange = true
if let avPlayerLayer = self.avPlayerLayer {
self.videoContainer?.layer.addSublayer(avPlayerLayer)
}
self.avPlayer?.play()
}
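For comparison with the question's own observation that a single mutable track "works fine", here is a hedged sketch of that single-track merge (names and error handling are illustrative). It does not explain the per-track stall, but it is the structure the question reports as working:
import AVFoundation

// Hedged sketch: insert every clip into one shared video track and one shared
// audio track, rather than creating a new pair of mutable tracks per clip.
func mergeIntoSingleTrack(urls: [URL]) throws -> AVMutableComposition {
    let composition = AVMutableComposition()
    guard
        let videoTrack = composition.addMutableTrack(withMediaType: .video,
                                                     preferredTrackID: kCMPersistentTrackID_Invalid),
        let audioTrack = composition.addMutableTrack(withMediaType: .audio,
                                                     preferredTrackID: kCMPersistentTrackID_Invalid)
    else { return composition }
    var cursor = CMTime.zero
    for url in urls {
        let asset = AVURLAsset(url: url)
        let range = CMTimeRange(start: .zero, duration: asset.duration)
        if let source = asset.tracks(withMediaType: .video).first {
            try videoTrack.insertTimeRange(range, of: source, at: cursor)
        }
        if let source = asset.tracks(withMediaType: .audio).first {
            try audioTrack.insertTimeRange(range, of: source, at: cursor)
        }
        cursor = CMTimeAdd(cursor, asset.duration)
    }
    return composition
}
An AVPlayerItem can then be created directly from the returned composition.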

Recording video and audio with AVAssetWriter in Swift

I'm trying to add the device's microphone audio to a video recording from the device's camera. The video is filtered with a CIFilter and works as expected. My problem is that the mic audio is not attached to the video once it is saved.
I have tried setting the audio settings manually like this:
let audioSettings : [String : Any] = [
AVFormatIDKey : kAudioFormatMPEG4AAC,
AVNumberOfChannelsKey: 1,
AVSampleRateKey : 44100,
AVEncoderBitRateKey : 64000
]
but using the recommendedAudioSettingsForAssetWriter method seems like the correct approach, since the video recording already works with the recommendedVideoSettingsForAssetWriter method.
Can anyone tell me how to achieve this or point me in the right direction?
My code so far:
import UIKit
import AVFoundation
class VideoViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {
@IBOutlet weak var imageView: UIImageView!
lazy var cameraDevice: AVCaptureDevice? = {
return AVCaptureDevice.default(for: .video)
}()
lazy var micDevice: AVCaptureDevice? = {
return AVCaptureDevice.default(for: .audio)
}()
var captureSession = AVCaptureSession()
var outputURL: URL!
var orientation: AVCaptureVideoOrientation = .landscapeRight
var filterObject = FilterObject()
var assetWriter: AVAssetWriter?
var assetWriterInput: AVAssetWriterInput?
var pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor?
var fileName = ""
var recordingState = RecordingState.idle
var time: Double = 0
let videoOutput = AVCaptureVideoDataOutput()
let audioOutput = AVCaptureAudioDataOutput()
let context = CIContext()
override func viewDidLoad() {
super.viewDidLoad()
setupCameraDevice()
setupAudioDevice()
setupInputOutput()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
setUpAuthStatus()
}
@IBAction func recordPressed(_ sender: UIButton) {
switch recordingState {
case .idle:
recordingState = .start
case .capturing:
recordingState = .end
default:
break
}
}
func setUpAuthStatus() {
if AVCaptureDevice.authorizationStatus(for: AVMediaType.video) != .authorized {
AVCaptureDevice.requestAccess(for: AVMediaType.video, completionHandler: { (authorized) in
DispatchQueue.main.async {
if authorized {
self.setupInputOutput()
}
}
})
}
if AVCaptureDevice.authorizationStatus(for: AVMediaType.audio) != .authorized {
AVCaptureDevice.requestAccess(for: AVMediaType.audio, completionHandler: { (authorized) in
DispatchQueue.main.async {
if authorized {
self.setupInputOutput()
}
}
})
}
}
func setupCameraDevice() {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .unspecified)
let devices = deviceDiscoverySession.devices
for device in devices {
if device.position == .back {
cameraDevice = device
}
}
}
func setupAudioDevice() {
let audioDeviceDisoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInMicrophone], mediaType: .audio, position: .unspecified)
let devices = audioDeviceDisoverySession.devices
micDevice = devices[0]
}
func setupInputOutput() {
do {
guard let cameraDevice = cameraDevice else { return }
let captureDeviceInput = try AVCaptureDeviceInput(device: cameraDevice)
guard let micDevice = micDevice else { return }
let micDeviceInput = try AVCaptureDeviceInput(device: micDevice)
captureSession.sessionPreset = AVCaptureSession.Preset.hd1920x1080
if captureSession.canAddInput(captureDeviceInput) {
captureSession.addInput(captureDeviceInput)
}
if captureSession.canAddInput(micDeviceInput) {
captureSession.addInput(micDeviceInput)
}
let queue = DispatchQueue(label: "com.apple.sample.capturepipeline.video", attributes: [])
if captureSession.canAddOutput(videoOutput) {
videoOutput.setSampleBufferDelegate(self, queue: queue)
captureSession.addOutput(videoOutput)
}
if captureSession.canAddOutput(audioOutput) {
audioOutput.setSampleBufferDelegate(self, queue: queue)
captureSession.addOutput(audioOutput)
}
captureSession.commitConfiguration()
captureSession.startRunning()
} catch {
print(error)
}
}
func captureOutput(_ output: AVCaptureOutput,
didOutput sampleBuffer: CMSampleBuffer,
from connection: AVCaptureConnection) {
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
audioOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {return}
let cameraImage = CIImage(cvImageBuffer: imageBuffer)
guard let name = filterObject.name else {return}
let effect = FilterType.genericFilter(name: name, cameraImage: cameraImage)
effect.setValue(cameraImage, forKey: kCIInputImageKey)
TableData.setFilterValues(withFilterName: name, effect: effect, values: [value1, value2])
guard let outputImage = effect.outputImage else { return }
context.render(outputImage, to: imageBuffer)
guard let cgImage = self.context.createCGImage(outputImage, from: cameraImage.extent) else { return }
DispatchQueue.main.async {
let filteredImage = UIImage(cgImage: cgImage)
self.imageView.image = filteredImage
}
let timestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer).seconds
switch recordingState {
case .start:
fileName = UUID().uuidString
let videoPath = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!.appendingPathComponent("\(fileName).mov")
let writer = try! AVAssetWriter(outputURL: videoPath, fileType: .mov)
let videoSettings = videoOutput.recommendedVideoSettingsForAssetWriter(writingTo: .mov)
let videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
videoInput.mediaTimeScale = CMTimeScale(bitPattern: 600)
videoInput.expectsMediaDataInRealTime = true
let audioSettings = audioOutput.recommendedAudioSettingsForAssetWriter(writingTo: .m4a)
let audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings as? [String : Any])
audioInput.expectsMediaDataInRealTime = true
//videoInput.transform = CGAffineTransform(rotationAngle: .pi/2)
let pixelAdapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoInput, sourcePixelBufferAttributes: nil)
if writer.canAdd(videoInput) {
writer.add(videoInput)
}
if writer.canAdd(audioInput) {
writer.add(audioInput)
}
writer.startWriting()
writer.startSession(atSourceTime: .zero)
assetWriter = writer
assetWriterInput = videoInput
pixelBufferAdaptor = pixelAdapter
recordingState = .capturing
time = timestamp
case .capturing:
if assetWriterInput?.isReadyForMoreMediaData == true {
let newTime = CMTime(seconds: timestamp - time, preferredTimescale: CMTimeScale(600))
pixelBufferAdaptor?.append(imageBuffer, withPresentationTime: newTime)
}
break
case .end:
guard assetWriterInput?.isReadyForMoreMediaData == true, assetWriter!.status != .failed else { break }
let url = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!.appendingPathComponent("\(fileName).mov")
assetWriterInput?.markAsFinished()
assetWriter?.finishWriting { [weak self] in
self?.recordingState = .idle
self?.assetWriter = nil
self?.assetWriterInput = nil
DispatchQueue.main.async {
let activity = UIActivityViewController(activityItems: [url], applicationActivities: nil)
self?.present(activity, animated: true, completion: nil)
}
}
default:
break
}
}
}
Your audio settings do not look correct. The AVSampleRateKey should come from the sample rate in the format description of the first audio sample buffer that comes in, rather than being hard-coded to 44100; the AVEncoderBitRateKey is a separate setting and could, for example, be set to AVEncoderBitRateKey: Int(48_000).
To get the sample rate, first call
let fmt = CMSampleBufferGetFormatDescription(sampleBuffer)
let asbd = CMAudioFormatDescriptionGetStreamBasicDescription(fmt!)
and then the sample rate will be asbd?.pointee.mSampleRate, and that should be set as the AVSampleRateKey value in the audio settings (I think).
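Putting the answer's pieces together, a hedged sketch of building the writer's audio settings from the incoming sample buffer (my assembly of the suggestion above, not code from the thread):
import AVFoundation
import CoreMedia

// Hedged sketch: read the sample rate (and channel count) from the first audio
// sample buffer's format description instead of hard-coding 44100.
func audioSettings(from sampleBuffer: CMSampleBuffer) -> [String: Any]? {
    guard
        let fmt = CMSampleBufferGetFormatDescription(sampleBuffer),
        let asbd = CMAudioFormatDescriptionGetStreamBasicDescription(fmt)?.pointee
    else { return nil }
    return [
        AVFormatIDKey: kAudioFormatMPEG4AAC,
        AVNumberOfChannelsKey: Int(asbd.mChannelsPerFrame),
        AVSampleRateKey: asbd.mSampleRate,   // from the incoming audio, not 44100
        AVEncoderBitRateKey: 64_000          // bit rate is a separate knob; the answer suggests ~48_000
    ]
}
The resulting dictionary would then be passed as outputSettings when creating the audio AVAssetWriterInput in the .start case.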

func captureOutput is never called

I would like to add a filter to each frame I record in real time and display the filtered image in a UIImageView; if anyone could help it would be nice.
But captureOutput is never called. Here is my code.
class Measurement: UIViewController , AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet weak var cameraPreview: UIView!
@IBOutlet weak var imageView: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
setupCameraSession()
toggleTorch(on: true)
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
view.layer.addSublayer(previewLayer)
cameraSession.startRunning()
}
lazy var cameraSession: AVCaptureSession = {
let s = AVCaptureSession()
s.sessionPreset = AVCaptureSession.Preset.low
return s
}()
lazy var previewLayer: AVCaptureVideoPreviewLayer = {
let preview = AVCaptureVideoPreviewLayer(session: self.cameraSession)
preview.position = CGPoint(x:182,y: 485)
preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
preview.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
preview.bounds = imageView.bounds
//preview.position = CGPoint(x:self.view.bounds.midX,y: self.view.bounds.midY)
imageView.layer.addSublayer(preview)
return preview
}()
func toggleTorch(on: Bool) {
guard let device = AVCaptureDevice.default(for: .video) else { return }
if device.hasTorch {
do {
try device.lockForConfiguration()
if on == true {
device.torchMode = .on
} else {
device.torchMode = .off
}
device.unlockForConfiguration()
} catch {
print("Torch could not be used")
}
} else {
print("Torch is not available")
}
}
func setupCameraSession() {
let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
do {
let deviceInput = try AVCaptureDeviceInput(device: captureDevice!)
cameraSession.beginConfiguration()
if (cameraSession.canAddInput(deviceInput) == true) {
cameraSession.addInput(deviceInput)
print("Processing Data.")
}
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString) : NSNumber(value: kCVPixelFormatType_32BGRA as UInt32)] as [String : AnyObject]
dataOutput.alwaysDiscardsLateVideoFrames = true
print("Processing Data.")
if (cameraSession.canAddOutput(dataOutput) == true) {
cameraSession.addOutput(dataOutput)
print("Processing Data.")
}
cameraSession.commitConfiguration()
let queue = DispatchQueue(label: "com.invasivecode.videoQueue")
dataOutput.setSampleBufferDelegate(self, queue: queue)
}
catch let error as NSError {
print("\(error), \(error.localizedDescription)")
}
}
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
print("Processing Data.")
guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
//let chromaKeyFilter = colorCubeFilterForChromaKey(hueAngle: 120)
let ciImage = CIImage(cvPixelBuffer: imageBuffer)
let context = CIContext()
guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return }
let image = UIImage(cgImage: cgImage)
if let chromaKeyFilter = CIFilter(name: "CISepiaTone") {
let beginImage = CIImage(image: image)
chromaKeyFilter.setValue(beginImage, forKey: kCIInputImageKey)
chromaKeyFilter.setValue(0.5, forKey: kCIInputIntensityKey)
if let output = chromaKeyFilter.outputImage {
if let cgimg = context.createCGImage(output, from: output.extent) {
let processedImage = UIImage(cgImage: cgimg)
// do something interesting with the processed image
imageView.image = processedImage
}
}
}
}
func captureOutput(_ captureOutput: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
// Here you can count how many frames are dopped
}
func startCapture() {
print("\(self.classForCoder)/" + #function)
if cameraSession.isRunning {
print("already running")
return
}
cameraSession.startRunning()
toggleTorch(on: true)
}
You need to set the sample buffer delegate on the data output:
dataOutput.setSampleBufferDelegate(self, queue: queue)
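Separately from the delegate wiring, the method signature itself is worth checking (a general Swift 4+ fact, not something stated in this thread): the pre-Swift-4 form captureOutput(_:didOutputSampleBuffer:from:) with implicitly unwrapped parameters still compiles, but the framework never calls it, which matches the symptom in the title. A minimal sketch of the current delegate signature:
import AVFoundation

// Hedged sketch: in Swift 4 and later the video-data callback must have exactly
// this signature to be invoked by AVCaptureVideoDataOutput.
final class FrameReceiver: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        // The question's CIFilter work would move here.
        print("Processing Data.")
    }
}
Moving the filtering code from the old-style method into a method with this exact signature should make the callback fire, assuming the delegate and queue are set as shown above.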