Connect UIViewRepresentable to SwiftUI - swift

I have a SwiftUI-based app with a simple button that, when pressed, is supposed to open a camera class built on AVFoundation that uses UIKit as well. Under the sheet modifier I am not sure what exactly to place. I tried CameraSession() and a few other ideas, but I am somewhat lost on bridging this SwiftUI button to opening the camera. Thank you!
//Content View
import SwiftUI
struct ContentView: View {
    //@State private var image: Image?
    @State private var showingCameraSession = false
    //@Binding var isShown: Bool

    var body: some View {
        VStack {
            ControlButton(systemIconName: "slider.horizontal.3") {
            //Button("Select Image") {
                showingCameraSession = true
            }
            .sheet(isPresented: $showingCameraSession) {
                //What to place here?
            }
        }
    }
}
//CameraSession
import AVFoundation
//import RealityKit
import UIKit
import SwiftUI
struct CameraSession: UIViewControllerRepresentable {
    //@Binding var isShown: Bool
    typealias UIViewControllerType = CaptureSession

    func makeUIViewController(context: Context) -> CaptureSession {
        return CaptureSession()
    }

    func updateUIViewController(_ uiViewController: CaptureSession, context: Context) {
        // if(self.isShown){
        //CameraSession.didTapTakePhoto()
        // shutterButton.addTarget(self, action: #selector(didTapTakePhoto), for: .touchUpInside) //tie button to actual function
    }
}
class CaptureSession: UIViewController {
    //@Binding var isShown: Bool
    //Reference: https://www.youtube.com/watch?v=ZYPNXLABf3c

    //CaptureSession
    var session: AVCaptureSession?
    //PhotoOutput --> to the Cloud
    let output = AVCapturePhotoOutput()
    // Video Preview
    let previewLayer = AVCaptureVideoPreviewLayer()

    //Shutter Button
    private let shutterButton: UIButton = {
        let button = UIButton(frame: CGRect(x: 0, y: 0, width: 100, height: 100))
        button.layer.cornerRadius = 50
        button.layer.borderWidth = 10
        button.layer.borderColor = UIColor.white.cgColor
        return button
    }()

    override func viewDidLoad() {
        super.viewDidLoad()
        view.backgroundColor = .black
        //previewLayer.backgroundColor = UIColor.systemRed.cgColor
        view.layer.addSublayer(previewLayer)
        view.addSubview(shutterButton)
        checkCameraPermissions()
        shutterButton.addTarget(self, action: #selector(didTapTakePhoto), for: .touchUpInside) //tie button to actual function
    }

    override func viewDidLayoutSubviews() {
        super.viewDidLayoutSubviews()
        previewLayer.frame = view.bounds
        shutterButton.center = CGPoint(x: view.frame.size.width/2, y: view.frame.size.height - 100)
    }

    private func checkCameraPermissions() {
        switch AVCaptureDevice.authorizationStatus(for: .video) {
        case .notDetermined:
            //Request permission
            AVCaptureDevice.requestAccess(for: .video) { [weak self] granted in
                guard granted else {
                    return
                }
                DispatchQueue.main.async {
                    self?.setUpCamera()
                }
            }
        case .restricted:
            break
        case .denied:
            break
        case .authorized:
            setUpCamera()
        @unknown default:
            break
        }
    }

    //with Photogrammetry, you also have to create a session similar https://developer.apple.com/documentation/realitykit/creating_3d_objects_from_photographs/
    // example app: https://developer.apple.com/documentation/realitykit/taking_pictures_for_3d_object_capture
    private func setUpCamera() {
        let session = AVCaptureSession()
        if let device = AVCaptureDevice.default(for: .video) {
            do {
                let input = try AVCaptureDeviceInput(device: device)
                if session.canAddInput(input) {
                    session.addInput(input) //some device configurations conflict with each other
                }
                if session.canAddOutput(output) {
                    session.addOutput(output)
                }
                previewLayer.videoGravity = .resizeAspectFill //content does not get distorted
                previewLayer.session = session
                session.startRunning()
                self.session = session
            }
            catch {
                print(error)
            }
        }
    }

    //originally private
    @objc private func didTapTakePhoto() {
        output.capturePhoto(with: AVCapturePhotoSettings(),
                            delegate: self)
        // let vc = UIHostingController(rootView: ContentView())
        // present(vc, animated: true)
    }
}
//AVCaptureOutput is AVFoundation's version of photo output
extension CaptureSession: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        guard let data = photo.fileDataRepresentation() else { //where to store file information
            return
        }
        let image = UIImage(data: data)
        session?.stopRunning()

        let imageView = UIImageView(image: image)
        imageView.contentMode = .scaleAspectFill
        imageView.frame = view.bounds
        view.addSubview(imageView)
    }
}

So to get around this, first make sure your app has permission to access the user's camera: go to Info.plist (or the Info tab next to Build Settings at the top) and add the "Privacy - Camera Usage Description" key (NSCameraUsageDescription) with a string such as "We need your camera to perform this action".
After that, a simple call inside the sheet's modifier should do the trick:
struct ContentView: View {
    //@State private var image: Image?
    @State private var showingCameraSession = false
    //@Binding var isShown: Bool

    var body: some View {
        VStack {
            // ControlButton(systemIconName: "slider.horizontal.3"){
            Button("Select Image") {
                showingCameraSession = true
            }
            .sheet(isPresented: $showingCameraSession) {
                CameraSession()
            }
        }
    }
}
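
If you also want the sheet to close itself once a photo has been taken (the commented-out isShown binding hints at that idea), a possible sketch is below. The onPhotoCaptured callback is an assumption: a closure property you would add to CaptureSession yourself and call at the end of photoOutput(_:didFinishProcessingPhoto:error:); it is not part of the original code.

// Sketch only: CameraSession with a binding so the UIKit controller can dismiss the sheet.
struct CameraSession: UIViewControllerRepresentable {
    @Binding var isShown: Bool

    func makeUIViewController(context: Context) -> CaptureSession {
        let controller = CaptureSession()
        // onPhotoCaptured is a hypothetical callback you would add to CaptureSession
        // and invoke once the photo has been processed.
        controller.onPhotoCaptured = { isShown = false }
        return controller
    }

    func updateUIViewController(_ uiViewController: CaptureSession, context: Context) {}
}

// Usage in ContentView:
// .sheet(isPresented: $showingCameraSession) {
//     CameraSession(isShown: $showingCameraSession)
// }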

Related

How to open a SwiftUI view by tapping an entity in RealityKit?

I am using SwiftUI with RealityKit. As displayed in the code below, I have a plane entity that when tapped simply prints the name of the entity. What approach should I take toward navigating to a new view when I tap the entity? It would be preferable to navigate as with a navigation link in a normal view, but if that is not possible then perhaps a fullScreenCover?
ARViewContainer.swift:
class Coordinator: NSObject {
    weak var view: ARView?

    @objc func handleTap(_ recognizer: UITapGestureRecognizer) {
        guard let view = self.view else { return }
        let tapLocation = recognizer.location(in: view)
        if let entity = view.entity(at: tapLocation) as? ModelEntity {
            print(entity.name)
        }
    }
}
struct ARViewContainer: UIViewRepresentable {
    typealias UIViewType = ARView

    func makeUIView(context: Context) -> ARView {
        let arView = ARView(frame: .zero, cameraMode: .ar, automaticallyConfigureSession: true)
        context.coordinator.view = arView
        arView.addGestureRecognizer(UITapGestureRecognizer(target: context.coordinator, action: #selector(Coordinator.handleTap)))
        arView.scene.anchors.removeAll()

        let anchor = AnchorEntity()
        let plane = MeshResource.generatePlane(width: 1, height: 1)
        var material = UnlitMaterial()
        material.color = .init(tint: .white,
                               texture: .init(try! .load(named: "instagram")))

        let planeEntity = ModelEntity(mesh: plane, materials: [material])
        planeEntity.generateCollisionShapes(recursive: true)
        planeEntity.name = "Plane Entity"
        planeEntity.position.z -= 1.0
        planeEntity.setParent(anchor)
        arView.scene.addAnchor(anchor)

        return arView
    }

    func updateUIView(_ uiView: ARView, context: Context) {
    }

    func makeCoordinator() -> Coordinator {
        Coordinator()
    }
}
ContentView.swift
struct ContentView: View {
    @State var open = false

    var body: some View {
        NavigationView {
            ZStack {
                ARViewContainer()
                    .ignoresSafeArea(.all)
            }
        }
    }
}
View I want to navigate to:
struct TestView: View {
    var body: some View {
        VStack {
            Text("Test View")
        }
    }
}
Manage the state of the view in an observable object and modify it from your AR view.
struct ContentView: View {
    @ObservedObject var settings = Settings.shared

    var body: some View {
        NavigationView {
            ZStack {
                ARViewContainer()
                    .ignoresSafeArea(.all)
                NavigationLink("", isActive: $settings.shouldOpenDetailsView) {
                    TestView()
                }
            }
        }
    }
}

class Settings: ObservableObject {
    static let shared = Settings()
    @Published var shouldOpenDetailsView = false
}

class Coordinator: NSObject {
    weak var view: ARView?

    @objc func handleTap(_ recognizer: UITapGestureRecognizer) {
        guard let view = self.view else { return }
        let tapLocation = recognizer.location(in: view)
        if let entity = view.entity(at: tapLocation) as? ModelEntity {
            Settings.shared.shouldOpenDetailsView = true
        }
    }
}

SwiftUI pass parameter to Class from UIViewRepresentable

I have a question that I think is quite simple, yet I'm stuck, so I'm asking for help; maybe it will be useful to other people too!
I want to loop a video, but I have a problem: I need to pass a variable to a struct and then to a class, like this:
struct FinalPreview: View {
    var url: URL
    // `player` and `size` are defined elsewhere in the original code and omitted here.

    var body: some View {
        PlayerView(url: url)
            .aspectRatio(contentMode: .fill)
            .frame(width: size.width, height: size.height)
            .clipShape(RoundedRectangle(cornerRadius: 30, style: .continuous))
            .onAppear {
                if player.currentItem == nil {
                    let item = AVPlayerItem(url: url)
                    player.replaceCurrentItem(with: item)
                }
                DispatchQueue.main.asyncAfter(deadline: .now() + 0.5, execute: {
                    player.play()
                })
            }
    }
}
struct PlayerView: UIViewRepresentable {
    var url: URL

    func updateUIView(_ uiView: UIView, context: UIViewRepresentableContext<PlayerView>) {
    }

    func makeUIView(context: Context) -> UIView {
        return PlayerUIView(frame: .zero)
    }
}
class PlayerUIView: UIView {
    private let playerLayer = AVPlayerLayer()

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    override init(frame: CGRect) {
        super.init(frame: frame)

        // Load the resource
        let fileUrl = "MY URL VAR FROM PlayerView"

        // Setup the player
        let player = AVPlayer(url: fileUrl)
        playerLayer.player = player
        playerLayer.videoGravity = .resizeAspectFill
        layer.addSublayer(playerLayer)

        // Setup looping
        player.actionAtItemEnd = .none

        // Start the movie
        player.play()
    }

    @objc
    func playerItemDidReachEnd(notification: Notification) {
        playerLayer.player?.seek(to: CMTime.zero)
    }

    override func layoutSubviews() {
        super.layoutSubviews()
        playerLayer.frame = bounds
    }
}
This is to loop a video.
I'm also looking to hide the AVPlayer controls by using:
struct AVPlayerControllerRepresented: UIViewControllerRepresentable {
    var player: AVPlayer

    func makeUIViewController(context: Context) -> AVPlayerViewController {
        let controller = AVPlayerViewController()
        controller.player = player
        controller.showsPlaybackControls = false
        return controller
    }

    func updateUIViewController(_ uiViewController: AVPlayerViewController, context: Context) {
    }
}
Hiding the controls works very well, but I don't know how to combine the two...
Thank you.
You actually don't need 3 different types, just a single UIViewControllerRepresentable with a url:
import AVKit
import SwiftUI
struct PlayerView: UIViewControllerRepresentable {
    var url: URL

    func makeUIViewController(context: Context) -> AVPlayerViewController {
        // create a looping, auto-starting video player:
        let player = AVPlayer(url: url)
        player.actionAtItemEnd = .none
        NotificationCenter.default.addObserver(forName: .AVPlayerItemDidPlayToEndTime, object: player.currentItem, queue: .main) { [weak player] _ in
            player?.seek(to: .zero)
        }
        DispatchQueue.main.asyncAfter(deadline: .now() + 0.5) { [weak player] in
            player?.play()
        }

        // use that player in a view controller:
        let controller = AVPlayerViewController()
        controller.player = player
        controller.videoGravity = .resizeAspectFill
        controller.showsPlaybackControls = false
        return controller
    }

    func updateUIViewController(_ uiViewController: AVPlayerViewController, context: Context) {}
}

struct PlayerView_Previews: PreviewProvider {
    static var previews: some View {
        PlayerView(url: someVideoURL) // replace the URL with your own
    }
}

Create pdf with WKWebView.pdf(configuration:)

I want to create a PDF on macOS with the new WKWebView.pdf(configuration:), which was introduced in macOS 12/iOS 15. It makes use of the new async/await functionality (which I most likely have not grasped entirely, I am afraid...).
Right now, I am getting an error that I have no idea how to handle:
Error Domain=WKErrorDomain Code=1 "An unknown error occurred"
UserInfo={NSLocalizedDescription=An unknown error occurred}
I try to load an HTML string into a web view, from which I then want to generate the PDF. The function I use to generate my PDFDocument looks like this:
func generatePdf() async {
    let webView = WKWebView()
    await webView.loadHTMLString(html, baseURL: nil)

    let config = WKPDFConfiguration()
    config.rect = .init(origin: .zero, size: .init(width: 595.28, height: 841.89))

    do {
        //this is where the error is happening
        let pdfData = try await webView.pdf(configuration: config)
        self.pdf = PDFDocument(data: pdfData)
    } catch {
        print(error) //this error gets printed
    }
}
My best guess as it currently stands is that WKWebView's loadHTMLString has not finished loading the HTML. I did allow outgoing connections in the app sandbox, so that's not it...
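
One way to test that guess, purely as a sketch rather than a confirmed fix: bridge the WKNavigationDelegate didFinish callback into async/await and only request the PDF afterwards. The LoadWaiter helper below is a made-up name for illustration; WKWebView must be used from the main thread.

import WebKit

// Sketch: waits for WKNavigationDelegate's didFinish before the PDF is requested.
final class LoadWaiter: NSObject, WKNavigationDelegate {
    private var continuation: CheckedContinuation<Void, Never>?

    func load(_ html: String, into webView: WKWebView) async {
        await withCheckedContinuation { continuation in
            self.continuation = continuation
            webView.navigationDelegate = self
            webView.loadHTMLString(html, baseURL: nil)
        }
    }

    func webView(_ webView: WKWebView, didFinish navigation: WKNavigation!) {
        continuation?.resume()
        continuation = nil
    }
}

// Possible use inside generatePdf():
// let waiter = LoadWaiter()
// await waiter.load(html, into: webView)
// let pdfData = try await webView.pdf(configuration: config)

This is essentially what the workaround further down does with createPDF(configuration:completionHandler:) and a fixed delay, just without guessing how long the load takes.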
For the sake of completeness, here's the entire code:
import SwiftUI
import PDFKit
import WebKit
@main
struct AFPdfApp: App {
    var body: some Scene {
        WindowGroup {
            ContentView()
        }
    }
}

struct ContentView: View {
    @StateObject private var vm = ViewModel()

    var body: some View {
        VStack {
            TextEditor(text: $vm.html)
                .frame(width: 300.0, height: 200.0)
                .border(Color.accentColor, width: 1.0)
                .padding()
            PdfViewWrapper(pdfDocument: $vm.pdf)
        }
        .toolbar {
            Button("Create PDF") {
                Task {
                    await vm.generatePdf()
                }
            }
        }
    }

    static let initHtml = """
    <h1>Some fancy html</h1>
    <h2>…and now how do I create a pdf from this?</h2>
    """
}

struct ContentView_Previews: PreviewProvider {
    static var previews: some View {
        ContentView()
    }
}

class ViewModel: ObservableObject {
    @Published var html = """
    <h1>Some fancy html</h1>
    <h2>…and now let's create some pdf…</h2>
    """
    @Published var pdf: PDFDocument? = nil

    func generatePdf() async {
        let webView = WKWebView()
        await webView.loadHTMLString(html, baseURL: nil)

        let config = WKPDFConfiguration()
        config.rect = .init(origin: .zero, size: .init(width: 595.28, height: 841.89))

        do {
            let pdfData = try await webView.pdf(configuration: config)
            self.pdf = PDFDocument(data: pdfData)
        } catch {
            print(error)
        }
    }
}

struct PdfViewWrapper: NSViewRepresentable {
    @Binding var pdfDocument: PDFDocument?

    func makeNSView(context: Context) -> PDFView {
        return PDFView()
    }

    func updateNSView(_ nsView: PDFView, context: Context) {
        nsView.document = pdfDocument
    }
}
After Chris made me take another look at it (thanks for that :-) ), I am now a step closer to a working solution.
It really seems as though I have to wait for the webView to load the HTML prior to creating the PDF. While I was not able to make it work with WKWebView.pdf(configuration:), I now have a (kind of…) working solution using WKWebView.createPDF(configuration:completionHandler:):
func generatePdf() {
    webView.loadHTMLString(htmlString, baseURL: nil)

    DispatchQueue.main.asyncAfter(deadline: .now() + 0.5) {
        let config = WKPDFConfiguration()
        config.rect = .init(origin: .zero, size: .init(width: 595.28, height: 841.89))

        self.webView.createPDF(configuration: config) { result in
            switch result {
            case .success(let data):
                self.pdf = PDFDocument(data: data)
            case .failure(let error):
                print(error)
            }
        }
    }
}
I said "kind of works" above, because the resulting pdf seems to introduce a new line after each word, which is weird–but I will scope that issue to another research/question on SO.
Again, for the sake of completeness, here's the whole "app":
import SwiftUI
import PDFKit
import WebKit
@main
struct AFPdfApp: App {
    var body: some Scene {
        WindowGroup {
            ContentView()
        }
    }
}

//MARK: View
struct ContentView: View {
    @StateObject private var vm = ViewModel()

    var body: some View {
        VStack {
            TextEditor(text: $vm.htmlString)
                .frame(width: 300.0, height: 200.0)
                .border(Color.accentColor, width: 1.0)
                .padding()
            WebViewWrapper(htmlString: $vm.htmlString)
            PdfViewRepresentable(pdfDocument: $vm.pdf)
        }
        .toolbar {
            Button("Create PDF") {
                Task {
                    vm.generatePdf()
                }
            }
        }
    }
}

//MARK: ViewModel
class ViewModel: ObservableObject {
    @Published var htmlString = """
    <h1>Some fancy html</h1>
    <h2>…and now let's create some pdf…</h2>
    """
    @Published var webView = WKWebView()
    @Published var pdf: PDFDocument? = nil

    func generatePdf() {
        webView.loadHTMLString(htmlString, baseURL: nil)

        DispatchQueue.main.asyncAfter(deadline: .now() + 0.5) {
            let config = WKPDFConfiguration()
            config.rect = .init(origin: .zero, size: .init(width: 595.28, height: 841.89))

            self.webView.createPDF(configuration: config) { result in
                switch result {
                case .success(let data):
                    self.pdf = PDFDocument(data: data)
                case .failure(let error):
                    print(error)
                }
            }
        }
    }
}

//MARK: ViewRepresentables
struct WebViewWrapper: NSViewRepresentable {
    @Binding var htmlString: String

    public func makeNSView(context: Context) -> WKWebView {
        return WKWebView()
    }

    public func updateNSView(_ nsView: WKWebView, context: Context) {
        nsView.loadHTMLString(htmlString, baseURL: nil)
    }
}

struct PdfViewRepresentable: NSViewRepresentable {
    @Binding var pdfDocument: PDFDocument?

    func makeNSView(context: Context) -> PDFView {
        return PDFView()
    }

    func updateNSView(_ nsView: PDFView, context: Context) {
        nsView.document = pdfDocument
    }
}
Here is a way that I got this working. It uses a completely hidden WKWebView to render the content, using a delegate callback and javascript execution to determine when the page has fully loaded.
When ready, the new iOS 15 WKWebView method .pdf() is called, which generates the PDF data.
Bindings are used to update the parent view, which generates a PDFDocument from the data and navigates to it.
Caveat: The native WKWebView PDF methods do not produce paginated PDF files, so it will be one long page!
In the main view it starts with a button tap. This view has a pdfData variable that is of type Data and will be used to make the PDF document.
Button("Generate PDF") {
// Prevent triggering again while already processing
guard !isWorking else { return }
// Reset flags
isReadyToRenderPDF = false
isWorking = true
// Initialise webView with frame equal to A4 paper size in points
// (I also tried UIScreen.main.bounds)
self.wkWebView = WKWebView(frame: CGRect(origin: .zero, size: CGSize(width: 595, height: 842))) // A4 size
// Set web view navigation delegate.
// You must keep a strong reference to this, so I made it an #State property on this view
// This is a class that takes bindings so it can update the data and signal completion
self.navigationDelegate = WKWebViewDelegate(wkWebView!, pdfData: $data, isReadyToRenderPDF: $isReadyToRenderPDF)
wkWebView!.navigationDelegate = self.navigationDelegate
// Generate HTML. You could also use a simple HTML string.
// This is just my implementation.
let htmlContent = htmlComposer.makeHTMLString()
// Load HTML into web view
wkWebView!.loadHTMLString(htmlContent, baseURL: nil)
// Now the navigation delegate responds when data is updated.
}
The WKWebView delegate class has this callback method.
func webView(_ webView: WKWebView, didFinish navigation: WKNavigation!) {
    // This ensures the following code only runs once, as the delegate method is called multiple times.
    guard !readyOnce else { return }

    // Use JavaScript to check the document ready state (all images and resources loaded)
    webView.evaluateJavaScript("document.readyState == \"complete\"") { result, error in
        if error != nil {
            print(error!.localizedDescription)
            return
        } else if result as? Int == 1 {
            self.readyOnce = true
            // The completion handler is not async, so hop into a Task to call the async pdf() method.
            Task { @MainActor in
                do {
                    // Create PDF using the WKWebView method and assign it to the binding (updates the data variable in the main view)
                    self.pdfData = try await webView.pdf()
                    // Signal to the parent view via the binding
                    self.isReadyToRenderPDF = true
                } catch {
                    print(error.localizedDescription)
                }
            }
        } else {
            return
        }
    }
}
Back in parent view, respond to the change of Boolean value.
.onChange(of: isReadyToRenderPDF) { _ in
    isWorking = false
    DispatchQueue.main.async {
        // This toggles a navigation link or sheet displaying the PDF view using the pdfData variable
        isShowingPDFView = true
    }
}
Finally, here is a PDFKit PDFView wrapped in UIViewRepresentable.
import SwiftUI
import PDFKit
struct PDFKitRepresentedView: UIViewRepresentable {
    let data: Data

    init(_ data: Data) {
        self.data = data
    }

    func makeUIView(context: UIViewRepresentableContext<PDFKitRepresentedView>) -> PDFView {
        // Create PDFKit view and document
        let pdfView = PDFView()
        pdfView.document = PDFDocument(data: data)
        pdfView.autoScales = true
        pdfView.displaysPageBreaks = true
        pdfView.usePageViewController(true, withViewOptions: nil)
        pdfView.displayDirection = .horizontal
        pdfView.displayMode = .singlePage
        return pdfView
    }

    func updateUIView(_ uiView: PDFView, context: UIViewRepresentableContext<PDFKitRepresentedView>) {
        // Not implemented
    }
}

Getting nil when calling a wrapped UIViewController's function in a SwiftUI class

So below is a simple picture taking ViewController that I have
final class TakePhotoViewController: UIViewController, AVCapturePhotoCaptureDelegate {
    var captureSession: AVCaptureSession!
    var cameraOutput: AVCapturePhotoOutput!
    var previewLayer: AVCaptureVideoPreviewLayer!
    let device = AVCaptureDevice.default(for: .video)!

    override func viewDidLoad() {
        print("viewDidLoad")
        setupCameraLayouts()
    }

    private func setupCameraLayouts() {
        print("setupCameraLayouts")
        captureSession = AVCaptureSession()
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        captureSession.sessionPreset = AVCaptureSession.Preset.hd1920x1080
        cameraOutput = AVCapturePhotoOutput()
        previewLayer.frame = CGRect(x: view.frame.origin.x, y: view.frame.origin.y + view.frame.height/13, width: view.frame.width, height: view.frame.height/1.2475)
        do {
            try device.lockForConfiguration()
        } catch {
            return
        }
        device.focusMode = .continuousAutoFocus
        device.unlockForConfiguration()
    }

    private func startCamera() {
        print("startCamera")
        if let input = try? AVCaptureDeviceInput(device: device) {
            if captureSession.canAddInput(input) {
                captureSession.addInput(input)
                if captureSession.canAddOutput(cameraOutput) {
                    captureSession.addOutput(cameraOutput)
                    view.layer.addSublayer(previewLayer)
                    captureSession.startRunning()
                } else {
                    print("else in : captureSession.canAddOutput(cameraOutput)")
                }
            } else {
                print("else in : captureSession.canAddInput(input)")
            }
        } else {
            print("else in : input = try? AVCaptureDeviceInput(device: device)")
        }
    }

    func cameraPressed() {
        print("cameraPressed")
        let settings = AVCapturePhotoSettings()
        let previewPixelType = settings.availablePreviewPhotoPixelFormatTypes.first!
        let previewFormat = [
            kCVPixelBufferPixelFormatTypeKey as String: previewPixelType,
            kCVPixelBufferWidthKey as String: 160,
            kCVPixelBufferHeightKey as String: 160
        ]
        settings.previewPhotoFormat = previewFormat
        cameraOutput.capturePhoto(with: settings, delegate: self)
    }

    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        print("photoOutput")
        captureSession.stopRunning()
        print("Got something")
    }
}
extension TakePhotoViewController: UIViewControllerRepresentable {
    public typealias UIViewControllerType = TakePhotoViewController

    func makeUIViewController(context: UIViewControllerRepresentableContext<TakePhotoViewController>) -> TakePhotoViewController {
        print("makeUIViewController")
        return TakePhotoViewController()
    }

    func updateUIViewController(_ uiViewController: TakePhotoViewController, context: UIViewControllerRepresentableContext<TakePhotoViewController>) {
        print("updateUIViewController")
    }
}
As you can see, I have it "wrapped" using UIViewControllerRepresentable so I can use it in SwiftUI View. Unless there's better ways to do this, I found that this was the only way to do it.
Below is the SwiftUI class where i'm calling this.
struct ContentView: View {
    let TPVC = TakePhotoViewController()

    var body: some View {
        VStack {
            TPVC.startCamera()
            Button(action: {
                self.TPVC.cameraPressed()
            }) {
                Text("Hello World")
                    .fontWeight(.bold)
                    .font(.title)
                    .padding()
                    .background(Color.purple)
                    .cornerRadius(40)
                    .foregroundColor(.white)
                    .padding(10)
                    .overlay(
                        RoundedRectangle(cornerRadius: 40)
                            .stroke(Color.purple, lineWidth: 5)
                    )
            }
        }
    }
}
So I know (from the print statements) that TakePhotoViewController is being created, and at that point its variables (and, really, all variables) aren't nil.
That being said, when the error happens (which is when I click the button), the various variables (captureSession, cameraOutput) are nil, which causes obvious errors. In the ContentView I create a variable for the class instance so I can refer to it whenever, but it seems that calling it again creates a whole new class reference/instance.
Your ContentView is a struct, i.e. a value; it is recreated on each UI refresh, so your TPVC is recreated as well.
Moreover, a UIViewControllerRepresentable is expected to be used as some View inside a ViewBuilder, so calling its methods directly from SwiftUI is a misconception in your code.
The tutorial Interfacing with UIKit should help you.
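
To make that concrete, here is a minimal sketch of the intended pattern, assuming you keep the existing TakePhotoViewController, drop the UIViewControllerRepresentable extension on it, and let it start its own capture session (for example by calling startCamera() from viewDidLoad). This is not the original poster's code; driving the controller from a Binding is just one common approach.

// Sketch: a separate representable owns the controller; SwiftUI state triggers the capture.
struct TakePhotoView: UIViewControllerRepresentable {
    @Binding var takePhoto: Bool   // flipped to true by the SwiftUI button

    func makeUIViewController(context: Context) -> TakePhotoViewController {
        // Assumes TakePhotoViewController starts its own session (e.g. in viewDidLoad).
        TakePhotoViewController()
    }

    func updateUIViewController(_ uiViewController: TakePhotoViewController, context: Context) {
        if takePhoto {
            uiViewController.cameraPressed()
            // Reset the trigger outside of the current view update.
            DispatchQueue.main.async { self.takePhoto = false }
        }
    }
}

struct ContentView: View {
    @State private var takePhoto = false

    var body: some View {
        ZStack(alignment: .bottom) {
            TakePhotoView(takePhoto: $takePhoto)   // the camera preview lives inside the representable
            Button("Take Photo") { takePhoto = true }
                .padding()
        }
    }
}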

How to modify previewView in swift?

I'm building a camera app.
I want the preview and the photo frame to be 1:1 (square).
But how can I do that?
I've tried changing the previewView frame:
self.previewView?.frame.size = CGSize(width: 300, height: 300)
But it does not work.
class CameraViewController: UIViewController {

    // MARK: - Properties
    // MARK: Declared
    var captureSession: AVCaptureSession?
    var captureOutput: AVCapturePhotoOutput?

    // MARK: IBOutlet
    @IBOutlet weak var previewView: PreviewView!

    // MARK: - Methods
    // MARK: View Life Cycle
    override func viewDidLoad() {
        super.viewDidLoad()
        self.configureInput()
        self.configureOutput()
        self.configurePreview()
        self.runCamera()
    }

    // MARK: Configure
    private func configureInput() {
        self.captureSession = AVCaptureSession()
        self.captureSession?.beginConfiguration()
        self.captureSession?.sessionPreset = .hd4K3840x2160
        guard let videoDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back) else { return }
        guard let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice), self.captureSession?.canAddInput(videoDeviceInput) == true else { return }
        self.captureSession?.addInput(videoDeviceInput)
    }

    private func configureOutput() {
        let photoOutput = AVCapturePhotoOutput()
        self.captureOutput = photoOutput
        guard self.captureSession?.canAddOutput(photoOutput) == true else { return }
        self.captureSession?.sessionPreset = .photo
        self.captureSession?.addOutput(photoOutput)
        self.captureSession?.commitConfiguration()
    }

    private func configurePreview() {
        self.previewView?.videoPreviewlayer.session = self.captureSession
    }

    private func runCamera() {
        self.captureSession?.startRunning()
    }
}
This is my code.
I wrote it after reading Apple's article Setting Up a Capture Session (https://developer.apple.com/documentation/avfoundation/cameras_and_media_capture/setting_up_a_capture_session).
You could use this to change the preview layer frame to make it fill your preview view:
override func viewDidLayoutSubviews() {
    super.viewDidLayoutSubviews()
    previewLayer.frame = cameraView.bounds
    previewLayer.videoGravity = .resizeAspectFill
}
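
Since the question specifically asks for a 1:1 preview, here is a variation on the same idea, sketched against the question's own previewView / videoPreviewlayer names (assuming previewView is the full-size container); note it only makes the preview square, the captured photo itself would still have to be cropped to 1:1 after capture.

override func viewDidLayoutSubviews() {
    super.viewDidLayoutSubviews()
    // Square (1:1) preview: use the shorter side of the container and center the layer.
    let side = min(previewView.bounds.width, previewView.bounds.height)
    previewView.videoPreviewlayer.frame = CGRect(
        x: (previewView.bounds.width - side) / 2,
        y: (previewView.bounds.height - side) / 2,
        width: side,
        height: side
    )
    previewView.videoPreviewlayer.videoGravity = .resizeAspectFill
}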