How to use real-time camera streaming in SwiftUI?

I built a StreamingView like this:
struct StreamingView: UIViewRepresentable {
    func updateUIView(_ uiView: UIView, context: UIViewRepresentableContext<StreamingView>) {
        //
    }
    func makeUIView(context: UIViewRepresentableContext<StreamingView>) -> UIView {
        let view = UIView()
        let captureSession = AVCaptureSession()
        captureSession.sessionPreset = .photo
        guard let captureDevice = AVCaptureDevice.default(for: .video) else { return view }
        guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return view }
        captureSession.addInput(input)
        captureSession.startRunning()
        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        view.layer.addSublayer(previewLayer)
        previewLayer.frame = view.frame
        return view
    }
}
But it didn't work. How could I build a pure SwiftUI view for streaming?

Try the demo code below.
Note: make sure all preparations are done, e.g. Camera is enabled in capabilities, NSCameraUsageDescription is added to Info.plist... Also, the camera can be tested only on a real device.
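For reference, the Info.plist entry looks like this (the description string below is just an example):
<key>NSCameraUsageDescription</key>
<string>This app uses the camera to show a live preview.</string>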
import SwiftUI
import UIKit
import AVFoundation

class PreviewView: UIView {
    private var captureSession: AVCaptureSession?

    init() {
        super.init(frame: .zero)
        var allowedAccess = false
        let blocker = DispatchGroup()
        blocker.enter()
        AVCaptureDevice.requestAccess(for: .video) { flag in
            allowedAccess = flag
            blocker.leave()
        }
        blocker.wait()
        if !allowedAccess {
            print("!!! NO ACCESS TO CAMERA")
            return
        }

        // setup session
        let session = AVCaptureSession()
        session.beginConfiguration()
        let videoDevice = AVCaptureDevice.default(.builtInWideAngleCamera,
                                                  for: .video, position: .unspecified) // alternative: AVCaptureDevice.default(for: .video)
        guard videoDevice != nil, let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice!), session.canAddInput(videoDeviceInput) else {
            print("!!! NO CAMERA DETECTED")
            return
        }
        session.addInput(videoDeviceInput)
        session.commitConfiguration()
        self.captureSession = session
    }

    override class var layerClass: AnyClass {
        AVCaptureVideoPreviewLayer.self
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    var videoPreviewLayer: AVCaptureVideoPreviewLayer {
        return layer as! AVCaptureVideoPreviewLayer
    }

    override func didMoveToSuperview() {
        super.didMoveToSuperview()
        if nil != self.superview {
            self.videoPreviewLayer.session = self.captureSession
            self.videoPreviewLayer.videoGravity = .resizeAspect
            self.captureSession?.startRunning()
        } else {
            self.captureSession?.stopRunning()
        }
    }
}

struct PreviewHolder: UIViewRepresentable {
    func makeUIView(context: UIViewRepresentableContext<PreviewHolder>) -> PreviewView {
        PreviewView()
    }

    func updateUIView(_ uiView: PreviewView, context: UIViewRepresentableContext<PreviewHolder>) {
    }

    typealias UIViewType = PreviewView
}

struct DemoVideoStreaming: View {
    var body: some View {
        VStack {
            PreviewHolder()
        }.frame(minWidth: 0, maxWidth: .infinity, minHeight: 0, maxHeight: .infinity, alignment: .center)
    }
}
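One caveat worth noting (my addition, not part of the original answer): startRunning() is a blocking call, and Apple recommends calling it off the main thread. A minimal variation of didMoveToSuperview that does so:
override func didMoveToSuperview() {
    super.didMoveToSuperview()
    if self.superview != nil {
        self.videoPreviewLayer.session = self.captureSession
        self.videoPreviewLayer.videoGravity = .resizeAspect
        // startRunning() blocks the calling thread until the session starts.
        DispatchQueue.global(qos: .userInitiated).async { [weak self] in
            self?.captureSession?.startRunning()
        }
    } else {
        self.captureSession?.stopRunning()
    }
}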

Related

Adding a continuously looped video to macOS SwiftUI app

I want to add a few looped videos to a macOS app written in SwiftUI, but when I run it the video doesn't play.
I'm not sure if I've over-engineered this or have a bug I can't spot, so hopefully someone can help!
I want to call VideoTutorialView(videoName:) passing in a different video.
import SwiftUI
import AVFoundation

struct VideoTutorialView: View {
    @State private var player = AVQueuePlayer()
    let center = NotificationCenter.default
    let videoName: String

    var body: some View {
        PlayerView(videoName: videoName, player: player)
            .aspectRatio(CGFloat(16 / 9), contentMode: .fill)
            .frame(height: 140)
            .background(Color.gray.opacity(0.3))
            .cornerRadius(.roundedCorner)
            .onAppear { player.play() }
            .onDisappear { player.pause() }
            .onReceive(center.publisher(for: NSApplication.willResignActiveNotification)) { _ in
                player.pause()
            }
            .onReceive(center.publisher(for: NSApplication.didBecomeActiveNotification)) { _ in
                player.play()
            }
    }
}

struct PlayerView: NSViewRepresentable {
    private let videoName: String
    private let player: AVQueuePlayer

    init(videoName: String, player: AVQueuePlayer) {
        self.videoName = videoName
        self.player = player
    }

    func updateNSView(_ nsView: NSView, context: NSViewRepresentableContext<PlayerView>) {}

    func makeNSView(context: Context) -> NSView {
        return LoopingPlayerUIView(videoName: videoName, player: player)
    }
}

class LoopingPlayerUIView: NSView {
    private let playerLayer = AVPlayerLayer()
    private var playerLooper: AVPlayerLooper?

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    init(videoName: String,
         player: AVQueuePlayer,
         videoGravity: AVLayerVideoGravity = .resizeAspectFill) {
        super.init(frame: .zero)
        guard let fileUrl = Bundle.main.url(forResource: videoName, withExtension: "mp4") else { return }
        let asset = AVAsset(url: fileUrl)
        let item = AVPlayerItem(asset: asset)
        player.isMuted = true
        playerLayer.player = player
        playerLayer.videoGravity = videoGravity
        layer?.addSublayer(playerLayer)
        playerLooper = AVPlayerLooper(player: player, templateItem: item)
    }

    override func layout() {
        super.layout()
        playerLayer.frame = bounds
    }
}
super.init(frame: .zero)
looks suspicious. You should try setting some frame values that are consistent with your SwiftUI view, so the video becomes visible.
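As an additional guess (an assumption on my part, not confirmed in the thread): on macOS an NSView has no backing layer unless wantsLayer is set, so layer?.addSublayer(playerLayer) may silently do nothing. A minimal sketch of the init with that change:
init(videoName: String,
     player: AVQueuePlayer,
     videoGravity: AVLayerVideoGravity = .resizeAspectFill) {
    super.init(frame: .zero)
    // NSView is not layer-backed by default; without this, `layer` is nil
    // and the addSublayer(_:) call below silently does nothing.
    wantsLayer = true
    guard let fileUrl = Bundle.main.url(forResource: videoName, withExtension: "mp4") else { return }
    let item = AVPlayerItem(asset: AVAsset(url: fileUrl))
    player.isMuted = true
    playerLayer.player = player
    playerLayer.videoGravity = videoGravity
    layer?.addSublayer(playerLayer)
    playerLooper = AVPlayerLooper(player: player, templateItem: item)
}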

Is there any way to get the current page number in PDFView and use it in SwiftUI?

I am making a PDF reader app using PDFKit, but I am unable to get the current page number. I can get the total number of pages via pdfView.document?.pageCount. An alternative would be to change the page with a button and count manually, but I want to keep the default PDFView behavior of changing pages by swipe (by setting pdfView.usePageViewController(true)), which does not provide any method to get the current page number.
Code
struct ContentView: View {
    let url = Bundle.main.url(forResource: "file", withExtension: "pdf")
    var body: some View {
        VStack {
            PDFKitRepresentedView(data: try! Data(contentsOf: url!))
        }
    }
}

struct ContentView_Previews: PreviewProvider {
    static var previews: some View {
        ContentView()
    }
}

import PDFKit
import SwiftUI

struct PDFKitRepresentedView: UIViewRepresentable {
    typealias UIViewType = PDFView
    let data: Data

    func makeUIView(context _: UIViewRepresentableContext<PDFKitRepresentedView>) -> UIViewType {
        // Create a `PDFView` and set its `PDFDocument`.
        let pdfView = PDFView(frame: CGRect(x: 0, y: 0, width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height))
        pdfView.document = PDFDocument(data: data)
        pdfView.backgroundColor = UIColor.red
        pdfView.displayMode = .singlePage
        pdfView.displayDirection = .horizontal
        pdfView.usePageViewController(true)
        pdfView.maxScaleFactor = pdfView.scaleFactorForSizeToFit
        pdfView.minScaleFactor = pdfView.scaleFactorForSizeToFit
        pdfView.autoScales = true
        return pdfView
    }

    func updateUIView(_ pdfView: UIViewType, context _: UIViewRepresentableContext<PDFKitRepresentedView>) {
        pdfView.document = PDFDocument(data: data)
    }
}
Update
According to the suggestion given by workingdog support Ukraine below, the Coordinator class prints the result, but when I use a Binding to pass currentPage to SwiftUI it's not working: the page number does not update in the UI, and on swiping it repeats the first two pages only.
New updated code
struct ContentView: View {
    @State var currentPage = -1
    @State var totalPages: Int?
    let url = Bundle.main.url(forResource: "file", withExtension: "pdf")
    var body: some View {
        VStack {
            HStack {
                Text("\(currentPage)/")
                Text("\(totalPages ?? 0)")
            }
            if let url = url {
                PDFKitRepresentedView(data: try! Data(contentsOf: url), totalPages: $totalPages, currentPage: $currentPage)
            }
        }
    }
}

struct PDFKitRepresentedView: UIViewRepresentable {
    typealias UIViewType = PDFView
    let data: Data
    @Binding var totalPages: Int?
    @Binding var currentPage: Int
    let pdfView = PDFView(frame: CGRect(x: 0, y: 0, width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height))

    func makeUIView(context: Context) -> UIViewType {
        pdfView.document = PDFDocument(data: data)
        pdfView.backgroundColor = UIColor.red
        pdfView.displayMode = .singlePage
        pdfView.displayDirection = .horizontal
        pdfView.usePageViewController(true)
        pdfView.maxScaleFactor = pdfView.scaleFactorForSizeToFit
        pdfView.minScaleFactor = pdfView.scaleFactorForSizeToFit
        pdfView.autoScales = true
        pdfView.delegate = context.coordinator
        return pdfView
    }

    func updateUIView(_ pdfView: UIViewType, context _: Context) {
        pdfView.document = PDFDocument(data: data)
        DispatchQueue.main.async {
            totalPages = pdfView.document?.pageCount
        }
    }

    func makeCoordinator() -> Coordinator {
        return Coordinator(self, cp: $currentPage)
    }

    class Coordinator: NSObject, PDFViewDelegate {
        var parent: PDFKitRepresentedView
        @Binding var currentPage: Int

        init(_ parent: PDFKitRepresentedView, cp: Binding<Int>) {
            self.parent = parent
            _currentPage = cp
            super.init()
            NotificationCenter.default.addObserver(self, selector: #selector(pageChangeHandler(_:)), name: .PDFViewPageChanged, object: nil)
        }

        @objc func pageChangeHandler(_ notification: Notification) {
            if let thePage = parent.pdfView.currentPage,
               let ndx = parent.pdfView.document?.index(for: thePage),
               currentPage != ndx {
                DispatchQueue.main.async {
                    self.currentPage = ndx
                }
                print("--------> currentPageIndex: \(ndx) ")
            }
        }
    }
}
According to the docs at https://developer.apple.com/documentation/pdfkit/pdfview, there is a currentPage property that returns the current page. You could then use something like this to get its index:
if let thePage = pdfView.currentPage, let ndx = pdfView.document?.index(for: thePage) {
    print("--> currentPageIndex: \(ndx) ")
    // ....
}
EDIT-1:
Try the following approach, using a Coordinator class as the PDFViewDelegate and getting notified of .PDFViewPageChanged via NotificationCenter.default.addObserver(...).
You will have to adjust the code for your own purpose.
struct PDFKitRepresentedView: UIViewRepresentable {
    typealias UIViewType = PDFView
    let data: Data
    let pdfView = PDFView(frame: CGRect(x: 0, y: 0, width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height))

    func makeUIView(context: Context) -> UIViewType {
        pdfView.document = PDFDocument(data: data)
        pdfView.backgroundColor = UIColor.red
        pdfView.displayMode = .singlePage
        pdfView.displayDirection = .horizontal
        pdfView.usePageViewController(true)
        pdfView.maxScaleFactor = pdfView.scaleFactorForSizeToFit
        pdfView.minScaleFactor = pdfView.scaleFactorForSizeToFit
        pdfView.autoScales = true
        pdfView.delegate = context.coordinator
        return pdfView
    }

    func updateUIView(_ pdfView: UIViewType, context _: Context) {
        pdfView.document = PDFDocument(data: data)
    }

    func makeCoordinator() -> Coordinator {
        return Coordinator(self)
    }

    class Coordinator: NSObject, PDFViewDelegate {
        var parent: PDFKitRepresentedView
        var prevPage = -1

        init(_ parent: PDFKitRepresentedView) {
            self.parent = parent
            super.init()
            NotificationCenter.default.addObserver(self, selector: #selector(pageChangeHandler(_:)), name: .PDFViewPageChanged, object: nil)
        }

        @objc func pageChangeHandler(_ notification: Notification) {
            if let thePage = parent.pdfView.currentPage,
               let ndx = parent.pdfView.document?.index(for: thePage),
               prevPage != ndx {
                print("--------> currentPageIndex: \(ndx) ")
                prevPage = ndx
            }
        }
    }
}
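One small addition, not in the original answer: since the Coordinator registers itself with NotificationCenter, it is tidy to deregister when it goes away, e.g.
deinit {
    NotificationCenter.default.removeObserver(self)
}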
EDIT-2:
To access the currentPage in ContentView, that is, outside the PDFViewer, you can use the following approach. It uses .onReceive(...) of a page-change notification and some minor changes to the original code.
struct ContentView: View {
    @State var currentPage = 0
    let pdfViewer: PDFViewer // <--- here

    init() {
        if let url = Bundle.main.url(forResource: "file", withExtension: "pdf"),
           let docData = try? Data(contentsOf: url) {
            self.pdfViewer = PDFViewer(data: docData)
        } else {
            self.pdfViewer = PDFViewer(data: Data())
        }
    }

    var body: some View {
        VStack {
            Text("page \(currentPage)")
            pdfViewer
                .onReceive(NotificationCenter.default.publisher(for: .PDFViewPageChanged)) { _ in
                    if let thePage = pdfViewer.pdfView.currentPage,
                       let ndx = pdfViewer.pdfView.document?.index(for: thePage), currentPage != ndx {
                        currentPage = ndx
                    }
                }
        }
    }
}

struct PDFViewer: UIViewRepresentable {
    typealias UIViewType = PDFView
    let data: Data
    let pdfView = PDFView(frame: CGRect(x: 0, y: 0, width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height))

    func makeUIView(context: Context) -> UIViewType {
        pdfView.document = PDFDocument(data: data)
        pdfView.backgroundColor = UIColor.red
        pdfView.displayMode = .singlePage
        pdfView.displayDirection = .horizontal
        pdfView.usePageViewController(true)
        pdfView.maxScaleFactor = pdfView.scaleFactorForSizeToFit
        pdfView.minScaleFactor = pdfView.scaleFactorForSizeToFit
        pdfView.autoScales = true
        return pdfView
    }

    func updateUIView(_ pdfView: UIViewType, context _: Context) {
        pdfView.document = PDFDocument(data: data)
    }
}

AVCaptureDevice builtInWideAngleCamera image does not match preview

I have a Swift project where I am using a UIImageView to show a live capture preview and 'freeze' this image whenever the user clicks a 'Take photo' button, showing it in another UIImageView with identical dimensions and position.
This works great on devices that are able to use builtInDualCamera (such as an iPhone X), but on devices that fall back to builtInWideAngleCamera (such as a 6th-gen iPad mini), the image appears cropped/zoomed in.
Can someone explain whether it is possible, and how, to get an image identical to the one shown in the preview when using builtInWideAngleCamera?
A minimal reproducible example can be found below (simply create a storyboard with 2 buttons and 2 UIImageViews and hook them up).
import UIKit
import AVFoundation

class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate, AVCapturePhotoCaptureDelegate {
    @IBOutlet weak var cameraImageView: UIImageView!
    @IBOutlet weak var userImageView: UIImageView!

    var captureSession: AVCaptureSession? = AVCaptureSession()
    var currentDevice: AVCaptureDevice?
    var videoFileOutput: AVCaptureMovieFileOutput?
    var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
    var cameraOutput: AVCapturePhotoOutput?

    func setupCamSession() {
        if #available(iOS 10.0, *) {
            if let device = AVCaptureDevice.default(.builtInDualCamera, for: AVMediaType.video, position: .front) {
                currentDevice = device
            } else if let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: AVMediaType.video, position: .front) {
                currentDevice = device
            }
        } else {
            // Fallback on earlier versions
            let devices = AVCaptureDevice.devices().filter { ($0 as AnyObject).hasMediaType(AVMediaType.video) && ($0 as AnyObject).position == AVCaptureDevice.Position.front }
            if let captureDevice = devices.first {
                currentDevice = captureDevice
            }
        }
        if currentDevice == nil {
            print("failed")
            return
        } else {
            captureSession?.sessionPreset = AVCaptureSession.Preset.medium
        }
        guard let captureDeviceInput = try? AVCaptureDeviceInput(device: currentDevice!) else {
            return
        }
        if (captureSession?.canAddInput(captureDeviceInput))! {
            captureSession?.addInput(captureDeviceInput)
            cameraOutput = AVCapturePhotoOutput()
            if (captureSession?.canAddOutput(cameraOutput!))! {
                captureSession?.addOutput(cameraOutput!)
            }
        } else {
            print("failed")
            return
        }
    }

    func startCamSession() {
        if captureSession == nil {
            print("Warning: no captureSession detected")
            return
        }
        cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
        cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspect
        cameraPreviewLayer?.frame = cameraImageView.bounds
        cameraImageView.layer.addSublayer(cameraPreviewLayer!)
        if let connection = cameraPreviewLayer?.connection {
            let previewLayerConnection: AVCaptureConnection = connection
            if previewLayerConnection.isVideoOrientationSupported {
                previewLayerConnection.videoOrientation = .portrait
                cameraPreviewLayer?.frame = cameraImageView.bounds
            }
            captureSession?.startRunning()
        }
    }

    func stopCamSession() {
        captureSession?.stopRunning()
    }

    func photoOutput(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
        if let error = error {
            print(error.localizedDescription)
        }
        if let sampleBuffer = photoSampleBuffer, let previewBuffer = previewPhotoSampleBuffer {
            let imageData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: sampleBuffer, previewPhotoSampleBuffer: previewBuffer)
            let dataProvider = CGDataProvider(data: imageData! as CFData)
            let cgImageRef = CGImage(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: CGColorRenderingIntent.absoluteColorimetric)
            let image = UIImage(cgImage: cgImageRef!, scale: 1.0, orientation: UIImage.Orientation.leftMirrored)
            self.userImageView.contentMode = .scaleAspectFit
            self.userImageView.image = image
            cameraPreviewLayer?.removeFromSuperlayer()
            self.stopCamSession()
        } else {
        }
    }

    override func viewDidLoad() {
        super.viewDidLoad()
        setupCamSession()
    }

    @IBAction func startPressed(_ sender: Any) {
        startCamSession()
    }

    @IBAction func takePhotoPressed(_ sender: Any) {
        let settings = AVCapturePhotoSettings()
        let previewPixelType = settings.availablePreviewPhotoPixelFormatTypes.first!
        let previewFormat = [
            kCVPixelBufferPixelFormatTypeKey as String: previewPixelType,
            kCVPixelBufferWidthKey as String: 640,
            kCVPixelBufferHeightKey as String: 480
        ]
        settings.previewPhotoFormat = previewFormat
        cameraOutput?.capturePhoto(with: settings, delegate: self)
    }
}
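For what it's worth, here is a sketch of one possible direction (an assumption on my part; this thread records no accepted fix): crop the captured photo to the region the preview layer actually displays, using metadataOutputRectConverted(fromLayerRect:) to map the layer's visible rect into normalized capture coordinates.
// Hypothetical helper: crops `image` to what `previewLayer` was showing.
// Assumes the photo buffer and the preview connection share the same orientation.
func cropToPreview(image: UIImage, previewLayer: AVCaptureVideoPreviewLayer) -> UIImage? {
    guard let cgImage = image.cgImage else { return nil }
    // Normalized (0...1) rect of the layer's visible area in capture-device coordinates.
    let outputRect = previewLayer.metadataOutputRectConverted(fromLayerRect: previewLayer.bounds)
    let width = CGFloat(cgImage.width)
    let height = CGFloat(cgImage.height)
    let cropRect = CGRect(x: outputRect.origin.x * width,
                          y: outputRect.origin.y * height,
                          width: outputRect.size.width * width,
                          height: outputRect.size.height * height)
    guard let cropped = cgImage.cropping(to: cropRect) else { return nil }
    return UIImage(cgImage: cropped, scale: image.scale, orientation: image.imageOrientation)
}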

ARSession currentFrame is missing the AR Model Entities

I have the following ARView:
import SwiftUI
import UIKit
import RealityKit
import ARKit

struct ARViewContainer: UIViewRepresentable {
    @EnvironmentObject var selectedFood: SelectedFood
    @EnvironmentObject var arSession: ARSessionObservable

    func makeCoordinator() -> Coordinator {
        Coordinator(self)
    }

    func makeUIView(context: Context) -> ARView {
        let arView = ARView(frame: .zero)
        let config = ARWorldTrackingConfiguration()
        config.planeDetection = [.vertical, .horizontal]
        config.environmentTexturing = .automatic
        if ARWorldTrackingConfiguration.supportsSceneReconstruction(.mesh) {
            config.sceneReconstruction = .mesh
        }
        arView.session.delegate = context.coordinator
        arView.session.run(config)
        arSession.session = arView.session
        return arView
    }

    func updateUIView(_ uiView: ARView, context: Context) {
        if !selectedFood.food.image.isEmpty {
            let data = try! Data(contentsOf: URL(string: self.selectedFood.food.image)!)
            let fileURL = FileManager.default.temporaryDirectory.appendingPathComponent(UUID().uuidString)
            try! data.write(to: fileURL)
            do {
                let texture = try TextureResource.load(contentsOf: fileURL)
                var material = SimpleMaterial()
                material.baseColor = MaterialColorParameter.texture(texture)
                material.tintColor = UIColor.white.withAlphaComponent(0.99)
                let entity = ModelEntity(mesh: .generatePlane(width: 0.1, height: 0.1), materials: [material])
                let anchor = AnchorEntity(.plane(.any, classification: .any, minimumBounds: .zero))
                anchor.addChild(entity)
                uiView.scene.addAnchor(anchor)
            } catch {
                print(error.localizedDescription)
            }
        }
    }

    class Coordinator: NSObject, ARSessionDelegate, ARSCNViewDelegate {
        var arVC: ARViewContainer

        init(_ arViewContainer: ARViewContainer) {
            self.arVC = arViewContainer
        }

        func session(_ session: ARSession, didUpdate frame: ARFrame) {
        }

        func session(_ session: ARSession, didAdd anchors: [ARAnchor]) {
        }
    }
}
And in HomeView I have the following two variables:
@StateObject var arSession: ARSessionObservable = ARSessionObservable()
@State private var capturedImage: UIImage = UIImage()
And the following button with an action:
Button {
    if let capturedFrame = arSession.session.currentFrame {
        let ciimg = CIImage(cvPixelBuffer: capturedFrame.capturedImage)
        if let cgImage = convertCIImageToCGImage(inputImage: ciimg) {
            capturedImage = UIImage(cgImage: cgImage).rotate(radians: .pi / 2)
            self.isShowingMail = true
        }
    }
} label: {
    Image("ShareScreen")
        .resizable()
        .aspectRatio(contentMode: .fit)
        .frame(width: 66, height: 66, alignment: .center)
}
This takes the currentFrame from the session and opens a mail sharing modal with an attachment:
.sheet(isPresented: $isShowingMail) {
    MailComposeViewController(toRecipients: [], mailBody: nil, imageAttachment: capturedImage) {
        self.isShowingMail = false
    }
}
The mail sharing:
func makeUIViewController(context: UIViewControllerRepresentableContext<MailComposeViewController>) -> MFMailComposeViewController {
    let mail = MFMailComposeViewController()
    mail.mailComposeDelegate = context.coordinator
    mail.setToRecipients(self.toRecipients)
    if let body = mailBody {
        mail.setMessageBody(body, isHTML: true)
    }
    if let image = imageAttachment {
        if let imageData = image.pngData() {
            mail.addAttachmentData(imageData, mimeType: "image/png", fileName: "image.png")
        }
    }
    return mail
}
The problem is that the Model Entities are present in the preview, photo below:
But when I press share, the model is missing from the frame in the mail preview:
I managed to make it work by moving arView: ARView! outside the ARViewContainer. (ARFrame.capturedImage is just the raw camera buffer, so the rendered entities never appear in it, while ARView's snapshot captures the composited scene.)
var arView: ARView!
struct ARViewContainer: UIViewRepresentable {
    func makeUIView(context: Context) -> ARView {
        arView = ARView(frame: .zero)
        let config = ARWorldTrackingConfiguration()
        config.planeDetection = [.vertical, .horizontal]
        config.environmentTexturing = .automatic
        if ARWorldTrackingConfiguration.supportsSceneReconstruction(.mesh) {
            config.sceneReconstruction = .mesh
        }
        arView.session.delegate = context.coordinator
        arView.session.run(config)
        return arView
    }
}
And then calling the snapshot function on arView in the other View:
Button {
    arView.snapshot(saveToHDR: false) { image in
        let image = UIImage(data: (image?.pngData())!)
        capturedImage = image!
        self.isShowingMail = true
    }
}

Swift 3: How do I enable flash on custom AVFoundation camera?

I have a very basic AVFoundation camera that has a captureButton that will take a photo and send that photo to the secondCameraController for it to be displayed. My problem is that there is a lot of iOS 10 deprecation and I'm not sure how to add a flash when I press the captureButton. Any help will be highly appreciated. My code is below. Thank you guys.
class CameraController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    let captureSession = AVCaptureSession()
    var previewLayer: CALayer!
    var captureDevice: AVCaptureDevice!
    var takePhoto: Bool = false

    override func viewDidLoad() {
        super.viewDidLoad()
        view.backgroundColor = .white
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        prepareCamera()
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        navigationController?.setNavigationBarHidden(true, animated: true)
    }

    let cameraView: UIView = {
        let view = UIView()
        view.backgroundColor = .red
        return view
    }()

    func prepareCamera() {
        captureSession.sessionPreset = AVCaptureSessionPresetPhoto
        if let availableDevices = AVCaptureDeviceDiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: .back).devices {
            captureDevice = availableDevices.first
            beginSession()
        }
    }

    func beginSession() {
        do {
            let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
            captureSession.addInput(captureDeviceInput)
        } catch {
            print(error.localizedDescription)
        }
        if let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
            self.previewLayer = previewLayer
            self.view.layer.addSublayer(self.previewLayer)
            self.previewLayer.frame = CGRect(x: 0, y: 0, width: view.frame.width, height: view.frame.height)
            previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
            self.view.addSubview(captureButton)
            let width: CGFloat = 85
            captureButton.frame = CGRect(x: (previewLayer.frame.width / 2) - width / 2, y: (previewLayer.frame.height) - width - 25, width: width, height: 85)
            captureSession.startRunning()
            let dataOutput = AVCaptureVideoDataOutput()
            dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)]
            dataOutput.alwaysDiscardsLateVideoFrames = true
            if captureSession.canAddOutput(dataOutput) {
                captureSession.addOutput(dataOutput)
            }
            captureSession.commitConfiguration()
            let queue = DispatchQueue(label: "com.cheekylabsltd.camera")
            dataOutput.setSampleBufferDelegate(self, queue: queue)
        }
    }

    func handleCapture() {
        takePhoto = true
    }

    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
        if takePhoto {
            takePhoto = false
            if let image = self.getImageFromSampleBuffer(buffer: sampleBuffer) {
                let secondController = SecondCameraController()
                secondController.takenPhoto = image
                DispatchQueue.main.async {
                    self.present(secondController, animated: true, completion: {
                        self.stopCaptureSession()
                    })
                }
            }
        }
    }

    func getImageFromSampleBuffer(buffer: CMSampleBuffer) -> UIImage? {
        if let pixelBuffer = CMSampleBufferGetImageBuffer(buffer) {
            let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
            let context = CIContext()
            let imageRect = CGRect(x: 0, y: 0, width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer))
            if let image = context.createCGImage(ciImage, from: imageRect) {
                return UIImage(cgImage: image, scale: UIScreen.main.scale, orientation: .right)
            }
        }
        return nil
    }

    func stopCaptureSession() {
        self.captureSession.stopRunning()
        if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
            for input in inputs {
                self.captureSession.removeInput(input)
            }
        }
    }

    lazy var captureButton: UIButton = {
        let button = UIButton(type: .system)
        button.backgroundColor = .white
        button.layer.cornerRadius = 42.5
        button.clipsToBounds = true
        button.alpha = 0.40
        button.layer.borderWidth = 4
        button.layer.borderColor = greenColor.cgColor
        button.addTarget(self, action: #selector(handleCapture), for: .touchUpInside)
        return button
    }()
}
Try this code:
Swift v3.0
private func flashOn(device: AVCaptureDevice) {
    do {
        if device.hasTorch {
            try device.lockForConfiguration()
            device.torchMode = .on
            device.flashMode = .on
            device.unlockForConfiguration()
        }
    } catch {
        // DISABLE FLASH BUTTON HERE IF ERROR
        print("Device torch Flash Error")
    }
}

// FOR FLASH OFF CODE
private func flashOff(device: AVCaptureDevice) {
    do {
        if device.hasTorch {
            try device.lockForConfiguration()
            device.torchMode = .off
            device.flashMode = .off
            device.unlockForConfiguration()
        }
    } catch {
        // DISABLE FLASH BUTTON HERE IF ERROR
        print("Device torch Flash Error")
    }
}
// METHOD
// private let session = AVCaptureSession()

// MARK: FLASH UTILITY METHODS
func toggleFlash() {
    var device: AVCaptureDevice!
    if #available(iOS 10.0, *) {
        let videoDeviceDiscoverySession = AVCaptureDeviceDiscoverySession(deviceTypes: [.builtInWideAngleCamera, .builtInDuoCamera], mediaType: AVMediaTypeVideo, position: .unspecified)
        let devices = videoDeviceDiscoverySession.devices!
        device = devices.first!
    } else {
        // Fallback on earlier versions
        device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
    }
    if (device as AnyObject).hasMediaType(AVMediaTypeVideo) {
        if device.hasTorch {
            self.session.beginConfiguration()
            // self.objOverlayView.disableCenterCameraBtn()
            if device.isTorchActive == false {
                self.flashOn(device: device)
            } else {
                self.flashOff(device: device)
            }
            // self.objOverlayView.enableCenterCameraBtn()
            self.session.commitConfiguration()
        }
    }
}
Swift 4
So there are two different behaviors to choose from in AVFoundation. One is a capture-device torch switch. Connect the torchSwitch action to some view, and be sure to change CameraManager.shared.backDevice to your instance of the front or back device that provides the current input.
@IBAction func torchSwitch(_ sender: Any) {
    guard let device = CameraManager.shared.backDevice else { return }
    guard device.isTorchAvailable else { return }
    do {
        try device.lockForConfiguration()
        device.torchMode = device.torchMode == .on ? .off : .on
        if device.torchMode == .on {
            try device.setTorchModeOn(level: 0.7)
        }
        device.unlockForConfiguration()
    } catch {
        debugPrint(error)
    }
}
AVFoundation has deprecated device.flashMode
Now to set the flash, declare a variable on the camera or VC. The value here will be the default.
var flash: AVCaptureDevice.FlashMode = .off
Connect this action to some view:
@IBAction func torchSwitch(_ sender: Any) { flash = flash == .on ? .off : .on }
Then when you want to capture an image, use AVCapturePhotoOutput and prepare the photo settings. stillCameraOutput is an instance of AVCapturePhotoOutput.
let settings = AVCapturePhotoSettings()
settings.flashMode = flash
stillCameraOutput.capturePhoto(with: settings, delegate: self)
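As a side note (my addition, not from the original answer): AVCapturePhotoOutput exposes supportedFlashModes, so you can guard against requesting a mode the current device cannot deliver:
let settings = AVCapturePhotoSettings()
// Only request flash if the output actually supports the chosen mode.
if stillCameraOutput.supportedFlashModes.contains(flash) {
    settings.flashMode = flash
}
stillCameraOutput.capturePhoto(with: settings, delegate: self)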
Swift 4:
The following code works fine for me.
private enum FlashPhotoMode {
    case on
    case off
}

@IBOutlet weak var flashPhotoModeButton: UIButton!

@IBAction func toggleFlashPhotoMode(_ flashPhotoModeButton: UIButton) {
    sessionQueue.async {
        self.flashPhotoMode = (self.flashPhotoMode == .on) ? .off : .on
        let flashPhotoMode = self.flashPhotoMode
        DispatchQueue.main.async {
            if flashPhotoMode == .on {
                self.flashPhotoModeButton.setBackgroundImage(UIImage(named: "flashON"), for: .normal)
                print("flashON")
            } else {
                self.flashPhotoModeButton.setBackgroundImage(UIImage(named: "flashOFF"), for: .normal)
                print("flashOFF")
            }
        }
    }
}

@IBAction private func capturePhoto(_ photoButton: UIButton) {
    ................
    .......................
    if self.videoDeviceInput.device.isFlashAvailable {
        if self.flashPhotoMode == .on {
            photoSettings.flashMode = .on
            print("FLASH ON ")
        } else {
            print("FLASH OFF ")
            photoSettings.flashMode = .off
        }
    }
}
Thanks!