How to get the value of a variable outside a function - Swift

I am working on a robotics project and trying to get the position of a tracked object as the value of the yTrack variable (see the code). I can print yTrack from the func handleVisionRequestUpdate, but I need access to yTrack outside this function in order to use it in other functions, getCoord() for example. Please help!
import AVFoundation
import Vision
import UIKit
import Foundation
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
var protocolString: String?
var inputStream: InputStream?
var outputStream: OutputStream?
var dataAsString: String?
var yTrack: Double?
var transformedRect: CGRect?
@IBOutlet private weak var cameraView: UIView?
@IBOutlet private weak var highlightView: UIView? {
didSet {
self.highlightView?.layer.borderColor = UIColor.red.cgColor
self.highlightView?.layer.borderWidth = 4
self.highlightView?.backgroundColor = .clear
}
}
private let visionSequenceHandler = VNSequenceRequestHandler()
private lazy var cameraLayer: AVCaptureVideoPreviewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
private lazy var captureSession: AVCaptureSession = {
let session = AVCaptureSession()
session.sessionPreset = AVCaptureSession.Preset.photo
guard
let backCamera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back),
let input = try? AVCaptureDeviceInput(device: backCamera)
else { return session }
session.addInput(input)
return session
}()
override func viewDidLoad() {
super.viewDidLoad()
self.highlightView?.frame = .zero
self.cameraView?.layer.addSublayer(self.cameraLayer)
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "MyQueue"))
self.captureSession.addOutput(videoOutput)
self.captureSession.startRunning()
}
override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
self.cameraLayer.frame = self.cameraView?.bounds ?? .zero
}
public var lastObservation: VNDetectedObjectObservation?
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard
let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer),
let lastObservation = self.lastObservation
else { return }
let request = VNTrackObjectRequest(detectedObjectObservation: lastObservation, completionHandler: self.handleVisionRequestUpdate)
request.trackingLevel = .fast
do {
try self.visionSequenceHandler.perform([request], on: pixelBuffer)
} catch {
print("Throws: \(error)")
}
}
public func handleVisionRequestUpdate(_ request: VNRequest, error: Error?) {
test {(yTrack) in
DispatchQueue.main.async {
guard let newObservation = request.results?.first as? VNDetectedObjectObservation else { return }
self.lastObservation = newObservation
guard newObservation.confidence >= 0.3 else {
self.highlightView?.frame = .zero
return
}
self.transformedRect = newObservation.boundingBox
self.transformedRect!.origin.y = 1 - self.transformedRect!.origin.y
let convertedRect = self.cameraLayer.layerRectConverted(fromMetadataOutputRect: self.transformedRect!)
self.highlightView?.frame = convertedRect
}
}
let yTrack = Double(self.transformedRect?.origin.y ?? 0.5)
// HERE IT WORKS, yTrack IS PRINTING, BUT I NEED IT OUTSIDE THIS FUNCTION
print(yTrack as Any)
}
public func test(returnCompletion: @escaping (AnyObject) -> ()) {
DispatchQueue.global(qos: .background).async {
self.yTrack = Double(self.transformedRect?.origin.y ?? 0.5)
returnCompletion(self.yTrack as AnyObject)
}
}
public func getCoord () {
//HERE IT DOESN'T WORK. NOTHING IS PRINTING FROM HERE.
print(yTrack)
}
}

The problem is with your handleVisionRequestUpdate function.
First of all, @Azat was right in the comments: when you declare let yTrack = Double(self.transformedRect?.origin.y ?? 0.5) you create a new variable in the scope of this function, which has no connection with the var yTrack in the ViewController's scope.
So when you print yTrack in this function you print the function's internal variable, which is destroyed once the function returns. To be able to use yTrack outside the function you need to assign the new value to the ViewController's yTrack, and then you'll be able to use it in any function you want:
yTrack = Double(self.transformedRect?.origin.y ?? 0.5)
The second problem is with DispatchQueue.main.async. The code inside this block will, in most cases, be performed after these lines:
let yTrack = Double(self.transformedRect?.origin.y ?? 0.5)
print(yTrack as Any)
That's because this call tells the system "take this block of code and perform it asynchronously on the main queue whenever it gets the chance", so the line self.transformedRect = newObservation.boundingBox will in most cases run after the line let yTrack = Double(self.transformedRect?.origin.y ?? 0.5), and you'll be reading the previous transformedRect at that point, which is nil if I understand correctly. So either remove DispatchQueue.main.async from this function or move self.yTrack = Double(self.transformedRect?.origin.y ?? 0.5) into it.
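You can see that ordering for yourself with a tiny experiment:
DispatchQueue.main.async {
    print("inside the async block")   // runs later, on the main queue
}
print("right after the async call")   // almost always prints first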

Check out the code below. I added a lot of comments; I didn't test it, but it should fix things. There were a couple of confusing things going on, specifically with the "test" function. Also, you may want to practice using self.myVar or self.myFunc() so that you get a better feel for which variables are local and which are properties on the view controller:
public func handleVisionRequestUpdate(_ request: VNRequest, error: Error?) {
// test {(yTrack) in /// remove this since we removed it below
DispatchQueue.main.async {
guard let newObservation = request.results?.first as? VNDetectedObjectObservation else { return }
self.lastObservation = newObservation
guard newObservation.confidence >= 0.3 else {
self.highlightView?.frame = .zero
return
}
self.transformedRect = newObservation.boundingBox
self.transformedRect!.origin.y = 1 - self.transformedRect!.origin.y
let convertedRect = self.cameraLayer.layerRectConverted(fromMetadataOutputRect: self.transformedRect!)
self.highlightView?.frame = convertedRect
//
// Move these into the Dispatch closure
// let yTrack = Double(self.transformedRect?.origin.y ?? 0.5) // delete this one
self.yTrack = Double(self.transformedRect?.origin.y ?? 0.5) // replace it with this one (note the explicit self. — inside the closure this refers to the property)
print(self.yTrack as Any)
}
}
//
// Remove this. I'm not sure what it does, but it's making things more complex
//
// public func test (returnCompletion: @escaping (AnyObject) -> () ){
// DispatchQueue.global(qos: .background).async {
// self.yTrack = Double(self.transformedRect?.origin.y ?? 0.5)
// returnCompletion(self.yTrack as AnyObject)
// }
// }
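If you still want other code to be notified each time a new value arrives (which is what the removed test function seemed to be attempting), one untested option is a plain callback property; the onYTrackUpdate name is made up for illustration:
var onYTrackUpdate: ((Double) -> Void)?
Then, inside the DispatchQueue.main.async block, right after assigning self.yTrack:
if let y = self.yTrack {
    self.onYTrackUpdate?(y)   // notify whoever is interested, on the main queue
}
And wherever you need the value (for example in viewDidLoad):
self.onYTrackUpdate = { y in
    print("new yTrack value: \(y)")
}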

Related

Firestore pagination using MVVM architecture swift

I don't quite understand what I am doing wrong, since I am very new to MVVM; it worked in an MVC architecture without any issues. I've set up my VM and am able to get the first set of results, but even that isn't working properly: I get 4 results instead of 10, which is what LOADLIMIT is set to. In addition, the VM function which triggers the query is called multiple (3) times instead of just once, i.e. even prior to scrolling.
Here is my VM:
enum FetchRestaurant {
case success
case error
case location
case end
}
class ListViewModel {
let restaurant: [Restaurant]?
let db = Firestore.firestore()
var restaurantArray = [Restaurant]()
var lastDocument: DocumentSnapshot?
var currentLocation: CLLocation?
typealias fetchRestaurantCallback = (_ restaurants: [Restaurant]?, _ message: String?, _ status: FetchRestaurant) -> Void
var restaurantFetched: fetchRestaurantCallback?
var fetchRestaurant: FetchRestaurant?
init(restaurant: [Restaurant]) {
self.restaurant = restaurant
}
func fetchRestaurantCallback (callback: @escaping fetchRestaurantCallback) {
self.restaurantFetched = callback
}
func fetchRestaurants(address: String) {
print("address received: \(address)")
getLocation(from: address) { location in
if let location = location {
self.currentLocation = location
self.queryGenerator(at: location)
} else {
self.restaurantFetched?(nil, nil, .location)
}
}
}
func queryGenerator(at location: CLLocation) {
var query: Query!
if restaurantArray.isEmpty {
query = db.collection("Restaurant_Data").whereField("distributionType", isLessThanOrEqualTo: 2).limit(to: Constants.Mealplan.LOADLIMIT)
} else {
print("last document:\(String(describing: lastDocument?.documentID))")
query = db.collection("Restaurant_Data").whereField("distributionType", isLessThanOrEqualTo: 2).start(afterDocument: lastDocument!).limit(to: Constants.Mealplan.LOADLIMIT)
}
batchFetch(query: query)
}
func batchFetch(query: Query) {
query.getDocuments { (querySnapshot, error) in
if let error = error {
self.restaurantFetched?(nil, error.localizedDescription, .error)
} else if querySnapshot!.isEmpty {
self.restaurantFetched?(nil, nil, .end)
} else if !querySnapshot!.isEmpty {
let queriedRestaurants = querySnapshot?.documents.compactMap { querySnapshot -> Restaurant? in
return try? querySnapshot.data(as: Restaurant.self)
}
guard let restaurants = queriedRestaurants,
let currentLocation = self.currentLocation else {
self.restaurantFetched?(nil, nil, .end)
return }
self.restaurantArray.append(contentsOf: self.applicableRestaurants(allQueriedRestaurants: restaurants, location: currentLocation))
DispatchQueue.main.asyncAfter(deadline: .now(), execute: {
self.restaurantFetched?(self.restaurantArray, nil, .success)
})
self.lastDocument = querySnapshot!.documents.last
}
}
}
func getLocation(from address: String, completionHandler: @escaping (_ location: CLLocation?) -> Void) {
let geocoder = CLGeocoder()
geocoder.geocodeAddressString(address) { (placemarks, error) in
guard let placemarks = placemarks,
let location = placemarks.first?.location else {
completionHandler(nil)
return
}
completionHandler(location)
}
}
}
And in the VC viewDidLoad:
var fetchMore = false
var reachedEnd = false
let leadingScreensForBatching: CGFloat = 5.0
var searchController = UISearchController(searchResultsController: nil)
var currentAddress : String?
var listViewModel = ListViewModel(restaurant: [Restaurant]())
override func viewDidLoad() {
super.viewDidLoad()
listViewModel.fetchRestaurantCallback { (restaurants, error, result) in
switch result {
case .success :
self.loadingShimmer.stopShimmering()
self.loadingShimmer.removeFromSuperview()
guard let fetchedRestaurants = restaurants else { return }
self.restaurantArray.append(contentsOf: fetchedRestaurants)
self.tableView.reloadData()
self.fetchMore = false
case .location :
self.showAlert(alertTitle: "No businesses nearby", message: "Try going back and changing the address")
case .error :
guard let error = error else { return }
self.showAlert(alertTitle: "Error", message: error)
case .end :
self.fetchMore = false
self.reachedEnd = true
}
}
if let currentAddress = currentAddress {
listViewModel.fetchRestaurants(address: currentAddress)
}
}
I would really appreciate links or resources for implementing MVVM in Swift for a Firestore back-end. I'm coming up short on searches here and on Google; I even tried Medium.
EDIT
class ListViewController: UITableViewController {
lazy var loadingShimmer: UIImageView = {
let image = UIImage(named: "shimmer_background")
let imageview = UIImageView(image: image)
imageview.contentMode = .top
imageview.translatesAutoresizingMaskIntoConstraints = false
return imageview
}()
var restaurantArray = [Restaurant]()
var planDictionary = [String: Any]()
var fetchMore = false
var reachedEnd = false
let leadingScreensForBatching: CGFloat = 5.0
var searchController = UISearchController(searchResultsController: nil)
var currentAddress : String?
var listViewModel = ListViewModel(restaurant: [Restaurant]())
override func viewDidLoad() {
super.viewDidLoad()
setupTable()
}
override func viewWillAppear(_ animated: Bool) {
clearsSelectionOnViewWillAppear = false
}
func setupTable() {
navigationItem.backBarButtonItem = UIBarButtonItem(title: "Restaurant", style: .plain, target: nil, action: nil)
tableView.register(RestaurantCell.self, forCellReuseIdentifier: "Cell")
tableView.delegate = self
tableView.dataSource = self
let navigationBarHeight: CGFloat = self.navigationController!.navigationBar.frame.height
tableView.contentInset = UIEdgeInsets(top: 0, left: 0, bottom: -navigationBarHeight, right: 0)
tableView.separatorStyle = .none
tableView.showsVerticalScrollIndicator = false
tableView.addSubview(loadingShimmer)
loadingShimmer.topAnchor.constraint(equalTo: tableView.safeAreaLayoutGuide.topAnchor).isActive = true
loadingShimmer.leadingAnchor.constraint(equalTo: tableView.leadingAnchor).isActive = true
loadingShimmer.trailingAnchor.constraint(equalTo: tableView.trailingAnchor).isActive = true
loadingShimmer.startShimmering()
initialSetup()
}
func initialSetup() {
let addressOne = planDictionary["addressOne"] as! String + ", "
let city = planDictionary["city"] as! String + ", "
let postalCode = planDictionary["postalCode"] as! String
currentAddress = addressOne + city + postalCode
setupSearch()
listViewModel.fetchRestaurantCallback { (restaurants, error, result) in
switch result {
case .success :
self.loadingShimmer.stopShimmering()
self.loadingShimmer.removeFromSuperview()
guard let fetchedRestaurants = restaurants else { return }
self.restaurantArray.append(contentsOf: fetchedRestaurants)
self.tableView.reloadData()
self.fetchMore = false
case .location :
self.showAlert(alertTitle: "No businesses nearby", message: "Try going back and changing the address")
case .error :
guard let error = error else { return }
self.showAlert(alertTitle: "Error", message: error)
case .end :
self.fetchMore = false
self.reachedEnd = true
}
}
if let currentAddress = currentAddress {
listViewModel.fetchRestaurants(address: currentAddress)
}
}
override func scrollViewDidEndDecelerating(_ scrollView: UIScrollView) {
let off = scrollView.contentOffset.y
let off1 = scrollView.contentSize.height
if off > off1 - scrollView.frame.height * leadingScreensForBatching {
print("\(fetchMore), \(reachedEnd)")
if !fetchMore && !reachedEnd {
if let address = self.currentAddress {
print("address sent: \(address)")
listViewModel.fetchRestaurants(address: address)
}
}
}
}
}
That you're only getting back 4 results instead of 10 is not due to a faulty query or get-document request—those are coded properly. You're either losing documents when you parse them (some are failing Restaurant initialization), Constants.Mealplan.LOADLIMIT is wrong, or there aren't more than 4 documents in the collection itself that satisfy the query.
That the query is executed 3 times instead of once is also not due to anything in this code—viewDidLoad is only called once and geocodeAddressString only returns once. You're making a fetch request elsewhere that we can't see.
In the batchFetch method, you have a guard that returns out of the function without ever calling its completion handler. This will leave the UI in a state of limbo. I'd recommend always calling the completion handler no matter why the function returns.
You never manage the document cursor. If the get-documents return has fewer documents than the load limit, then nil the last-document cursor. That way, when you attempt to get the next page of documents, you can guard against a nil cursor and know whether there is even anything more to fetch.
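For example, a rough, untested sketch of that cursor handling, adapted to your batchFetch and queryGenerator:
func batchFetch(query: Query) {
    query.getDocuments { (querySnapshot, error) in
        if let error = error {
            self.restaurantFetched?(nil, error.localizedDescription, .error)
            return
        }
        guard let snapshot = querySnapshot, !snapshot.isEmpty else {
            self.lastDocument = nil                      // nothing left to page through
            self.restaurantFetched?(nil, nil, .end)
            return
        }
        // Keep the cursor only when a full page came back; a short page means the end was reached.
        self.lastDocument = snapshot.documents.count < Constants.Mealplan.LOADLIMIT ? nil : snapshot.documents.last
        // ... decode the documents and report .success exactly as before
    }
}
func queryGenerator(at location: CLLocation) {
    let baseQuery = db.collection("Restaurant_Data")
        .whereField("distributionType", isLessThanOrEqualTo: 2)
        .limit(to: Constants.Mealplan.LOADLIMIT)
    if restaurantArray.isEmpty {
        batchFetch(query: baseQuery)
    } else if let lastDocument = lastDocument {
        batchFetch(query: baseQuery.start(afterDocument: lastDocument))
    } else {
        // Cursor is nil: the previous page was short, so there is nothing more to fetch.
        restaurantFetched?(nil, nil, .end)
    }
}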
There's no need to pass in an empty array and have your function fill it; simply construct and return an array of results within ListViewModel itself.
We can't see how you trigger pagination. Is it through a scroll delegate when the user reaches the bottom or through a button tap, for example? If it's through a scroll delegate, then I'd disable that for now and see how many returns you get—I suspect one, instead of 3.
What is the particular reason you've ditched MVC for MVVM here? With MVC, you can get pagination up with just a few lines of code. I think MVVM is overkill for iOS applications and would advise against using it unless you have a compelling reason.

Variable is not updating - func takes default variable

In my ThirdScreenViewController I change the variable number in the pressed IBAction.
import Foundation
import UIKit
class ThirdScreenViewController: UIViewController {
override func viewDidLoad() {
super.viewDidLoad()
}
var weatherManager = WeatherManager()
var team = "leer"
static var number = 1
@IBAction func bayernMunchen(_ sender: UIButton) {
team = "bayernMunchen"
}
@IBAction func borussiaDortmund(_ sender: UIButton) {
team = "borussiaDortmund"
}
@IBAction func schalke(_ sender: UIButton) {
team = "schalke"
}
@IBAction func pressed(_ sender: UIButton) {
switch team {
case "bayernMunchen":
ThirdScreenViewController.number = 46
case "borussiaDortmund":
ThirdScreenViewController.number = 41
case "schalke":
ThirdScreenViewController.number = 45
default: print(8)
}
let storyBoard : UIStoryboard = UIStoryboard(name: "Main", bundle:nil)
let nextViewController = storyBoard.instantiateViewController(withIdentifier: "WeatherViewController") as! WeatherViewController
self.present(nextViewController, animated:true, completion:nil)
}
}
In another Swift file (not a view controller) I have a function which takes number and does something with it.
import Foundation
import UIKit
var TeamOne = ""
var TeamTwo = ""
var ScoreOne = ""
var ScoreTwo = ""
var TeamThree = ""
var TeamFour = ""
var ScoreThree = ""
var ScoreFour = ""
var cityName = ThirdScreenViewController.number
struct WeatherManager {
let weatherURL = "https://livescore-api.com/api-client/teams/matches.json?number=10&team_id=19&key=d33FTnnd6qwvEmjz&secret=BbO3REPYFXvb7fpkit0cQnpXNWssiL1U&number=3&team_id=\(cityName)"
func fetchWeather () {
let urlString = "\(weatherURL)"
perfromRequest(urlString: urlString)
}
func perfromRequest(urlString: String)
{
//1.Url erstellen
if let url = URL(string: urlString) {
//2. URLSession starten
let session = URLSession(configuration: .default)
//3. Give session a task
let task = session.dataTask(with: url) { (gettingInfo, response, error) in
if error != nil{
print(error!)
return
}
if let safeFile = gettingInfo {
self.parseJSON(weatherFile: safeFile)
}
}
//4. Start the task
task.resume()
}
}
//Das Ergebnis von oben wird hier ausgegeben
func parseJSON(weatherFile: Data) {
let decoder = JSONDecoder()
do{
let decodedFile = try decoder.decode(WeatherFile.self, from: weatherFile)
TeamOne = decodedFile.data[0].home_name
ScoreOne = decodedFile.data[0].score
TeamTwo = decodedFile.data[0].away_name
ScoreTwo = decodedFile.data[0].score
TeamThree = decodedFile.data[1].home_name
ScoreThree = decodedFile.data[1].score
TeamFour = decodedFile.data[1].away_name
ScoreFour = decodedFile.data[1].score
} catch {
print(error)
}
}
}
In a third Swift file I call weatherManager.fetchWeather() to trigger what happens in my second Swift file.
But here is the problem: it takes the variable number with the default value 1 and not with the value 46/41/45. What am I doing wrong?
Generally speaking, using global variables outside of any class and static variables to share data is bad practice.
Apart from that, to get the team ID dynamically, delete the line
var cityName = ThirdScreenViewController.number
In the struct replace
let weatherURL = "https://livescore-api.com/api-client/teams/matches.json?number=10&team_id=19&key=d33FTnnd6qwvEmjz&secret=BbO3REPYFXvb7fpkit0cQnpXNWssiL1U&number=3&team_id=\(cityName)"
with
let weatherURL = "https://livescore-api.com/api-client/teams/matches.json?number=10&team_id=19&key=d33FTnnd6qwvEmjz&secret=BbO3REPYFXvb7fpkit0cQnpXNWssiL1U&number=3&team_id="
and
let urlString = "\(weatherURL)"
with
let urlString = weatherURL + String(ThirdScreenViewController.number)
Note: Consider renaming the weather-related stuff to team-related stuff.
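If you want to avoid the static entirely, a rough sketch of the idea is to pass the team ID into the fetch call instead (the parameter name and the fallback value are illustrative):
struct WeatherManager {
    // Same endpoint as above, with the trailing team_id value left off.
    let weatherURL = "https://livescore-api.com/api-client/teams/matches.json?number=10&team_id=19&key=d33FTnnd6qwvEmjz&secret=BbO3REPYFXvb7fpkit0cQnpXNWssiL1U&number=3&team_id="

    func fetchWeather(teamID: Int) {
        perfromRequest(urlString: weatherURL + String(teamID))   // your existing request method
    }
}
And in ThirdScreenViewController's pressed action, instead of writing to the static:
let teamID: Int
switch team {
case "bayernMunchen": teamID = 46
case "borussiaDortmund": teamID = 41
case "schalke": teamID = 45
default: teamID = 19   // fallback is illustrative
}
weatherManager.fetchWeather(teamID: teamID)
// ... then present WeatherViewController as before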

How can I record AVDepthData video and save in the gallery?

I am developing an application to record RGB-D sequences with the iPhone using the dual rear camera or the TrueDepth camera. I can capture and visualize the RGB and depth frames, and I developed a version where I can compress this data and save it in the internal files of the iPhone. Nevertheless, my idea is to save both sequences (the RGB and depth map sequences) to the gallery, but I am having problems using AVAssetWriter to create a depth map video.
I am using an iPhone X, Xcode 10.2.1 and Swift 5.
import UIKit
import AVFoundation
import AssetsLibrary
var noMoreSpace = false
class ViewController: UIViewController{
@IBOutlet weak var previewView: UIImageView!
@IBOutlet weak var timeLabel: UILabel!
@IBOutlet weak var previewModeControl: UISegmentedControl!
let session = AVCaptureSession()
let dataOutputQueue = DispatchQueue(label: "video data queue")
let videoOutput = AVCaptureVideoDataOutput()
let movieOutput = AVCaptureMovieFileOutput()
let depthOutput = AVCaptureDepthDataOutput()
let depthCapture = DepthCapture()
var previewLayer = AVCaptureVideoPreviewLayer()
var inputDevice: AVCaptureDeviceInput!
let videoDeviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera, .builtInTrueDepthCamera], mediaType: .video, position: .unspecified)
var Timestamp: String {
let currentDate = NSDate()
let dateFormatter = DateFormatter()
dateFormatter.dateFormat = "ddMM_HHmmss"
return "\(dateFormatter.string(from: currentDate as Date))"
}
var isRecording = false
var time = 0
var timer = Timer()
enum PreviewMode: Int {
case original
case depth
}
var previewMode = PreviewMode.original
var depthMap: CIImage?
var scale: CGFloat = 0.0
//let sessionQueue = DispatchQueue(label: "session queue")
override func viewDidLoad() {
super.viewDidLoad()
timeLabel.isHidden = true //TODO: Disable the rest of the UI
previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
configureCaptureSession()
session.startRunning()
}
func configureCaptureSession() {
session.beginConfiguration()
let camera = AVCaptureDevice.default(.builtInTrueDepthCamera, for: .video, position: .unspecified)!
do {
let cameraInput = try AVCaptureDeviceInput(device: camera)
if session.canAddInput(cameraInput){
session.sessionPreset = .vga640x480
session.addInput(cameraInput)
self.inputDevice = cameraInput
}
if session.canAddOutput(videoOutput){
videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
session.addOutput(videoOutput)
let videoConnection = videoOutput.connection(with: .video)
videoConnection?.videoOrientation = .portrait
//previewLayer = AVCaptureVideoPreviewLayer(session: session)
//previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
//previewLayer.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
//previewView.layer.addSublayer(previewLayer)
//previewLayer.position = CGPoint(x: self.previewView.frame.width / 2, y: self.previewView.frame.height / 2)
//previewLayer.bounds = previewView.frame
}
//Add Depth output to the session
if session.canAddOutput(depthOutput){
session.addOutput(depthOutput)
depthOutput.setDelegate(self, callbackQueue: dataOutputQueue)
depthOutput.isFilteringEnabled = true
let depthConnection = depthOutput.connection(with: .depthData)
depthConnection?.videoOrientation = .portrait
}
/*if session.canAddOutput(movieOutput){
session.addOutput(movieOutput)
}*/
} catch {
print("Error")
}
let outputRect = CGRect(x: 0, y: 0, width: 1, height: 1)
let videoRect = videoOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
let depthRect = depthOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
// Calculate the scaling factor between videoRect and depthRect
scale = max(videoRect.width, videoRect.height) / max(depthRect.width, depthRect.height)
// Change the AVCaptureDevice configuration, so you need to lock it
do{
try camera.lockForConfiguration()
// Set the AVCaptureDevice‘s minimum frame duration (which is the inverse of the maximum frame rate) to be equal to the supported frame rate of the depth data
if let frameDuration = camera.activeDepthDataFormat?.videoSupportedFrameRateRanges.first?.minFrameDuration{
camera.activeVideoMinFrameDuration = frameDuration
}
// Unlock the configuration you locked
camera.unlockForConfiguration()
}catch{
fatalError(error.localizedDescription)
}
session.commitConfiguration()
}
@IBAction func startStopRecording(_ sender: Any) {
if isRecording{
stopRecording()
} else {
startRecording()
}
}
func startRecording(){
timeLabel.isHidden = false
timer = Timer.scheduledTimer(timeInterval: 1, target: self, selector: #selector(ViewController.timerAction), userInfo: nil, repeats: true)
let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
let flagTime = Timestamp
let auxStr = flagTime+"_output.mp4"
let fileUrl = paths[0].appendingPathComponent(auxStr)
depthCapture.prepareForRecording(timeFlag: flagTime)
movieOutput.startRecording(to: fileUrl, recordingDelegate: self)
print(fileUrl.absoluteString)
print("Recording started")
self.isRecording = true
}
func stopRecording(){
timeLabel.isHidden = true
timer.invalidate()
time = 0
timeLabel.text = "0"
movieOutput.stopRecording()
print("Stopped recording!")
self.isRecording = false
do {
try depthCapture.finishRecording(success: { (url: URL) -> Void in
print(url.absoluteString)
})
} catch {
print("Error while finishing depth capture.")
}
}
@objc func timerAction() {
time += 1
timeLabel.text = String(time)
}
@IBAction func previeModeChanged(_ sender: UISegmentedControl) {
previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
}
@IBAction func switchCamera(_ sender: Any) {
let currentDevice = self.inputDevice.device
let currentPosition = currentDevice.position
let preferredPosition: AVCaptureDevice.Position
let preferredDeviceType: AVCaptureDevice.DeviceType
let devices = self.videoDeviceDiscoverySession.devices
var newVideoDevice: AVCaptureDevice? = nil
switch currentPosition {
case .unspecified, .front:
preferredPosition = .back
preferredDeviceType = .builtInDualCamera
case .back:
preferredPosition = .front
preferredDeviceType = .builtInTrueDepthCamera
@unknown default:
preferredPosition = .back
preferredDeviceType = .builtInDualCamera
}
// First, seek a device with both the preferred position and device type. Otherwise, seek a device with only the preferred position. ENTENDER MEJOR LQS CONDICIONES
if let device = devices.first(where: { $0.position == preferredPosition && $0.deviceType == preferredDeviceType }) {
newVideoDevice = device
} else if let device = devices.first(where: { $0.position == preferredPosition }) {
newVideoDevice = device
}
if let videoDevice = newVideoDevice {
do {
let cameraInput = try AVCaptureDeviceInput(device: videoDevice)
self.session.beginConfiguration()
self.session.removeInput(self.inputDevice)
if self.session.canAddInput(cameraInput) {
session.sessionPreset = .vga640x480
self.session.addInput(cameraInput)
self.inputDevice = cameraInput
}else {
self.session.addInput(self.inputDevice)
}
self.session.commitConfiguration()
} catch{
print("Error occurred while creating video device input: \(error)")
}
}
}
}
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate{
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
let image = CIImage(cvPixelBuffer: pixelBuffer!)
let previewImage: CIImage
switch previewMode {
case .original:
previewImage = image
case .depth:
previewImage = depthMap ?? image
}
let displayImage = UIImage(ciImage: previewImage)
DispatchQueue.main.async {[weak self] in self?.previewView.image = displayImage}
}
}
extension ViewController: AVCaptureDepthDataOutputDelegate{
func depthDataOutput(_ output: AVCaptureDepthDataOutput, didOutput depthData: AVDepthData, timestamp: CMTime, connection: AVCaptureConnection) {
var convertedDepth: AVDepthData
// Ensure the depth data is in the format you need: 32-bit floating point
if depthData.depthDataType != kCVPixelFormatType_DepthFloat32{
convertedDepth = depthData.converting(toDepthDataType: kCVPixelFormatType_DepthFloat32)
}else{
convertedDepth = depthData
}
// You save the depth data map from the AVDepthData object as a CVPixelBuffer
let pixelBuffer = convertedDepth.depthDataMap
//Using an extension, you then clamp the pixels in the pixel buffer to keep them between 0.0 and 1.0.
pixelBuffer.clamp()
// Convert the pixel buffer into a CIImage
let depthMap = CIImage(cvPixelBuffer: pixelBuffer)
// You store depthMap in a class variable for later use
DispatchQueue.main.async {
[weak self] in self?.depthMap = depthMap
}
}
}
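A rough, untested sketch of the AVAssetWriter side: the DepthVideoWriter type and its method names below are made up for illustration, the depth map has to be rendered into a codec-friendly pixel format first (for example to BGRA through a CIContext) before it can be appended, and the finished file can then be added to the gallery with the Photos framework (which requires NSPhotoLibraryAddUsageDescription in Info.plist).
import AVFoundation
import Photos

// Illustrative sketch only: writes already-converted depth frames to a .mov file.
final class DepthVideoWriter {
    private let writer: AVAssetWriter
    private let input: AVAssetWriterInput
    private let adaptor: AVAssetWriterInputPixelBufferAdaptor
    private var sessionStarted = false

    init(outputURL: URL, width: Int, height: Int) throws {
        writer = try AVAssetWriter(outputURL: outputURL, fileType: .mov)
        let settings: [String: Any] = [
            AVVideoCodecKey: AVVideoCodecType.h264,
            AVVideoWidthKey: width,
            AVVideoHeightKey: height
        ]
        input = AVAssetWriterInput(mediaType: .video, outputSettings: settings)
        input.expectsMediaDataInRealTime = true
        adaptor = AVAssetWriterInputPixelBufferAdaptor(
            assetWriterInput: input,
            sourcePixelBufferAttributes: [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)])
        if writer.canAdd(input) {
            writer.add(input)
        }
        if !writer.startWriting() {
            print("Could not start writing: \(String(describing: writer.error))")
        }
    }

    // Call from depthDataOutput(_:didOutput:timestamp:connection:) with a BGRA buffer
    // rendered from the depth map, plus the delegate's timestamp.
    func append(_ pixelBuffer: CVPixelBuffer, at time: CMTime) {
        if !sessionStarted {
            writer.startSession(atSourceTime: time)
            sessionStarted = true
        }
        guard input.isReadyForMoreMediaData else { return }
        if !adaptor.append(pixelBuffer, withPresentationTime: time) {
            print("Failed to append depth frame: \(String(describing: writer.error))")
        }
    }

    func finish(completion: @escaping (URL) -> Void) {
        input.markAsFinished()
        writer.finishWriting { completion(self.writer.outputURL) }
    }
}

// Saving the finished file to the photo library:
func saveToGallery(_ url: URL) {
    PHPhotoLibrary.shared().performChanges({
        _ = PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: url)
    }) { success, error in
        print("Saved to gallery: \(success), error: \(String(describing: error))")
    }
}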

How can I reuse a variable later on in Swift

I'm trying to capture a user input (text field + button) and reuse the result later in the program, but I don't know how to do that.
import UIKit
class ViewController: UIViewController {
@IBOutlet weak var resultLabel: UILabel!
@IBOutlet weak var moneyTextField: UITextField!
@IBAction func convert(_ sender: Any) {
let convertion:Double = Double(moneyTextField.text!)!
print(convertion)
}
override func viewDidLoad() {
super.viewDidLoad()
let url = URL(string: "https://www.x-rates.com/calculator/?from=EUR&to=USD&amount=1")!
let request = NSMutableURLRequest(url : url)
let task = URLSession.shared.dataTask(with: request as URLRequest) {
data, response, error in
var rateValue:Double = 0.0;
if let error = error {
print(error)
} else {
if let unwrappedData = data {
let dataString = NSString(data: unwrappedData, encoding: String.Encoding.utf8.rawValue)
var stringSeperator = "<span class=\"ccOutputRslt\">"
if let contentArray = dataString?.components(separatedBy: stringSeperator){
if contentArray.count > 0 {
stringSeperator = "<span"
let newContentArray = contentArray[1].components(separatedBy: stringSeperator)
if newContentArray.count > 0 {
rateValue = Double(newContentArray[0])!
print(newContentArray[0])
}
}
}
}
}
//
print("Rate is \(rateValue)");
DispatchQueue.main.sync(execute: {
self.resultLabel.text = "the value of the dollar is " + String(rateValue)
}
)}
task.resume()
}
}
What I want to do is take the let convertion and multiply it by rateValue at the end of the code. I tried different things but without any result.
After the advice from Joakim Danielson, I did this:
import UIKit
class ViewController: UIViewController {
var fxRate: Double?
@IBOutlet weak var resultLabel: UILabel!
@IBOutlet weak var moneyTextField: UITextField!
@IBAction func convert(_ sender: Any) {
let convertion:Double = Double(moneyTextField.text!)!
print(convertion)
var convertedAmount = 0.0
if let rate = fxRate, let money = Double(moneyTextField.text) {
convertedAmount = rate * money
}
print(convertedAmount)
}
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
let url = URL(string: "https://www.x-rates.com/calculator/?from=EUR&to=USD&amount=1")!
let request = NSMutableURLRequest(url : url)
let task = URLSession.shared.dataTask(with: request as URLRequest) {
data, response, error in
var rateValue:Double = 0.0;
if let error = error {
print(error)
} else {
if let unwrappedData = data {
let dataString = NSString(data: unwrappedData, encoding: String.Encoding.utf8.rawValue)
var stringSeperator = "<span class=\"ccOutputRslt\">"
if let contentArray = dataString?.components(separatedBy: stringSeperator){
if contentArray.count > 0 {
stringSeperator = "<span"
let newContentArray = contentArray[1].components(separatedBy: stringSeperator)
if newContentArray.count > 0 {
rateValue = Double(newContentArray[0])!
print(newContentArray[0])
self.fxRate = rateValue
}
}
}
}
}
//
print("Rate is \(rateValue)");
DispatchQueue.main.sync(execute: {
self.resultLabel.text = "the value of the dollar is " + String(rateValue)
}
)}
task.resume()
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
}
but I get the error: Cannot invoke initializer for type 'Double' with an argument list of type '(String?)' on line 26. Can you please help me? Thanks!
Create a variable outside of your function:
var anyVariableYouWantToAccessLater: Double?
and use this variable anywhere you want.
Since you're downloading the rate during viewDidLoad, I am assuming this is what you want to keep.
Add a new property to the class
class ViewController: UIViewController {
var fxRate: Double?
...
In viewDidLoad update this property with the downloaded value
rateValue = Double(newContentArray[0])!
fxRate = rateValue
In the convert func (or wherever you want to use the rate)
@IBAction func convert(_ sender: Any) {
var convertedAmount = 0.0
if let rate = fxRate, let money = Double(moneyTextField.text ?? "0") {
convertedAmount = rate * money
}
print(convertedAmount)
}

Objects Track using vision framework in iOS 11

I want to detect an object and track it using the Vision framework. I have successfully detected objects and done a little bit of tracking, but I don't get very good accuracy with the tracking.
I want much more accuracy across frames, as the tracking frequently loses the object.
Please check the code below for detecting and tracking objects:
import UIKit
import AVFoundation
import Vision
class ViewController: UIViewController {
private lazy var captureSession: AVCaptureSession = {
let session = AVCaptureSession()
session.sessionPreset = AVCaptureSession.Preset.photo
guard let backCamera = AVCaptureDevice.default(for: .video),
let input = try? AVCaptureDeviceInput(device: backCamera) else
{
return session
}
session.addInput(input)
return session
}()
private lazy var cameraLayer: AVCaptureVideoPreviewLayer =
AVCaptureVideoPreviewLayer(session: self.captureSession)
private let handler = VNSequenceRequestHandler()
fileprivate var lastObservation: VNDetectedObjectObservation?
lazy var highlightView: UIView = {
let view = UIView()
view.layer.borderColor = UIColor.red.cgColor
view.layer.borderWidth = 4
view.backgroundColor = .clear
return view
}()
override func viewDidLoad() {
super.viewDidLoad()
view.layer.addSublayer(cameraLayer)
view.addSubview(highlightView)
let output = AVCaptureVideoDataOutput()
output.setSampleBufferDelegate(self, queue: DispatchQueue(label:
"queue"))
captureSession.addOutput(output)
captureSession.startRunning()
let tapGestureRecognizer = UITapGestureRecognizer(target: self,
action: #selector(tapAction))
view.addGestureRecognizer(tapGestureRecognizer)
}
override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
cameraLayer.frame = view.bounds
}
// MARK: - Actions
@objc private func tapAction(recognizer: UITapGestureRecognizer) {
highlightView.frame.size = CGSize(width: 120, height: 120)
highlightView.center = recognizer.location(in: view)
let originalRect = highlightView.frame
var convertedRect =
cameraLayer.metadataOutputRectConverted(fromLayerRect:
originalRect)
convertedRect.origin.y = 1 - convertedRect.origin.y
lastObservation = VNDetectedObjectObservation(boundingBox:
convertedRect)
}
fileprivate func handle(_ request: VNRequest, error: Error?) {
DispatchQueue.main.async {
guard let newObservation = request.results?.first as?
VNDetectedObjectObservation else {
return
}
self.lastObservation = newObservation
var transformedRect = newObservation.boundingBox
transformedRect.origin.y = 1 - transformedRect.origin.y
let convertedRect =
self.cameraLayer.layerRectConverted(fromMetadataOutputRect:
transformedRect)
self.highlightView.frame = convertedRect
}
}
}
extension ViewController:
AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer:
CMSampleBuffer, from connection: AVCaptureConnection) {
guard let pixelBuffer =
CMSampleBufferGetImageBuffer(sampleBuffer),
let observation = lastObservation else {
return
}
let request = VNTrackObjectRequest(detectedObjectObservation:
observation) { [unowned self] request, error in
self.handle(request, error: error)
}
request.trackingLevel = .accurate
do {
try handler.perform([request], on: pixelBuffer)
}
catch {
print(error)
}
}
}
Any help will be appreciated!!
Thanks.
I am not that good at Vision and Core ML, but your code looks fine. One thing you can do is check for when Vision no longer gets any tracking in the buffer: mark the request's isLastFrame property true if the tracking request's confidence value falls to 0.
if !trackingRequest.isLastFrame {
if observation.confidence > 0.7 {
trackingRequest.inputObservation = observation
} else {
trackingRequest.isLastFrame = true
}
newTrackingRequests.append(trackingRequest)
}
This way it's easy to find out whether the Vision tracking request lost the tracked object or is just tracking the wrong object.
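Adapted to the handle method in your code, that idea might look like this rough sketch (the 0.3 threshold is illustrative): when confidence drops, clear lastObservation so captureOutput stops issuing tracking requests until you tap to select a new object.
fileprivate func handle(_ request: VNRequest, error: Error?) {
    DispatchQueue.main.async {
        guard let newObservation = request.results?.first as? VNDetectedObjectObservation else { return }
        // Low confidence: treat the object as lost and reset the tracker instead of
        // feeding a bad observation back into the next VNTrackObjectRequest.
        guard newObservation.confidence > 0.3 else {
            self.lastObservation = nil
            self.highlightView.frame = .zero
            return
        }
        self.lastObservation = newObservation
        var transformedRect = newObservation.boundingBox
        transformedRect.origin.y = 1 - transformedRect.origin.y
        self.highlightView.frame = self.cameraLayer.layerRectConverted(fromMetadataOutputRect: transformedRect)
    }
}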