Identifying memory leak in Swift GCD .async call

Some background about my app: I am drawing a map. When the user moves the map I perform a database query. I first do an rTree query to find the features that would be drawn in the current viewport. Once I have those IDs I perform a second database query to extract the features (GeoJSON) from the database. I do a quick check to see whether the item has already been drawn, and if not I call addChild to render the feature on the map. I want to do these database lookups in the background via GCD so the user can move the map smoothly. I've implemented this, but memory usage quickly grows to 1 GB, whereas if I do all the work on the main thread it uses around 250 MB (acceptable for me). I'm assuming something is not being cleaned up because of the closure use. Any insight into the cause of the memory leak is appreciated.
public func drawItemsInBox(boundingBox: [Double]) {
    DispatchQueue.global(qos: .background).async { [weak self] in
        guard let self = self else {
            return
        }
        var drawItems: [Int64] = []
        let table = Table("LNDARE_XS")
        let tableRTree = Table("LNDARE_XS_virtual")
        let coords = Expression<String?>("coords")
        let foid = Expression<String>("foid")
        let rTree = Expression<Int64>("rTree")
        let minX = Expression<Double>("minX")
        let maxX = Expression<Double>("maxX")
        let minY = Expression<Double>("minY")
        let maxY = Expression<Double>("maxY")
        let id = Expression<Int64>("id")
        // find all the features to draw via an rTree query
        for row in try! self.db.prepare(tableRTree.filter(maxX >= boundingBox[0] && minX <= boundingBox[1] && maxY >= boundingBox[2] && minY <= boundingBox[3])) {
            drawItems.append(row[id])
        }
        do {
            // get all the features' geojson data
            let query = table.filter(drawItems.contains(rTree))
            for row in try self.db.prepare(query) {
                // skip drawing if the feature already exists on the map
                if self.featureTracking["LNDARE_XS"]?[Int64(row[foid])!] == nil {
                    // convert the database string to an array of coords
                    var toBeRendered: [CGPoint] = []
                    let coordsArray = row[coords]!.components(separatedBy: ",")
                    for i in 0...(coordsArray.count / 2) - 1 {
                        toBeRendered.append(CGPoint(x: Double(coordsArray[i*2])!, y: Double(coordsArray[(i*2)+1])!))
                    }
                    let linearShapeNode = SKShapeNode(points: &toBeRendered, count: toBeRendered.count)
                    linearShapeNode.position = CGPoint(x: self.frame.midX, y: self.frame.midY)
                    linearShapeNode.lineWidth = 0
                    linearShapeNode.fillColor = NSColor.black
                    // append the featureId for tracking and call addChild to draw
                    self.scaleLayer.addChild(linearShapeNode)
                    self.featureTracking["LNDARE_XS"]?[Int64(row[foid])!] = linearShapeNode
                }
            }
        } catch {
            // catch
        }
    }
}

Maybe changing toBeRendered to a single reusable buffer can save some memory:

var toBeRendered: [CGPoint] = []
for row in try self.db.prepare(query) {
    // skip drawing if the feature already exists on the map
    if self.featureTracking["LNDARE_XS"]?[Int64(row[foid])!] == nil {
        // convert the database string to an array of coords
        toBeRendered.removeAll()
        let coordsArray = row[coords]!.components(separatedBy: ",")
        for i in 0...(coordsArray.count / 2) - 1 {
            toBeRendered.append(CGPoint(x: Double(coordsArray[i*2])!, y: Double(coordsArray[(i*2)+1])!))
        }
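One detail if you try this: removeAll() frees the array's storage by default, so the buffer is only actually reused across iterations if you keep the capacity:

toBeRendered.removeAll(keepingCapacity: true) // keeps the existing allocation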

Maybe try using an autorelease pool, since you are not on the main thread. Foundation calls such as components(separatedBy:) can return autoreleased objects, and a GCD background queue may not drain its pool until the queue goes idle, so per-iteration temporaries can pile up:
public func drawItemsInBox(boundingBox: [Double]) {
    DispatchQueue.global(qos: .background).async { [weak self] in
        guard let self = self else {
            return
        }
        var drawItems: [Int64] = []
        let table = Table("LNDARE_XS")
        let tableRTree = Table("LNDARE_XS_virtual")
        let coords = Expression<String?>("coords")
        let foid = Expression<String>("foid")
        let rTree = Expression<Int64>("rTree")
        let minX = Expression<Double>("minX")
        let maxX = Expression<Double>("maxX")
        let minY = Expression<Double>("minY")
        let maxY = Expression<Double>("maxY")
        let id = Expression<Int64>("id")
        // find all the features to draw via an rTree query
        for row in try! self.db.prepare(tableRTree.filter(maxX >= boundingBox[0] && minX <= boundingBox[1] && maxY >= boundingBox[2] && minY <= boundingBox[3])) {
            drawItems.append(row[id])
        }
        do {
            // get all the features' geojson data
            let query = table.filter(drawItems.contains(rTree))
            for row in try self.db.prepare(query) {
                autoreleasepool {
                    // skip drawing if the feature already exists on the map
                    if self.featureTracking["LNDARE_XS"]?[Int64(row[foid])!] == nil {
                        // convert the database string to an array of coords
                        var toBeRendered: [CGPoint] = []
                        let coordsArray = row[coords]!.components(separatedBy: ",")
                        for i in 0...(coordsArray.count / 2) - 1 {
                            toBeRendered.append(CGPoint(x: Double(coordsArray[i*2])!, y: Double(coordsArray[(i*2)+1])!))
                        }
                        let linearShapeNode = SKShapeNode(points: &toBeRendered, count: toBeRendered.count)
                        linearShapeNode.position = CGPoint(x: self.frame.midX, y: self.frame.midY)
                        linearShapeNode.lineWidth = 0
                        linearShapeNode.fillColor = NSColor.black
                        // append the featureId for tracking and call addChild to draw
                        self.scaleLayer.addChild(linearShapeNode)
                        self.featureTracking["LNDARE_XS"]?[Int64(row[foid])!] = linearShapeNode
                    }
                }
            }
        } catch {
            // catch
        }
    }
}
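One more thing worth checking, though this is an assumption on my part rather than something the question confirms: SKShapeNode creation and scaleLayer.addChild mutate the SpriteKit node tree, which SpriteKit expects to happen on the main thread. Here is a minimal sketch of keeping the parsing in the background while hopping to the main queue for the node work; parsePoints is a hypothetical helper wrapping the components(separatedBy:) loop above, and the fragment would replace the inner loop inside the do block:

// Sketch only: parse on the background queue, touch the node tree on main.
for row in try self.db.prepare(query) {
    guard let raw = row[coords] else { continue }
    let points = parsePoints(raw) // hypothetical helper returning [CGPoint]
    DispatchQueue.main.async { [weak self] in
        guard let self = self else { return }
        var pts = points
        let node = SKShapeNode(points: &pts, count: pts.count)
        node.position = CGPoint(x: self.frame.midX, y: self.frame.midY)
        node.lineWidth = 0
        node.fillColor = NSColor.black
        // feature-tracking bookkeeping omitted for brevity
        self.scaleLayer.addChild(node)
    }
}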

Related

Get snapshot from AVCaptureSession containing VisionKit face detection elements

I use AVCaptureSession to set up a camera view and the Vision framework to detect a face and draw a rectangle over it.
Here is how I do it:
override func viewDidLoad() {
    super.viewDidLoad()
    self.prepareVisionRequest()
}

fileprivate func prepareVisionRequest() {
    //self.trackingRequests = []
    var requests = [VNTrackObjectRequest]()
    let faceDetectionRequest = VNDetectFaceRectanglesRequest(completionHandler: { (request, error) in
        if error != nil {
            print("FaceDetection error: \(String(describing: error)).")
        }
        guard let faceDetectionRequest = request as? VNDetectFaceRectanglesRequest,
              let results = faceDetectionRequest.results else {
            return
        }
        DispatchQueue.main.async {
            // Add the observations to the tracking list
            for observation in results {
                let faceTrackingRequest = VNTrackObjectRequest(detectedObjectObservation: observation)
                faceTrackingRequest.trackingLevel = .fast
                requests.append(faceTrackingRequest)
            }
            self.trackingRequests = requests
        }
    })
    // Start with detection. Find face, then track it.
    self.detectionRequests = [faceDetectionRequest]
    self.sequenceRequestHandler = VNSequenceRequestHandler()
    self.setupVisionDrawingLayers()
}

// MARK: Drawing Vision Observations

fileprivate func setupVisionDrawingLayers() {
    let captureDeviceResolution = self.captureDeviceResolution
    let captureDeviceBounds = CGRect(x: 0,
                                     y: 0,
                                     width: captureDeviceResolution.width,
                                     height: captureDeviceResolution.height)
    let captureDeviceBoundsCenterPoint = CGPoint(x: captureDeviceBounds.midX,
                                                 y: captureDeviceBounds.midY)
    let normalizedCenterPoint = CGPoint(x: 0.5, y: 0.5)
    guard let rootLayer = self.rootLayer else {
        self.presentErrorAlert(message: "view was not properly initialized")
        return
    }
    let overlayLayer = CALayer()
    overlayLayer.name = "DetectionOverlay"
    overlayLayer.masksToBounds = true
    overlayLayer.anchorPoint = normalizedCenterPoint
    overlayLayer.bounds = captureDeviceBounds
    overlayLayer.position = CGPoint(x: rootLayer.bounds.midX, y: rootLayer.bounds.midY)
    let faceRectangleShapeLayer = CAShapeLayer()
    faceRectangleShapeLayer.name = "RectangleOutlineLayer"
    faceRectangleShapeLayer.bounds = captureDeviceBounds
    faceRectangleShapeLayer.anchorPoint = normalizedCenterPoint
    faceRectangleShapeLayer.position = captureDeviceBoundsCenterPoint
    faceRectangleShapeLayer.fillColor = UIColor.white.withAlphaComponent(0.9).cgColor
    // faceLandmarksShapeLayer.strokeColor = UIColor.white.withAlphaComponent(0.7).cgColor
    faceRectangleShapeLayer.lineWidth = 5
    faceRectangleShapeLayer.shadowOpacity = 0.7
    faceRectangleShapeLayer.shadowRadius = 5
    let faceLandmarksShapeLayer = CAShapeLayer()
    faceLandmarksShapeLayer.name = "FaceLandmarksLayer"
    faceLandmarksShapeLayer.bounds = captureDeviceBounds
    faceLandmarksShapeLayer.anchorPoint = normalizedCenterPoint
    faceLandmarksShapeLayer.position = captureDeviceBoundsCenterPoint
    faceLandmarksShapeLayer.fillColor = nil
    faceLandmarksShapeLayer.strokeColor = nil
    overlayLayer.addSublayer(faceRectangleShapeLayer)
    faceRectangleShapeLayer.addSublayer(faceLandmarksShapeLayer)
    rootLayer.addSublayer(overlayLayer)
    self.detectionOverlayLayer = overlayLayer
    self.detectedFaceRectangleShapeLayer = faceRectangleShapeLayer
    self.detectedFaceLandmarksShapeLayer = faceLandmarksShapeLayer
    self.updateLayerGeometry()
}
Now, I'm trying three ways to take snapshots:
1- using UIGraphicsImageRenderer: it shows only the rectangle on the face, and the camera view is not visible - it's black
2- taking the image from captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection): the image from the buffer shows only the camera view, no rectangle
3- using AVCapturePhotoCaptureDelegate to capture a photo from the AVCaptureSession: the captured photo shows only the camera view, no rectangle
Could you please help me take a snapshot that contains both the camera view and the rectangle? Thanks
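One approach worth trying, though I have not verified it against this exact setup: combine attempts 1 and 2 by drawing the frame you already grab from the sample buffer first, then rendering the overlay layer on top of it. The preview layer's video content is not captured by layer rendering, which is why attempt 1 comes out black, so you have to supply the camera pixels yourself. A sketch, assuming cameraImage is the UIImage you build from the sample buffer and overlayLayer is the detectionOverlayLayer built above (coordinates line up when the image matches the capture-device resolution the overlay was sized to):

// Sketch: composite the captured frame with the detection overlay.
func snapshot(cameraImage: UIImage, overlayLayer: CALayer) -> UIImage {
    let renderer = UIGraphicsImageRenderer(size: cameraImage.size)
    return renderer.image { ctx in
        // Draw the video frame first...
        cameraImage.draw(in: CGRect(origin: .zero, size: cameraImage.size))
        // ...then render the CoreAnimation overlay (the rectangle) on top.
        overlayLayer.render(in: ctx.cgContext)
    }
}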

How to display large amount of GMSPolylines without maxing out CPU usage?

I'm making an app that displays bus routes using the NextBus API and Google Maps. However, I'm having an issue with CPU usage that I think is caused by the number of GMSPolylines on the map. The route is displayed by an array of polylines made up of the points given by NextBus for a given route. When the polylines are added to the map and the GMSCamera is overviewing the entire route, the CPU on the simulator (iPhone X) maxes out at 100%. When zoomed in on a particular section of the route, however, CPU usage drops to ~2%.
Map Screenshot: https://i.imgur.com/jLmN26e.png
Performance: https://i.imgur.com/nUbIv5w.png
The NextBus API returns route information, including the path of a specific bus route. Here's a small example of the data that I'm working with:
Route: {
    "path": [Path]
}
Path: {
    "points": [Coordinate]
}
Coordinate: {
    "lat": Float,
    "lon": Float
}
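In Swift terms, the types implied by that schema look roughly like this (a sketch; the Decodable conformance is assumed, and lat/lon are modelled as strings because buildRoute below converts them with Double(_:)):

struct Route: Decodable {
    let path: [Path?]
}

struct Path: Decodable {
    let points: [Coordinate?]
}

struct Coordinate: Decodable {
    let lat: String // NextBus delivers coordinates as strings
    let lon: String
}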
And here's my method that creates the polylines from the data. All in all there are on average ~700 coordinates spread across ~28 polylines (one per path object) for a route. Keep in mind I'm not displaying multiple routes at once; I'm only displaying one at a time.
func buildRoute(routePath: [Path?]) -> [GMSPolyline] {
    var polylines: [GMSPolyline] = []
    for path in routePath {
        // Use a distinct name so the GMSMutablePath doesn't shadow the Path being read.
        let gmsPath = GMSMutablePath()
        guard let coords = path?.points else { continue }
        for coordinate in coords {
            // Safely unwrap latitude strings and convert them to doubles.
            guard let latStr = coordinate?.lat,
                  let lonStr = coordinate?.lon else {
                continue
            }
            guard let latOne = Double(latStr),
                  let lonOne = Double(lonStr) else {
                continue
            }
            // Create location coordinates.
            let pointCoordinate = CLLocationCoordinate2D(latitude: latOne, longitude: lonOne)
            gmsPath.add(pointCoordinate)
        }
        let line = GMSPolyline(path: gmsPath)
        line.strokeWidth = 6
        line.strokeColor = UIColor(red: 0/255, green: 104/255, blue: 139/255, alpha: 1.0)
        polylines.append(line)
    }
    return polylines
}
Finally here is my method that adds the polylines to the map:
fileprivate func buildRoute(routeConfig: RouteConfig?) {
    if let points = routeConfig?.route?.path {
        let polylines = RouteBuiler.shared.buildRoute(routePath: points)
        DispatchQueue.main.async {
            // Remove polylines from map if there are any.
            for line in self.currentRoute {
                line.map = nil
            }
            // Set new current route and add it to the map.
            self.currentRoute = polylines
            for line in self.currentRoute {
                line.map = self.mapView
            }
        }
    }
}
Is there a problem with how I'm constructing the polylines? Or are there simply too many coordinates?
I ran into this exact problem. It is quite an odd bug: when you go over a certain threshold of polylines, the CPU suddenly pegs at 100%.
I discovered that GMSPolygon does not have this problem, so I switched all of the GMSPolylines over to GMSPolygons.
To get the correct stroke width, I use the following code to create a polygon that traces the outline of a polyline at a given stroke width. The calculation requires the LASwift linear algebra library.
https://github.com/AlexanderTar/LASwift
import CoreLocation
import LASwift
import GoogleMaps

struct Segment {
    let from: CLLocationCoordinate2D
    let to: CLLocationCoordinate2D
}

enum RightLeft {
    case right, left
}

// Offset the given path to the left or right by the given distance
func offsetPath(rightLeft: RightLeft, path: [CLLocationCoordinate2D], offset: Double) -> [CLLocationCoordinate2D] {
    var offsetPoints = [CLLocationCoordinate2D]()
    var prevSegment: Segment!
    for i in 0..<path.count {
        // Test if this is the last point
        if i == path.count-1 {
            if let to = prevSegment?.to {
                offsetPoints.append(to)
            }
            continue
        }
        let from = path[i]
        let to = path[i+1]
        // Skip duplicate points
        if from.latitude == to.latitude && from.longitude == to.longitude {
            continue
        }
        // Calculate the miter corner for the offset point,
        // working in homogeneous coordinates with x = longitude, y = latitude.
        let segmentAngle = -atan2(to.latitude - from.latitude, to.longitude - from.longitude)
        let sinA = sin(segmentAngle)
        let cosA = cos(segmentAngle)
        let rotate =
            Matrix([[cosA, -sinA, 0.0],
                    [sinA,  cosA, 0.0],
                    [0.0,   0.0,  1.0]])
        let translate =
            Matrix([[1.0, 0.0, 0.0],
                    [0.0, 1.0, rightLeft == .left ? offset : -offset],
                    [0.0, 0.0, 1.0]])
        let mat = inv(rotate) * translate * rotate
        let fromOff = mat * Matrix([[from.longitude], [from.latitude], [1.0]])
        let toOff = mat * Matrix([[to.longitude], [to.latitude], [1.0]])
        let offsetSegment = Segment(
            from: CLLocationCoordinate2D(latitude: fromOff[1,0], longitude: fromOff[0,0]),
            to: CLLocationCoordinate2D(latitude: toOff[1,0], longitude: toOff[0,0]))
        if prevSegment == nil {
            prevSegment = offsetSegment
            offsetPoints.append(offsetSegment.from)
            continue
        }
        // Calculate line intersection
        guard let intersection = getLineIntersection(line0: prevSegment, line1: offsetSegment, segment: false) else {
            prevSegment = offsetSegment
            continue
        }
        prevSegment = offsetSegment
        offsetPoints.append(intersection)
    }
    return offsetPoints
}
// Returns the intersection point if the line segments intersect, otherwise nil
func getLineIntersection(line0: Segment, line1: Segment, segment: Bool) -> CLLocationCoordinate2D? {
    return getLineIntersection(p0: line0.from, p1: line0.to, p2: line1.from, p3: line1.to, segment: segment)
}

// https://stackoverflow.com/questions/563198/how-do-you-detect-where-two-line-segments-intersect
// Returns the intersection point if the line segments intersect, otherwise nil
func getLineIntersection(p0: CLLocationCoordinate2D, p1: CLLocationCoordinate2D, p2: CLLocationCoordinate2D, p3: CLLocationCoordinate2D, segment: Bool) -> CLLocationCoordinate2D? {
    let s1x = p1.longitude - p0.longitude
    let s1y = p1.latitude - p0.latitude
    let s2x = p3.longitude - p2.longitude
    let s2y = p3.latitude - p2.latitude
    let numerator = (s2x * (p0.latitude - p2.latitude) - s2y * (p0.longitude - p2.longitude))
    let denominator = (s1x * s2y - s2x * s1y)
    if denominator == 0.0 {
        return nil
    }
    let t = numerator / denominator
    if segment {
        // Note the leading minus sign on s1y, per the formula linked above.
        let s = (-s1y * (p0.longitude - p2.longitude) + s1x * (p0.latitude - p2.latitude)) / (s1x * s2y - s2x * s1y)
        guard (s >= 0 && s <= 1 && t >= 0 && t <= 1) else {
            return nil
        }
    }
    return CLLocationCoordinate2D(latitude: p0.latitude + (t * s1y), longitude: p0.longitude + (t * s1x))
}
// The path from NextBus
let path: [CLLocationCoordinate2D] = pathFromNextBus()
// The desired width of the polyline
let strokeWidth: Double = desiredPolylineWidth()

let polygon: GMSPolygon
do {
    let polygonPath = GMSMutablePath()
    let w = strokeWidth / 2.0
    for point in offsetPath(rightLeft: .left, path: path, offset: w) {
        polygonPath.add(CLLocationCoordinate2D(latitude: point.latitude, longitude: point.longitude))
    }
    for point in offsetPath(rightLeft: .right, path: path, offset: w).reversed() {
        polygonPath.add(CLLocationCoordinate2D(latitude: point.latitude, longitude: point.longitude))
    }
    polygon = GMSPolygon(path: polygonPath)
    polygon.strokeWidth = 0.0
}
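To make the polygon read as a stroked line, it also needs the colour the polyline used, and it has to be attached to the map like any other overlay (mapView here stands in for your map view):

// Fill with the former stroke colour; the polygon's own stroke stays at 0.
polygon.fillColor = UIColor(red: 0/255, green: 104/255, blue: 139/255, alpha: 1.0)
polygon.map = mapView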

How Do I Process an Image File to fit buffer dimensions for Vision Framework on macOS?

I'm trying to make something simple to test the Vision framework on macOS.
I tried to modify the code from this tutorial to use a single screenshot image instead of the camera feed:
https://www.appcoda.com/vision-framework-introduction/
However, I get this error:
Error Domain=com.apple.vis Code=3 "Failed to create image for processing due to invalid requested buffer dimensions"
Is it because the screenshot image doesn't fit a certain specification? Do I need to preprocess the file?
If so, how can I process it to fit the dimensions?
My testing code is below.
Thanks!
import Cocoa
import Vision

class ViewController: NSViewController {
    var requests = [VNRequest]()

    func start() {
        let textRequest = VNDetectTextRectanglesRequest(completionHandler: self.detectTextHandler)
        textRequest.reportCharacterBoxes = true
        self.requests = [textRequest]
        let url = URL(fileURLWithPath: NSString(string: "~/Screenshot.png").expandingTildeInPath)
        let imageRequestHandler = VNImageRequestHandler(url: url)
        do {
            try imageRequestHandler.perform(self.requests)
        } catch {
            print(error)
        }
    }

    func detectTextHandler(request: VNRequest, error: Error?) {
        guard let observations = request.results else {
            print("no result")
            return
        }
        let result = observations.map { $0 as? VNTextObservation }
        DispatchQueue.main.async() {
            for region in result {
                guard let rg = region else {
                    continue
                }
                self.highlightWord(box: rg)
                if let boxes = region?.characterBoxes {
                    for characterBox in boxes {
                        self.highlightLetters(box: characterBox)
                    }
                }
            }
        }
    }

    func highlightWord(box: VNTextObservation) {
        guard let boxes = box.characterBoxes else {
            return
        }
        // Note: despite the names, maxX ends up holding the smallest x and
        // minX the largest (the naming is inherited from the tutorial code);
        // the computed frame still comes out right.
        var maxX: CGFloat = 9999.0
        var minX: CGFloat = 0.0
        var maxY: CGFloat = 9999.0
        var minY: CGFloat = 0.0
        for char in boxes {
            if char.bottomLeft.x < maxX {
                maxX = char.bottomLeft.x
            }
            if char.bottomRight.x > minX {
                minX = char.bottomRight.x
            }
            if char.bottomRight.y < maxY {
                maxY = char.bottomRight.y
            }
            if char.topRight.y > minY {
                minY = char.topRight.y
            }
        }
        let xCord = maxX
        let yCord = (1 - minY)
        let width = (minX - maxX)
        let height = (minY - maxY)
        let frame = CGRect(x: xCord, y: yCord, width: width, height: height)
        print("Word: \(frame)")
    }

    func highlightLetters(box: VNRectangleObservation) {
        let xCord = box.topLeft.x
        let yCord = (1 - box.topLeft.y)
        let width = (box.topRight.x - box.bottomLeft.x)
        let height = (box.topLeft.y - box.bottomLeft.y)
        let frame = CGRect(x: xCord, y: yCord, width: width, height: height)
        print("Letter: \(frame)")
    }

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view.
        start()
    }

    override var representedObject: Any? {
        didSet {
            // Update the view, if already loaded.
        }
    }
}
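I cannot tell from the code alone what Vision dislikes about the PNG, but one way to test whether the file's pixel format or size is the problem is to redraw it into a plain RGBA bitmap and hand Vision the resulting CGImage instead of the URL. A sketch, assuming the screenshot loads as an NSImage:

import Cocoa

// Sketch: normalize an image into a standard 8-bit RGBA bitmap before Vision sees it.
func normalizedCGImage(from image: NSImage) -> CGImage? {
    var rect = CGRect(origin: .zero, size: image.size)
    guard let source = image.cgImage(forProposedRect: &rect, context: nil, hints: nil),
          let ctx = CGContext(data: nil,
                              width: source.width,
                              height: source.height,
                              bitsPerComponent: 8,
                              bytesPerRow: 0,
                              space: CGColorSpaceCreateDeviceRGB(),
                              bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue) else {
        return nil
    }
    ctx.draw(source, in: CGRect(x: 0, y: 0, width: source.width, height: source.height))
    return ctx.makeImage()
}

// Usage: replace the URL-based handler with a CGImage-based one.
// let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])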

iOS Charts - Display highest and lowest value for CandleStick charts

I'm trying to create a candlestick chart using Charts.
As you can see from my screenshot, the chart only shows the highest and lowest values instead of displaying the values for all the candles. Is there any way I can implement that with the Charts framework?
Thanks in advance.
If you want to display only the highest and lowest values, you need to implement your own renderer inherited from CandleStickChartRenderer. In fact, you just need to override one function, drawValues(context: CGContext).
I have made an example of about a hundred lines of code, but the actual custom part is only about thirty lines.
class MyCandleStickChartRenderer: CandleStickChartRenderer {
    private var _xBounds = XBounds() // Reusable XBounds object
    private var minValue: Double
    private var maxValue: Double

    // New constructor
    init(view: CandleStickChartView, minValue: Double, maxValue: Double) {
        self.minValue = minValue
        self.maxValue = maxValue
        super.init(dataProvider: view, animator: view.chartAnimator, viewPortHandler: view.viewPortHandler)
    }

    // Override draw function
    override func drawValues(context: CGContext)
    {
        guard
            let dataProvider = dataProvider,
            let candleData = dataProvider.candleData
            else { return }
        guard isDrawingValuesAllowed(dataProvider: dataProvider) else { return }

        var dataSets = candleData.dataSets
        let phaseY = animator.phaseY
        var pt = CGPoint()

        for i in 0 ..< dataSets.count
        {
            guard let dataSet = dataSets[i] as? IBarLineScatterCandleBubbleChartDataSet
                else { continue }
            let valueFont = dataSet.valueFont
            let trans = dataProvider.getTransformer(forAxis: dataSet.axisDependency)
            let valueToPixelMatrix = trans.valueToPixelMatrix
            _xBounds.set(chart: dataProvider, dataSet: dataSet, animator: animator)
            let lineHeight = valueFont.lineHeight
            let yOffset: CGFloat = lineHeight + 5.0

            for j in stride(from: _xBounds.min, through: _xBounds.range + _xBounds.min, by: 1)
            {
                guard let e = dataSet.entryForIndex(j) as? CandleChartDataEntry else { break }
                // Only process the entries that carry the extreme values.
                guard e.high == maxValue || e.low == minValue else { continue }
                pt.x = CGFloat(e.x)
                if e.high == maxValue {
                    pt.y = CGFloat(e.high * phaseY)
                } else if e.low == minValue {
                    pt.y = CGFloat(e.low * phaseY)
                }
                pt = pt.applying(valueToPixelMatrix)

                if (!viewPortHandler.isInBoundsRight(pt.x))
                {
                    break
                }
                if (!viewPortHandler.isInBoundsLeft(pt.x) || !viewPortHandler.isInBoundsY(pt.y))
                {
                    continue
                }
                if dataSet.isDrawValuesEnabled
                {
                    // In this part we draw min and max values
                    var textValue: String?
                    var align: NSTextAlignment = .center
                    if e.high == maxValue {
                        pt.y -= yOffset
                        textValue = "← " + String(maxValue)
                        align = .left
                    } else if e.low == minValue {
                        pt.y += yOffset / 5
                        textValue = String(minValue) + " →"
                        align = .right
                    }
                    if let textValue = textValue {
                        ChartUtils.drawText(
                            context: context,
                            text: textValue,
                            point: CGPoint(x: pt.x, y: pt.y),
                            align: align,
                            attributes: [NSAttributedStringKey.font: valueFont,
                                         NSAttributedStringKey.foregroundColor: dataSet.valueTextColorAt(j)])
                    }
                }
            }
        }
    }
}
Do not forget to use your custom renderer for your chart. ;)
myCandleStickChartView.renderer = MyCandleStickChartRenderer(view: myCandleStickChartView, minValue: 400, maxValue: 1450)
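The hard-coded 400 and 1450 are presumably the extremes of the charted data; if so, they can be derived from the entries instead of being typed in. A sketch, with entries standing in for your [CandleChartDataEntry] array:

let maxValue = entries.map { $0.high }.max() ?? 0
let minValue = entries.map { $0.low }.min() ?? 0
myCandleStickChartView.renderer = MyCandleStickChartRenderer(view: myCandleStickChartView,
                                                             minValue: minValue,
                                                             maxValue: maxValue)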

Exit ARSCNView to rotate objects with allowsCameraControl

With ARKit I create boxes in my room, and then I want to be able to exit the AR scene to see my boxes and rotate around them.
I tried allowsCameraControl = true and now I'm able to zoom and drag my objects, but I would like the camera to turn around them, whereas for now the objects turn around me. I've watched WWDC session 604, which explains the option self.sceneView.defaultCameraController.interactionMode = .orbitTurntable, but I can't manage to make it work...
Basically all my 'box' nodes are on the horizontal plane, and I have a topBox at my ceiling.
What I've tried:
func getCenterBox() -> SCNNode? {
    guard let _ = self.sceneView.scene.rootNode.childNode(withName: "box", recursively: true) else {
        return nil
    }
    let edges = self.sceneView.scene.rootNode.childNodes.filter { $0.name == "box" }
    let edgesByX = edges.sorted { $0.position.x < $1.position.x }
    let minX = edgesByX.first?.position.x
    let maxX = edgesByX.last?.position.x
    let edgesByZ = edges.sorted { $0.position.z < $1.position.z }
    let minZ = edgesByZ.first?.position.z
    let maxZ = edgesByZ.last?.position.z
    let centerBox = SCNBox(width: 1, height: 1, length: 1, chamferRadius: 0.5)
    let centerNode = SCNNode(geometry: centerBox)
    let centerNodePosition = SCNVector3Make((maxX! - minX!)/2 + minX!,
                                            ((topBox?.position.y)! - (edges.first?.position.y)!)/2 + (edges.first?.position.y)!,
                                            (maxZ! - minZ!)/2 + minZ!)
    centerNode.position = centerNodePosition
    return centerNode
}

@IBAction func stopARTapped(_ sender: UIButton) {
    self.sceneView.allowsCameraControl = true
    self.sceneView.defaultCameraController.interactionMode = .orbitTurntable
    self.sceneView.defaultCameraController.inertiaEnabled = true
    self.sceneView.defaultCameraController.maximumHorizontalAngle = 0
    self.sceneView.defaultCameraController.maximumVerticalAngle = 0
    self.sceneView.defaultCameraController.minimumHorizontalAngle = 0
    self.sceneView.defaultCameraController.minimumVerticalAngle = 0
    guard let centerNode = getCenterBox() else {
        return
    }
    self.sceneView.scene.rootNode.addChildNode(centerNode)
    let lookAtConstraint = SCNLookAtConstraint(target: centerNode)
    if self.sceneView.pointOfView?.constraints == nil {
        self.sceneView.pointOfView?.constraints = [lookAtConstraint]
    } else {
        self.sceneView.pointOfView?.constraints?.append(lookAtConstraint)
    }
}
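Not a verified fix, but one thing stands out: the code computes a center node yet never tells the camera controller to orbit it. SCNCameraController has a target point that .orbitTurntable rotates around, so setting it to the computed center may be the missing piece. A minimal sketch, assuming getCenterBox returns the positioned centerNode:

self.sceneView.allowsCameraControl = true
self.sceneView.defaultCameraController.interactionMode = .orbitTurntable
if let centerNode = getCenterBox() {
    // Orbit the default camera around the computed center of the boxes.
    self.sceneView.defaultCameraController.target = centerNode.position
}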