I'm trying to create a 2D triangle mesh using SwiftUI & Metal. I have lists of vertices [929 items] and indices [1750 items] to draw the mesh. I used the .drawIndexedPrimitives method, but it doesn't produce the correct result for this purpose. I think the problem might be solved by using MTKMesh, but I don't know how to apply it to a 2D mesh given lists of vertices and indices. Please suggest some ideas for solving this problem.
The code of Renderer Class below
import Foundation
import MetalKit
// Renders a 2D triangle mesh with Metal inside a SwiftUI-hosted MTKView.
// NOTE(review): the vertex/index array literals below are elided in this
// snippet ("...", missing closing brackets), so the class does not compile
// as shown — it documents intent only.
class Renderer: NSObject, MTKViewDelegate {
var parent: MetalViewComponent
var metalDevice: MTLDevice!
var metalCommandQueue: MTLCommandQueue!
let pipelineState: MTLRenderPipelineState
let vertexBuffer: MTLBuffer?
// let mesh = MTKMesh(mesh: <#T##MDLMesh#>, device: <#T##MTLDevice#>)
let indexBuffer: MTLBuffer?
// Builds the render pipeline ("vertexShader"/"fragmentShader" from the
// default library) and uploads the hard-coded vertex and index data.
init(_ parent: MetalViewComponent) {
self.parent = parent
if let metalDevice = MTLCreateSystemDefaultDevice() {
self.metalDevice = metalDevice
}
self.metalCommandQueue = metalDevice.makeCommandQueue()
let pipelineDescriptor = MTLRenderPipelineDescriptor()
let library = metalDevice.makeDefaultLibrary()
pipelineDescriptor.vertexFunction = library?.makeFunction(name: "vertexShader")
pipelineDescriptor.fragmentFunction = library?.makeFunction(name: "fragmentShader")
pipelineDescriptor.colorAttachments[0].pixelFormat = .bgra8Unorm
do {
try pipelineState = metalDevice.makeRenderPipelineState(descriptor: pipelineDescriptor)
} catch {
fatalError()
}
// Vertex positions appear to be in normalized device coordinates (range
// roughly -1...1); 929 vertices in total per the surrounding text.
let vertices = [Vertex(position: [-3.49195e-17, -0.570297], color: [0, 1, 1, 1]),
Vertex(position: [-0.245024, -0.578218], color: [0, 1, 1, 1]),
Vertex(position: [-0.529488, -0.52307], color: [0, 1, 1, 1]),...
// Index triples describing triangles (1750 indices total).
let indices: [UInt32] = [470,469,433,
433,469,432,
470,433,434,
506,469,507,
469,470,507, // from 1 to 928
vertexBuffer = metalDevice.makeBuffer(bytes: vertices, length: vertices.count * MemoryLayout<Vertex>.stride, options: [])!
// BUG(review): the indices are UInt32, but the length is computed with
// MemoryLayout<UInt16>.size — only half of the index data is copied into
// the buffer. Use MemoryLayout<UInt32>.stride here.
indexBuffer = metalDevice.makeBuffer(bytes: indices, length: indices.count * MemoryLayout<UInt16>.size, options: [])
super.init()
// "Число вертексов" = "Number of vertices" (log kept verbatim).
print("Число вертексов: \(vertices.count)")
}
func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {
}
// Encodes one frame: clear to black, then draw the indexed mesh.
func draw(in view: MTKView) {
guard let drawable = view.currentDrawable else {
return
}
let commandBuffer = metalCommandQueue.makeCommandBuffer()
let renderPassDescriptor = view.currentRenderPassDescriptor
renderPassDescriptor?.colorAttachments[0].clearColor = MTLClearColorMake(0, 0, 0, 1.0)
renderPassDescriptor?.colorAttachments[0].loadAction = .clear
renderPassDescriptor?.colorAttachments[0].storeAction = .store
let renderEncoder = commandBuffer?.makeRenderCommandEncoder(descriptor: renderPassDescriptor!)
renderEncoder?.setRenderPipelineState(pipelineState)
renderEncoder?.setVertexBuffer(vertexBuffer, offset: 0, index: 0)
// BUG(review): for a filled triangle mesh this must be type: .triangle.
// .lineStrip connects the indexed vertices into one long polyline, which
// is why the output looks like scribbled lines. MTKMesh is not required;
// drawIndexedPrimitives with .triangle handles index/vertex lists directly.
renderEncoder?.drawIndexedPrimitives(type: .lineStrip, indexCount: 1750, indexType: .uint32, indexBuffer: indexBuffer!, indexBufferOffset: 0)
renderEncoder?.endEncoding()
commandBuffer?.present(drawable)
commandBuffer?.commit()
}
}//
This is what's drawing for now
this is what I expect
I am doing some task to apply filter effect in to my WebRTC call, follow this tutorial:
https://developer.apple.com/documentation/vision/applying_matte_effects_to_people_in_images_and_video
Here is my code to convert:
/// RTCVideoCapturerDelegate callback: intercepts each captured frame, runs it
/// through `GreetingProcessor` (blending with `self.vbImage`), and forwards the
/// processed frame to `videoSource`.
func capturer(_ capturer: RTCVideoCapturer, didCapture frame: RTCVideoFrame) {
    // Robustness: the original force cast (`as! RTCCVPixelBuffer`) traps when
    // the frame is backed by a different buffer type (e.g. I420). Drop such
    // frames instead of crashing.
    guard let rtcPixelBufferWrapper = frame.buffer as? RTCCVPixelBuffer else { return }
    let pixelBufferRef = rtcPixelBufferWrapper.pixelBuffer
    if #available(iOS 15.0, *) {
        DispatchQueue.global().async {
            if let output = GreetingProcessor.shared.processVideoFrame(
                foreground: pixelBufferRef,
                background: self.vbImage) {
                print("new output: \(output) => \(output.pixelBuffer) + \(self.buffer(from: output))")
                // NOTE(review): CIImage.pixelBuffer is only non-nil when the
                // CIImage was created directly from a CVPixelBuffer. A filter's
                // outputImage is a lazy recipe, so this guard will practically
                // always fail — render the CIImage into a freshly-created
                // CVPixelBuffer via CIContext.render(_:to:) instead.
                guard let px = output.pixelBuffer else { return }
                let rtcPixelBuffer = RTCCVPixelBuffer(pixelBuffer: px)
                let i420buffer = rtcPixelBuffer.toI420()
                let newFrame = RTCVideoFrame(buffer: i420buffer, rotation: frame.rotation, timeStampNs: frame.timeStampNs)
                self.videoSource.capturer(capturer, didCapture: newFrame)
            }
        }
    }
}
Then here is how I apply the effect:
/// Composites `foreground` over `background` through `mask`, scaling both the
/// mask and the background to the foreground's extent first.
/// - Parameters:
///   - background: Image shown where the mask is transparent/black.
///   - foreground: Image shown where the mask is opaque/white; defines output size.
///   - mask: Matte image; scaled to cover `foreground` exactly.
///   - isRedMask: Use CIBlendWithRedMask (red channel) instead of CIBlendWithMask.
/// - Returns: The blended image recipe, or nil if the filter produced no output.
func blendImages(
    background: CIImage,
    foreground: CIImage,
    mask: CIImage,
    isRedMask: Bool = false
) -> CIImage? {
    // Scale the mask to the foreground's extent.
    // (Idiom: CGAffineTransform(scaleX:y:) replaces the private bridging
    // function __CGAffineTransformMake; the resulting matrix is identical.)
    let maskScaleX = foreground.extent.width / mask.extent.width
    let maskScaleY = foreground.extent.height / mask.extent.height
    let maskScaled = mask.transformed(by: CGAffineTransform(scaleX: maskScaleX, y: maskScaleY))
    // Scale the background to the foreground's extent as well.
    let backgroundScaleX = (foreground.extent.width / background.extent.width)
    let backgroundScaleY = (foreground.extent.height / background.extent.height)
    let backgroundScaled = background.transformed(
        by: CGAffineTransform(scaleX: backgroundScaleX, y: backgroundScaleY))
    let blendFilter = isRedMask ? CIFilter.blendWithRedMask() : CIFilter.blendWithMask()
    blendFilter.inputImage = foreground
    blendFilter.backgroundImage = backgroundScaled
    blendFilter.maskImage = maskScaled
    return blendFilter.outputImage
}
The problem is that output.pixelBuffer is always nil, so I cannot create an RTCVideoFrame to pass back to the delegate.
Can someone help?
I have some code I can't change that expects to be able to draw at any time. It's the main() function in BackgroundThread below - pretend it can't be modified in any way. Running this will use 70-80% CPU.
If instead of running the thread I replicate what it is doing in View::draw() (i.e. draw 5000 white rectangles at random positions), this will use about 30% CPU.
Where's the difference coming from? Looking at Instruments, although the call stack is the same starting from CGContextFillRect, the View::draw() version only spends 16% of the time doing memset() whereas the threaded version spends 80% of the time.
The code below is the FAST version. Comment out the FAST lines and uncomment the SLOW lines to switch to the SLOW (threaded) version. Compile with swiftc test.swift -otest && ./test. I'm on macOS 10.13, integrated graphics, if that matters.
Is there anything I can do to make the threaded version as fast as the View::draw() version?
import Cocoa
// NOTE(review): this shadows AppKit's implicit `NSApp` global with the shared
// application instance (behaviorally equivalent here).
let NSApp = NSApplication.shared,
vwaitSem = DispatchSemaphore(value: 0)  // paces the drawing thread to the display link
var
mainWindow: NSWindow?,
screen: CGContext?,   // 2x-scaled offscreen bitmap the background thread draws into
link: CVDisplayLink?  // vsync source driving presentation
// Layer-backed view for the FAST path: redraws 5001 random white 6x6 squares
// on every displayIfNeeded triggered by the display link.
class View: NSView, CALayerDelegate {
    var lastTime: CFTimeInterval = 0  // timestamp of the previous frame (for the commented log)
    override var acceptsFirstResponder: Bool {return true}
    required init(coder aDecoder: NSCoder) {fatalError("This class does not support NSCoding")}
    override func makeBackingLayer() -> CALayer {return CALayer()}
    override init(frame: CGRect) {
        super.init(frame: frame)
        self.wantsLayer = true
        self.layer?.contentsScale = 2.0
        self.layer?.backgroundColor = CGColor(red:0, green:0, blue:0, alpha: 1)
        self.layerContentsRedrawPolicy = NSView.LayerContentsRedrawPolicy.onSetNeedsDisplay // FAST
    }
    func draw(_ layer: CALayer, in ctx: CGContext) {
        let now = CACurrentMediaTime(), timePassed = ((now-lastTime)*1000).rounded()
        // NSLog("\(timePassed)")
        lastTime = now
        // Fill/stroke color are loop-invariant — set them once per frame.
        // (Fix: the original also re-set the fill color inside the loop,
        // 5001 redundant CG state changes per frame.)
        ctx.setFillColor(CGColor.white)
        ctx.setStrokeColor(CGColor.white)
        for _ in 0...5000 {
            let rect = CGRect(x: CGFloat(arc4random_uniform(640)+1), y: CGFloat(arc4random_uniform(480)+1), width:6, height:6)
            ctx.fill(rect)
        }
    }
}
/// CVDisplayLink callback: hops to the main thread, redraws the window's
/// content view, and releases the drawing thread for the next frame.
func displayLinkOutputCallback(_ displayLink: CVDisplayLink, _ nowPtr: UnsafePointer<CVTimeStamp>,
    _ outputTimePtr: UnsafePointer<CVTimeStamp>, _ flagsIn: CVOptionFlags, _ flagsOut: UnsafeMutablePointer<CVOptionFlags>,
    _ displayLinkContext: UnsafeMutableRawPointer?) -> CVReturn {
    DispatchQueue.main.async {
        let contentView = mainWindow!.contentView!
        // contentView.layer!.contents = screen!.makeImage() // SLOW
        contentView.display() // FAST
        vwaitSem.signal()
    }
    return kCVReturnSuccess
}
// SLOW path: simulates code that must be able to draw at any time from a
// non-main thread, rendering into the shared global `screen` CGContext and
// waiting on `vwaitSem` until the frame has been presented.
class BackgroundThread: Thread {
    var lastTime: CFTimeInterval = 0  // timestamp of the previous frame (for the commented log)
    override func main() {
        while true {
            let now = CACurrentMediaTime(), timePassed = ((now-lastTime)*1000).rounded()
            // NSLog("\(timePassed)")
            lastTime = now
            // Clear the whole 2x backing bitmap (dimensions hard-coded to match
            // the 640x480 window at contentsScale 2).
            screen?.clear(CGRect(x:0, y:0, width:640*2, height:480*2))
            // Fill/stroke color are loop-invariant — set them once per frame.
            // (Fix: the original re-set both colors inside the loop, 5001
            // redundant CG state changes per frame.)
            screen?.setFillColor(CGColor.white)
            screen?.setStrokeColor(CGColor.white)
            for _ in 0...5000 {
                screen?.fill(CGRect(x: CGFloat(arc4random_uniform(640*2)+1), y: CGFloat(arc4random_uniform(480*2)+1), width: 6*2, height: 6*2))
            }
            vwaitSem.wait()  // block until the display link has consumed this frame
        }
    }
}
// App bootstrap: a minimal menu bar (Quit only) and a 640x480 window.
let width = 640, height = 480,
appMenuItem = NSMenuItem(),
quitMenuItem = NSMenuItem(title:"Quit",
action:#selector(NSApplication.terminate), keyEquivalent:"q"),
window = NSWindow(contentRect:NSMakeRect(0,0, CGFloat(width), CGFloat(height)),
styleMask:[.closable,.titled], backing:.buffered, defer:false),
// Use the display's own color profile so presenting avoids color conversion.
colorProfile = ColorSyncProfileCreateWithDisplayID(0),
colorSpace = CGColorSpace(platformColorSpaceRef: colorProfile!.toOpaque()),
// Offscreen 2x backing bitmap shared between the view and the thread.
screen_ = CGContext(data: nil, width: Int(width)*2, height:Int(height)*2, bitsPerComponent:8, bytesPerRow: 0,
space: colorSpace!, bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue),
backgroundThread = BackgroundThread()
NSApp.setActivationPolicy(NSApplication.ActivationPolicy.regular)
NSApp.mainMenu = NSMenu()
NSApp.mainMenu?.addItem(appMenuItem)
appMenuItem.submenu = NSMenu()
appMenuItem.submenu?.addItem(quitMenuItem)
window.cascadeTopLeft(from:NSMakePoint(20,20))
window.makeKeyAndOrderFront(nil)
window.contentView = View()
window.makeFirstResponder(window.contentView)
NSApp.activate(ignoringOtherApps:true)
// Publish the globals read by the display-link callback and the thread.
mainWindow = window
screen = screen_
// Drive redraws from the main display's vsync.
CVDisplayLinkCreateWithCGDisplay(CGMainDisplayID(), &link)
CVDisplayLinkSetOutputCallback(link!, displayLinkOutputCallback, UnsafeMutableRawPointer(Unmanaged.passUnretained(window).toOpaque()))
CVDisplayLinkStart(link!)
// backgroundThread.start() // SLOW
NSApp.run()
I misread the note in the documentation for makeImage() and thought it would not copy the data unless it really had to. Well, Instruments shows it does copy the data. Every single frame.
So I switched to Metal and now I can draw from the background thread with the same performance/CPU usage as with CGContext alone, with no copies as far as I can tell.
Here's some working code:
import Cocoa
import MetalKit
// MTKView that presents a CPU-rendered CGContext bitmap every frame with
// minimal copying: the bitmap memory is page-aligned, wrapped in an MTLBuffer
// via bytesNoCopy, aliased as a linear MTLTexture, and blitted to the drawable.
class View: MTKView {
var screen: CGContext?               // CPU drawing surface (shared with BackgroundThread)
var commandQueue: MTLCommandQueue?
var buffer: MTLBuffer?               // wraps the CGContext's backing memory (no copy)
var texture: MTLTexture?             // linear texture view over `buffer`
var vwaitSem = DispatchSemaphore(value: 0)   // paces the drawing thread to presented frames
var backgroundThread: Thread?
var allocationSize = 0               // page-aligned size of the shared bitmap, in bytes
// Rounds `size` up to the next multiple of `align` (align must be a power of two).
func alignUp(size: Int, align: Int) -> Int {return (size+(align-1)) & ~(align-1)}
override var acceptsFirstResponder: Bool {return true}
required init(coder aDecoder: NSCoder) {fatalError("This class does not support NSCoding")}
init() {super.init(frame: CGRect(x:0, y:0, width:0, height: 0), device: MTLCreateSystemDefaultDevice())}
// One-time setup once the view has a window (and thus a layer + screen scale):
// allocate the shared bitmap, build the CGContext / MTLBuffer / MTLTexture over
// the same memory, and start the drawing thread.
override func viewDidMoveToWindow() {
layer?.contentsScale = NSScreen.main!.backingScaleFactor
let metalLayer = layer as! CAMetalLayer
// Rows must satisfy the device's linear-texture alignment so the buffer can
// back a texture (makeTexture(descriptor:offset:bytesPerRow:)).
let pixelRowAlignment = metalLayer.device!.minimumLinearTextureAlignment(for: metalLayer.pixelFormat)
let bytesPerRow = alignUp(size: Int(layer!.frame.width)*Int(layer!.contentsScale)*4, align: pixelRowAlignment)
let pagesize = Int(getpagesize())
var data: UnsafeMutableRawPointer? = nil
// makeBuffer(bytesNoCopy:) requires page-aligned memory and a page-multiple length.
allocationSize = alignUp(size: bytesPerRow*Int(layer!.frame.height)*Int(layer!.contentsScale), align: pagesize)
posix_memalign(&data, pagesize, allocationSize)
// Match the display's color profile so presentation avoids color-matching work.
let colorProfile = ColorSyncProfileCreateWithDisplayID(0),
colorSpace = CGColorSpace(platformColorSpaceRef: colorProfile!.toOpaque()),
screen_ = CGContext(data: data,
width: Int(layer!.frame.width)*Int(layer!.contentsScale),
height: Int(layer!.frame.height)*Int(layer!.contentsScale),
bitsPerComponent:8, bytesPerRow: bytesPerRow,
space: colorSpace!, bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)!,
buffer_ = metalLayer.device!.makeBuffer(bytesNoCopy: data!, length: allocationSize, options: .storageModeManaged,
deallocator: { pointer, length in free(self.screen!.data!) })!,
textureDescriptor = MTLTextureDescriptor()
textureDescriptor.pixelFormat = metalLayer.pixelFormat
textureDescriptor.width = screen_.width
textureDescriptor.height = screen_.height
textureDescriptor.storageMode = buffer_.storageMode
textureDescriptor.usage = MTLTextureUsage(rawValue: MTLTextureUsage.shaderRead.rawValue)
texture = buffer_.makeTexture(descriptor: textureDescriptor, offset: 0, bytesPerRow: screen_.bytesPerRow)
commandQueue = device?.makeCommandQueue()
screen = screen_
buffer = buffer_
backgroundThread = BackgroundThread(screen: screen!, vwaitSem: vwaitSem)
backgroundThread!.start()
}
// Presents the current contents of `screen`: flush CPU writes to the managed
// buffer, blit the aliased texture into the drawable, and signal the drawing
// thread once the GPU has finished with the frame.
override func draw(_ dirtyRect: NSRect) {
if let drawable = currentDrawable {
buffer!.didModifyRange(0..<allocationSize)
// NOTE(review): this replace(region:...) re-uploads the same bytes the
// buffer-backed texture already aliases — it appears redundant with
// didModifyRange above (and itself performs a copy); confirm whether
// removing it changes the rendered output.
texture!.replace(region: MTLRegionMake2D(0,0, screen!.width, screen!.height),
mipmapLevel:0, slice:0, withBytes: screen!.data!, bytesPerRow: screen!.bytesPerRow, bytesPerImage: 0)
let commandBuffer = commandQueue!.makeCommandBuffer()!
let blitPass = commandBuffer.makeBlitCommandEncoder()!
blitPass.copy(from: texture!, sourceSlice:0, sourceLevel:0, sourceOrigin: MTLOrigin(x:0,y:0,z:0),
sourceSize: MTLSize(width:screen!.width, height:screen!.height, depth: 1),
to: drawable.texture, destinationSlice:0, destinationLevel:0, destinationOrigin: MTLOrigin(x:0,y:0,z:0))
blitPass.endEncoding()
if let renderPass = currentRenderPassDescriptor {
renderPass.colorAttachments[0].texture = drawable.texture
renderPass.colorAttachments[0].loadAction = .load
// Empty render pass keeps MTKView's drawable lifecycle happy.
commandBuffer.makeRenderCommandEncoder(descriptor: renderPass)!.endEncoding()
commandBuffer.addCompletedHandler {cb in self.vwaitSem.signal()}
commandBuffer.present(drawable)
commandBuffer.commit()
}
}
}
}
/// Drawing thread: paints 5001 random white 6x6 squares into the shared
/// CGContext each frame, then blocks on the semaphore until the view has
/// presented that frame.
class BackgroundThread: Thread {
    var screen: CGContext
    var vwaitSem: DispatchSemaphore
    var x = 0
    init(screen: CGContext, vwaitSem: DispatchSemaphore) {
        self.screen = screen
        self.vwaitSem = vwaitSem
    }
    override func main() {
        while true {
            // Wipe the whole bitmap, then draw this frame's squares.
            screen.clear(CGRect(x: 0, y: 0, width: screen.width, height: screen.height))
            screen.setFillColor(CGColor.white)
            let xBound = UInt32(screen.width), yBound = UInt32(screen.height)
            for _ in 0...5000 {
                let origin = CGPoint(x: CGFloat(arc4random_uniform(xBound+1)),
                                     y: CGFloat(arc4random_uniform(yBound+1)))
                screen.fill(CGRect(origin: origin, size: CGSize(width: 6, height: 6)))
            }
            // Wait for the GPU to finish presenting before starting the next frame.
            vwaitSem.wait()
        }
    }
}
// App bootstrap: a minimal menu bar (Quit only) and a 640x480 window whose
// content view (the MTKView-based View) creates and starts the drawing thread.
let width = 640, height = 480,
appMenuItem = NSMenuItem(),
quitMenuItem = NSMenuItem(title:"Quit",
action:#selector(NSApplication.terminate), keyEquivalent:"q"),
window = NSWindow(contentRect:NSMakeRect(0,0, CGFloat(width), CGFloat(height)),
styleMask:[.closable,.titled], backing:.buffered, defer:false)
NSApp.setActivationPolicy(NSApplication.ActivationPolicy.regular)
NSApp.mainMenu = NSMenu()
NSApp.mainMenu?.addItem(appMenuItem)
appMenuItem.submenu = NSMenu()
appMenuItem.submenu?.addItem(quitMenuItem)
window.cascadeTopLeft(from:NSMakePoint(20,20))
window.makeKeyAndOrderFront(nil)
window.contentView = View()
window.makeFirstResponder(window.contentView)
NSApp.activate(ignoringOtherApps:true)
NSApp.run()
I'm implementing custom transition using CABasicAnimation and UIView.animate both. Also need to implement a custom interactive transition using UIPercentDrivenInteractiveTransition which exactly copies the behavior of the native iOS swipe back. Animation without a back swipe gesture (when I'm pushing and popping by the back arrow) works fine and smoothly. Moreover, swipe back also works smoothly, except when the gesture velocity is more than 900
Gesture Recognition function:
/// Drives the custom interactive pop from a screen-edge pan, mirroring the
/// native swipe-back behaviour.
/// (Fix: `#objc` and `#unknown default:` were garbled attribute spellings —
/// Swift requires `@objc` and `@unknown default:`.)
@objc func handleBackGesture(_ gesture: UIScreenEdgePanGestureRecognizer) {
    guard animationTransition != nil else { return }
    switch gesture.state {
    case .began:
        // NOTE(review): `anumationDuration` looks like a typo for
        // `animationDuration` declared elsewhere in this type; renaming the
        // property is out of scope for this block.
        interactionController = TransparentNavigationControllerTransitionInteractor(duration: anumationDuration)
        popViewController(animated: true)
    case .changed:
        guard let view = gesture.view?.superview else { return }
        // Progress is the horizontal translation as a fraction of the width,
        // clamped to 0...1.
        let translation = gesture.translation(in: view)
        var percentage = translation.x / view.bounds.size.width
        percentage = min(1.0, max(0.0, percentage))
        shouldCompleteTransition = percentage > 0.5
        interactionController?.update(percentage)
    case .cancelled, .failed, .possible:
        if let interactionController = self.interactionController {
            isInteractiveStarted = false
            interactionController.cancel()
        }
    case .ended:
        // A completionSpeed of exactly 1.0 can cause a visible snap; 0.999 is
        // a common workaround.
        interactionController?.completionSpeed = 0.999
        // Finish if the user dragged past halfway OR flicked fast enough.
        // NOTE(review): the surrounding text mentions a 900 pt/s threshold but
        // the code uses 800 — confirm which is intended.
        let greaterThanMaxVelocity = gesture.velocity(in: view).x > 800
        let canFinish = shouldCompleteTransition || greaterThanMaxVelocity
        canFinish ? interactionController?.finish() : interactionController?.cancel()
        interactionController = nil
    @unknown default: assertionFailure()
    }
}
UIPercentDrivenInteractiveTransition class. Here I'm synchronizing layer animation.
/// Percent-driven interactor that scrubs the container layer's Core Animation
/// timeline in lock-step with UIKit's transition progress, so CABasicAnimation
/// based transitions track the swipe-back gesture.
final class TransparentNavigationControllerTransitionInteractor: UIPercentDrivenInteractiveTransition {
    // MARK: - Private Properties
    private var context: UIViewControllerContextTransitioning?
    private var pausedTime: CFTimeInterval = 0
    private let animationDuration: TimeInterval
    // MARK: - Initialization
    init(duration: TimeInterval) {
        // The layer timeline needs a shorter duration than the UIView animation
        // to stay visually in sync; 0.4 was found empirically.
        self.animationDuration = duration * 0.4
        super.init()
    }
    // MARK: - Public Methods
    override func startInteractiveTransition(_ transitionContext: UIViewControllerContextTransitioning) {
        super.startInteractiveTransition(transitionContext)
        context = transitionContext
        // Freeze the container layer's clock so update(_:) can scrub it manually.
        pausedTime = transitionContext.containerView.layer.convertTime(CACurrentMediaTime(), from: nil)
        transitionContext.containerView.layer.speed = 0
        transitionContext.containerView.layer.timeOffset = pausedTime
    }
    override func finish() {
        restart(isFinishing: true)
        super.finish()
    }
    override func cancel() {
        restart(isFinishing: false)
        super.cancel()
    }
    override func update(_ percentComplete: CGFloat) {
        super.update(percentComplete)
        guard let transitionContext = context else { return }
        // Scrub the frozen layer timeline to match the gesture's progress.
        let progress = CGFloat(animationDuration) * percentComplete
        transitionContext.containerView.layer.timeOffset = pausedTime + Double(progress)
    }
    // MARK: - Private Methods
    /// Un-freezes the container layer's clock when the interaction ends.
    ///
    /// FIX: `beginTime` must be re-anchored by subtracting the accumulated
    /// `timeOffset`; otherwise resuming after a high-velocity swipe makes the
    /// layer timeline jump, which showed up as a glitch at the end of fast
    /// swipes. With the timeline correctly re-anchored, the layer simply runs
    /// forward (speed = 1) for both outcomes — super.finish()/super.cancel()
    /// drive the remaining UIKit animation in the right direction.
    /// (`isFinishing` is retained for call-site compatibility.)
    private func restart(isFinishing: Bool) {
        guard let transitionLayer = context?.containerView.layer else { return }
        transitionLayer.beginTime =
            transitionLayer.convertTime(CACurrentMediaTime(), from: nil) - transitionLayer.timeOffset
        transitionLayer.speed = 1
    }
}
And here is my Dismissal animation function in UIViewControllerAnimatedTransitioning class
/// Pop (dismiss) animation: slides `fromView` off to the right while `toView`
/// slides in from the left, revealed through an expanding CAShapeLayer mask so
/// the bottom view appears progressively.
/// NOTE(review): statement order here is deliberate (model value of the mask
/// path is set to its final state before the CABasicAnimation is attached) —
/// do not reorder.
private func runDismissAnimationFrom(
_ fromView: UIView,
to toView: UIView,
in transitionContext: UIViewControllerContextTransitioning) {
guard let toViewController = transitionContext.viewController(forKey: .to) else { return }
// Start the underlying view a third of the width off-screen (parallax).
toView.frame = toView.frame.offsetBy(dx: -fromView.frame.width / 3, dy: 0)
let toViewFinalFrame = transitionContext.finalFrame(for: toViewController)
let fromViewFinalFrame = fromView.frame.offsetBy(dx: fromView.frame.width, dy: 0)
// Create mask to hide bottom view with sliding
let slidingMask = CAShapeLayer()
// Zero-width rect at the reveal edge; animates out to the full final frame.
let initialMaskPath = UIBezierPath(rect: CGRect(
x: fromView.frame.width / 3,
y: 0,
width: 0,
height: toView.frame.height)
)
let finalMaskPath = UIBezierPath(rect: toViewFinalFrame)
slidingMask.path = initialMaskPath.cgPath
toView.layer.mask = slidingMask
toView.alpha = 0
let slidingAnimation = CABasicAnimation(keyPath: "path")
slidingAnimation.fromValue = initialMaskPath.cgPath
slidingAnimation.toValue = finalMaskPath.cgPath
slidingAnimation.timingFunction = .init(name: .linear)
// Set the model value first so the mask does not snap back when the
// animation is removed.
slidingMask.path = finalMaskPath.cgPath
slidingMask.add(slidingAnimation, forKey: slidingAnimation.keyPath)
// Frame/alpha changes run through UIKit so the interactive transition can
// drive them; the mask animation above is synchronized via the container
// layer's timeOffset (see the interactor).
UIView.animate(
withDuration: duration,
delay: 0,
options: animationOptions,
animations: {
fromView.frame = fromViewFinalFrame
toView.frame = toViewFinalFrame
toView.alpha = 1
},
completion: { _ in
toView.layer.mask = nil
transitionContext.completeTransition(!transitionContext.transitionWasCancelled)
})
}
Note that the glitch occurs only when the swipe has a high velocity.
Here a video with the result of smooth animation at normal speed and not smooth at high speed - https://youtu.be/1d-kTPlhNvE
UPD:
I've already tried to use UIViewPropertyAnimator combine with
interruptibleAnimator(using transitionContext: UIViewControllerContextTransitioning) -> UIViewImplicitlyAnimating
But the result is another type of glitching.
I've solved the issue, just change a part of restart function:
transitionLayer.beginTime =
transitionLayer.convertTime(CACurrentMediaTime(), from: nil) - transitionLayer.timeOffset
transitionLayer.speed = 1
I don't really understand why, but it looks like the timeOffset subtraction is what makes it work!
I'm trying to make something simple to test Vision Framework on MacOS.
I tried to modify code from this tutorial to use a single image from screenshot instead of camera feed.
https://www.appcoda.com/vision-framework-introduction/
However, I get this error:
Error Domain=com.apple.vis Code=3 "Failed to create image for processing due to invalid requested buffer dimensions"
Is it because screenshot image doesn't fit certain specification? Do I need to preprocess the file?
If so, how can I process it in order to fit the dimensions?
My testing code is below.
Thanks!
import Cocoa
import Vision
/// Minimal macOS test bed for the Vision framework: runs text-rectangle
/// detection on a screenshot file and logs the detected word/letter frames.
class ViewController: NSViewController {
    /// Vision requests to perform on the input image.
    var requests = [VNRequest]()

    /// Loads ~/Screenshot.png and performs text-rectangle detection on it.
    /// NOTE(review): the "invalid requested buffer dimensions" Vision error is
    /// typically raised when the image decodes to a zero/invalid pixel size —
    /// verify the file exists and is a valid, non-empty bitmap before blaming
    /// the request configuration.
    func start() {
        let textRequest = VNDetectTextRectanglesRequest(completionHandler: self.detectTextHandler)
        textRequest.reportCharacterBoxes = true
        self.requests = [textRequest]
        let url = URL(fileURLWithPath:NSString(string:"~/Screenshot.png").expandingTildeInPath)
        let imageRequestHandler = VNImageRequestHandler(url:url)
        do {
            try imageRequestHandler.perform(self.requests)
        } catch {
            print(error)
        }
    }

    /// Completion handler for the text request: logs a frame for every word
    /// and every character box found.
    func detectTextHandler(request: VNRequest, error: Error?) {
        guard let observations = request.results else {
            print("no result")
            return
        }
        // Keep only text observations (idiom: compactMap replaces the original
        // map-to-optional + per-element guard).
        let result = observations.compactMap { $0 as? VNTextObservation }
        DispatchQueue.main.async() {
            for region in result {
                self.highlightWord(box: region)
                if let boxes = region.characterBoxes {
                    for characterBox in boxes {
                        self.highlightLetters(box: characterBox)
                    }
                }
            }
        }
    }

    /// Prints the bounding frame of a whole word, computed as the union of its
    /// character boxes, with y flipped to a top-left origin.
    /// (Fix: the original's min/max local names were swapped — `maxX` tracked
    /// the minimum and vice versa. Renamed for clarity; the arithmetic and
    /// printed output are unchanged.)
    func highlightWord(box: VNTextObservation) {
        guard let boxes = box.characterBoxes else {
            return
        }
        // Accumulate the extremes of all character boxes (Vision coordinates
        // are normalized to 0...1, so 9999 is a safe sentinel).
        var minX: CGFloat = 9999.0
        var maxX: CGFloat = 0.0
        var minY: CGFloat = 9999.0
        var maxY: CGFloat = 0.0
        for char in boxes {
            minX = min(minX, char.bottomLeft.x)
            maxX = max(maxX, char.bottomRight.x)
            minY = min(minY, char.bottomRight.y)
            maxY = max(maxY, char.topRight.y)
        }
        // Vision's origin is bottom-left; flip y for a top-left origin.
        let xCord = minX
        let yCord = (1 - maxY)
        let width = (maxX - minX)
        let height = (maxY - minY)
        let frame = CGRect(x: xCord, y: yCord, width: width, height: height)
        print("Word: \(frame)")
    }

    /// Prints the bounding frame of a single character box (y flipped to a
    /// top-left origin).
    func highlightLetters(box: VNRectangleObservation) {
        let xCord = box.topLeft.x
        let yCord = (1 - box.topLeft.y)
        let width = (box.topRight.x - box.bottomLeft.x)
        let height = (box.topLeft.y - box.bottomLeft.y)
        let frame = CGRect(x: xCord, y: yCord, width: width, height: height)
        print("Letter: \(frame)")
    }

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view.
        start()
    }

    override var representedObject: Any? {
        didSet {
            // Update the view, if already loaded.
        }
    }
}