How Do I Process an Image File to Fit Buffer Dimensions for Vision Framework on macOS? - swift

I'm trying to make something simple to test the Vision framework on macOS.
I tried to modify the code from this tutorial to use a single screenshot image instead of the camera feed:
https://www.appcoda.com/vision-framework-introduction/
However, I get this error:
Error Domain=com.apple.vis Code=3 "Failed to create image for processing due to invalid requested buffer dimensions"
Is it because the screenshot image doesn't fit a certain specification? Do I need to preprocess the file?
If so, how can I process it to fit the expected dimensions?
My testing code is below.
Thanks!
import Cocoa
import Vision

class ViewController: NSViewController {

    var requests = [VNRequest]()

    func start() {
        let textRequest = VNDetectTextRectanglesRequest(completionHandler: self.detectTextHandler)
        textRequest.reportCharacterBoxes = true
        self.requests = [textRequest]

        let url = URL(fileURLWithPath: NSString(string: "~/Screenshot.png").expandingTildeInPath)
        let imageRequestHandler = VNImageRequestHandler(url: url)
        do {
            try imageRequestHandler.perform(self.requests)
        } catch {
            print(error)
        }
    }

    func detectTextHandler(request: VNRequest, error: Error?) {
        guard let observations = request.results else {
            print("no result")
            return
        }
        let result = observations.map({ $0 as? VNTextObservation })
        DispatchQueue.main.async {
            for region in result {
                guard let rg = region else {
                    continue
                }
                self.highlightWord(box: rg)
                if let boxes = region?.characterBoxes {
                    for characterBox in boxes {
                        self.highlightLetters(box: characterBox)
                    }
                }
            }
        }
    }

    func highlightWord(box: VNTextObservation) {
        guard let boxes = box.characterBoxes else {
            return
        }
        var maxX: CGFloat = 9999.0
        var minX: CGFloat = 0.0
        var maxY: CGFloat = 9999.0
        var minY: CGFloat = 0.0
        for char in boxes {
            if char.bottomLeft.x < maxX {
                maxX = char.bottomLeft.x
            }
            if char.bottomRight.x > minX {
                minX = char.bottomRight.x
            }
            if char.bottomRight.y < maxY {
                maxY = char.bottomRight.y
            }
            if char.topRight.y > minY {
                minY = char.topRight.y
            }
        }
        let xCord = maxX
        let yCord = (1 - minY)
        let width = (minX - maxX)
        let height = (minY - maxY)
        let frame = CGRect(x: xCord, y: yCord, width: width, height: height)
        print("Word: \(frame)")
    }

    func highlightLetters(box: VNRectangleObservation) {
        let xCord = box.topLeft.x
        let yCord = (1 - box.topLeft.y)
        let width = (box.topRight.x - box.bottomLeft.x)
        let height = (box.topLeft.y - box.bottomLeft.y)
        let frame = CGRect(x: xCord, y: yCord, width: width, height: height)
        print("Letter: \(frame)")
    }

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view.
        start()
    }

    override var representedObject: Any? {
        didSet {
            // Update the view, if already loaded.
        }
    }
}
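For reference, one way to preprocess the screenshot is to decode it to a CGImage yourself, downscale it if it is very large, and hand the result to VNImageRequestHandler(cgImage:). This is a minimal sketch, not a confirmed fix: the same Vision error can also occur when the file at the URL cannot be decoded at all, and the 4096-pixel cap below is an arbitrary placeholder:
import Cocoa
import Vision

func makeHandler(forImageAt url: URL, maxDimension: CGFloat = 4096) -> VNImageRequestHandler? {
    // Decode the file ourselves so a missing or undecodable image fails visibly here.
    guard let image = NSImage(contentsOf: url),
          let cgImage = image.cgImage(forProposedRect: nil, context: nil, hints: nil) else {
        return nil
    }
    let largestSide = max(CGFloat(cgImage.width), CGFloat(cgImage.height))
    guard largestSide > maxDimension else {
        return VNImageRequestHandler(cgImage: cgImage)
    }
    // Redraw into a smaller bitmap context so Vision gets a modest buffer.
    let scale = maxDimension / largestSide
    let width = Int(CGFloat(cgImage.width) * scale)
    let height = Int(CGFloat(cgImage.height) * scale)
    guard let context = CGContext(data: nil, width: width, height: height,
                                  bitsPerComponent: 8, bytesPerRow: 0,
                                  space: CGColorSpaceCreateDeviceRGB(),
                                  bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue) else {
        return nil
    }
    context.interpolationQuality = .high
    context.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))
    guard let scaled = context.makeImage() else { return nil }
    return VNImageRequestHandler(cgImage: scaled)
}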

Related

CIImage pixelBuffer always returns nil

I am trying to apply a filter effect to my WebRTC call, following this tutorial:
https://developer.apple.com/documentation/vision/applying_matte_effects_to_people_in_images_and_video
Here is my code to convert the frame:
func capturer(_ capturer: RTCVideoCapturer, didCapture frame: RTCVideoFrame) {
    let pixelBufferr = frame.buffer as! RTCCVPixelBuffer
    let pixelBufferRef = pixelBufferr.pixelBuffer
    if #available(iOS 15.0, *) {
        DispatchQueue.global().async {
            if let output = GreetingProcessor.shared.processVideoFrame(
                foreground: pixelBufferRef,
                background: self.vbImage) {
                print("new output: \(output) => \(output.pixelBuffer) + \(self.buffer(from: output))")
                guard let px = output.pixelBuffer else { return }
                let rtcPixelBuffer = RTCCVPixelBuffer(pixelBuffer: px)
                let i420buffer = rtcPixelBuffer.toI420()
                let newFrame = RTCVideoFrame(buffer: i420buffer, rotation: frame.rotation, timeStampNs: frame.timeStampNs)
                self.videoSource.capturer(capturer, didCapture: newFrame)
            }
        }
    }
}
Then here is how I apply the effect:
func blendImages(
    background: CIImage,
    foreground: CIImage,
    mask: CIImage,
    isRedMask: Bool = false
) -> CIImage? {
    // scale mask
    let maskScaleX = foreground.extent.width / mask.extent.width
    let maskScaleY = foreground.extent.height / mask.extent.height
    let maskScaled = mask.transformed(by: __CGAffineTransformMake(maskScaleX, 0, 0, maskScaleY, 0, 0))
    // scale background
    let backgroundScaleX = (foreground.extent.width / background.extent.width)
    let backgroundScaleY = (foreground.extent.height / background.extent.height)
    let backgroundScaled = background.transformed(
        by: __CGAffineTransformMake(backgroundScaleX, 0, 0, backgroundScaleY, 0, 0))
    let blendFilter = isRedMask ? CIFilter.blendWithRedMask() : CIFilter.blendWithMask()
    blendFilter.inputImage = foreground
    blendFilter.backgroundImage = backgroundScaled
    blendFilter.maskImage = maskScaled
    return blendFilter.outputImage
}
The problem is that output.pixelBuffer is always nil, so I cannot create an RTCVideoFrame to pass back to the delegate.
Can someone help?
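For context, CIImage's pixelBuffer property is only non-nil when the image was created directly from a CVPixelBuffer; the output of a CIFilter is not backed by one. A minimal sketch of the usual workaround, rendering the filtered image into a fresh buffer with a shared CIContext (the buffer format and reuse strategy here are assumptions, not the asker's code):
import CoreImage
import CoreVideo

let ciContext = CIContext() // create once and reuse; contexts are expensive

func renderToPixelBuffer(_ image: CIImage) -> CVPixelBuffer? {
    var buffer: CVPixelBuffer?
    CVPixelBufferCreate(kCFAllocatorDefault,
                        Int(image.extent.width),
                        Int(image.extent.height),
                        kCVPixelFormatType_32BGRA,
                        nil, // WebRTC may want IOSurface-backed attributes here
                        &buffer)
    guard let buffer = buffer else { return nil }
    ciContext.render(image, to: buffer)
    return buffer
}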

Cropping is not working perfectly as per the frame drawn

I am trying to crop a selected portion of an NSImage that is fitted with proportionallyUpOrDown (aspect-fill) scaling.
I am drawing a selection frame using the mouse-dragged event like this:
class CropImageView: NSImageView {

    var startPoint: NSPoint!
    var shapeLayer: CAShapeLayer!
    var flagCheck = false
    var finalPoint: NSPoint!

    override init(frame frameRect: NSRect) {
        super.init(frame: frameRect)
    }

    required init?(coder: NSCoder) {
        super.init(coder: coder)
    }

    override func draw(_ dirtyRect: NSRect) {
        super.draw(dirtyRect)
    }

    override var image: NSImage? {
        set {
            self.layer = CALayer()
            self.layer?.contentsGravity = kCAGravityResizeAspectFill
            self.layer?.contents = newValue
            self.wantsLayer = true
            super.image = newValue
        }
        get {
            return super.image
        }
    }

    override func mouseDown(with event: NSEvent) {
        self.startPoint = self.convert(event.locationInWindow, from: nil)
        if self.shapeLayer != nil {
            self.shapeLayer.removeFromSuperlayer()
            self.shapeLayer = nil
        }
        self.flagCheck = true
        var pixelColor: NSColor = NSReadPixel(startPoint) ?? NSColor()
        shapeLayer = CAShapeLayer()
        shapeLayer.lineWidth = 1.0
        shapeLayer.fillColor = NSColor.clear.cgColor
        if pixelColor == NSColor.black {
            pixelColor = NSColor.color_white
        } else {
            pixelColor = NSColor.black
        }
        shapeLayer.strokeColor = pixelColor.cgColor
        shapeLayer.lineDashPattern = [1]
        self.layer?.addSublayer(shapeLayer)
        var dashAnimation = CABasicAnimation()
        dashAnimation = CABasicAnimation(keyPath: "lineDashPhase")
        dashAnimation.duration = 0.75
        dashAnimation.fromValue = 0.0
        dashAnimation.toValue = 15.0
        dashAnimation.repeatCount = 0.0
        shapeLayer.add(dashAnimation, forKey: "linePhase")
    }

    override func mouseDragged(with event: NSEvent) {
        let point: NSPoint = self.convert(event.locationInWindow, from: nil)
        var newPoint: CGPoint = self.startPoint
        let xDiff = point.x - self.startPoint.x
        let yDiff = point.y - self.startPoint.y
        let dist = min(abs(xDiff), abs(yDiff))
        newPoint.x += xDiff > 0 ? dist : -dist
        newPoint.y += yDiff > 0 ? dist : -dist
        let path = CGMutablePath()
        path.move(to: self.startPoint)
        path.addLine(to: NSPoint(x: self.startPoint.x, y: newPoint.y))
        path.addLine(to: newPoint)
        path.addLine(to: NSPoint(x: newPoint.x, y: self.startPoint.y))
        path.closeSubpath()
        self.shapeLayer.path = path
    }

    override func mouseUp(with event: NSEvent) {
        self.finalPoint = self.convert(event.locationInWindow, from: nil)
    }
}
and selected the area shown in the picture using a black dotted line.
My cropping logic is this:
// resize Image Methods
extension CropProfileView {
    func resizeImage(image: NSImage) -> Data {
        var scalingFactor: CGFloat = 0.0
        if image.size.width >= image.size.height {
            scalingFactor = image.size.width / cropImgView.size.width
        } else {
            scalingFactor = image.size.height / cropImgView.size.height
        }
        let width = (self.cropImgView.finalPoint.x - self.cropImgView.startPoint.x) * scalingFactor
        let height = (self.cropImgView.startPoint.y - self.cropImgView.finalPoint.y) * scalingFactor
        let xPos = ((image.size.width / 2) - (cropImgView.bounds.midX - self.cropImgView.startPoint.x) * scalingFactor)
        let yPos = ((image.size.height / 2) - (cropImgView.bounds.midY - (cropImgView.size.height - self.cropImgView.startPoint.y)) * scalingFactor)
        var croppedRect: NSRect = NSRect(x: xPos, y: yPos, width: width, height: height)
        let imageRef = image.cgImage(forProposedRect: &croppedRect, context: nil, hints: nil)
        guard let croppedImage = imageRef?.cropping(to: croppedRect) else { return Data() }
        let imageWithNewSize = NSImage(cgImage: croppedImage, size: NSSize(width: width, height: height))
        guard let data = imageWithNewSize.tiffRepresentation,
              let rep = NSBitmapImageRep(data: data),
              let imgData = rep.representation(using: .png, properties: [.compressionFactor: NSNumber(floatLiteral: 0.25)]) else {
            return imageWithNewSize.tiffRepresentation ?? Data()
        }
        return imgData
    }
}
With this cropping logic I am getting this output:
I think because the image is aspect-filled, it is not getting cropped to exactly the selected frame. If you look at the output, the x position, width, and height are not right, or perhaps I am not calculating these coordinates properly. Let me know where my calculation goes wrong.
Note: the CropImageView class in the question is a subclass of NSImageView but the view is layer-hosting and the image is drawn by the layer, not by NSImageView. imageScaling is not used.
When deciding which scaling factor to use, you have to take the size of the image view into account. If the image size is width: 120, height: 100 and the image view size is width: 120, height: 80, then image.size.width >= image.size.height is true and image.size.width/cropImgView.size.width is 1, but the image is still scaled, because image.size.height/cropImgView.size.height is 1.25. Calculate the horizontal and vertical scaling factors and use the largest.
See How to crop a UIImageView to a new UIImage in 'aspect fill' mode?
Here's the calculation of croppedRect assuming cropImgView.size returns self.layer!.bounds.size.
var scalingWidthFactor: CGFloat = image.size.width / cropImgView.size.width
var scalingHeightFactor: CGFloat = image.size.height / cropImgView.size.height
var xOffset: CGFloat = 0
var yOffset: CGFloat = 0
switch cropImgView.layer?.contentsGravity {
case CALayerContentsGravity.resize: break
case CALayerContentsGravity.resizeAspect:
    if scalingWidthFactor > scalingHeightFactor {
        scalingHeightFactor = scalingWidthFactor
        yOffset = (cropImgView.size.height - (image.size.height / scalingHeightFactor)) / 2
    } else {
        scalingWidthFactor = scalingHeightFactor
        xOffset = (cropImgView.size.width - (image.size.width / scalingWidthFactor)) / 2
    }
case CALayerContentsGravity.resizeAspectFill:
    if scalingWidthFactor < scalingHeightFactor {
        scalingHeightFactor = scalingWidthFactor
        yOffset = (cropImgView.size.height - (image.size.height / scalingHeightFactor)) / 2
    } else {
        scalingWidthFactor = scalingHeightFactor
        xOffset = (cropImgView.size.width - (image.size.width / scalingWidthFactor)) / 2
    }
default:
    print("contentsGravity \(String(describing: cropImgView.layer?.contentsGravity)) is not supported")
    return nil
}
let width = (self.cropImgView.finalPoint.x - self.cropImgView.startPoint.x) * scalingWidthFactor
let height = (self.cropImgView.startPoint.y - self.cropImgView.finalPoint.y) * scalingHeightFactor
let xPos = (self.cropImgView.startPoint.x - xOffset) * scalingWidthFactor
let yPos = (cropImgView.size.height - self.cropImgView.startPoint.y - yOffset) * scalingHeightFactor
var croppedRect: NSRect = NSRect(x: xPos, y: yPos, width: width, height: height)
Bugfix: cropImgView.finalPoint should be the corner of the selection, not the location of mouseUp. In CropImageView set self.finalPoint = newPoint in mouseDragged instead of mouseUp.
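A sketch of that change in context (the elided lines are the existing mouseDragged body):
override func mouseDragged(with event: NSEvent) {
    // ... existing code that computes newPoint and builds the path ...
    self.finalPoint = newPoint // the snapped selection corner, not the raw mouse location
    self.shapeLayer.path = path
}

override func mouseUp(with event: NSEvent) {
    // intentionally no longer sets finalPoint
}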

UIScrollView scrollable horizontally even if it has only one page

I have a problem with UIScrollView in my Xcode project. I am using it to show several pictures, but even when it has only one page it still scrolls horizontally. Maybe it's something in setupPhotosInScrollView() or scrollViewDidScroll. I don't know how to fix this issue; everything else seems fine, but the scroll view still misbehaves. Please help!
Here is my code:
override func viewDidLoad() {
    super.viewDidLoad()
    // Ad Title
    eTitleLabel.text = "\(eObj[EVENTS_TITLE]!)"
    // Get photos
    let imageFile = eObj[EVENTS_IMAGE1] as? PFFile
    imageFile?.getDataInBackground(block: { (data, error) in
        if error == nil {
            if let imageData = data {
                self.photosArray.append(UIImage(data: imageData)!)
                self.setupPhotosInScrollView()
                print("PHOTO 1")
            }
        }
    })
    DispatchQueue.main.async {
        if self.eObj[EVENTS_IMAGE2] != nil {
            self.pageControl.numberOfPages = 2
            let imageFile = self.eObj[EVENTS_IMAGE2] as? PFFile
            imageFile?.getDataInBackground(block: { (data, error) in
                if error == nil {
                    if let imageData = data {
                        self.photosArray.append(UIImage(data: imageData)!)
                        self.setupPhotosInScrollView()
                        print("PHOTO 2")
                    }
                }
            })
        }
    }
}

// ------------------------------------------------
// MARK: - SETUP PHOTOS IN SCROLLVIEW
// ------------------------------------------------
@objc func setupPhotosInScrollView() {
    var X: CGFloat = 0
    let Y: CGFloat = 0
    let W: CGFloat = view.frame.size.width
    let H: CGFloat = view.frame.size.height
    let G: CGFloat = 0
    var counter = 0
    // Loop to create ImageViews
    for i in 0..<photosArray.count {
        counter = i
        // Create an ImageView
        let aImg = UIImageView(frame: CGRect(x: X, y: Y, width: W, height: H))
        aImg.tag = i
        aImg.contentMode = .scaleAspectFit
        aImg.image = photosArray[i]
        // Add ImageViews based on X
        X += W + G
        containerScrollView.addSubview(aImg)
    } // ./ FOR loop
    // Place Buttons into a ScrollView
    containerScrollView.contentSize = CGSize(width: W * CGFloat(counter + 2), height: H)
}

// ------------------------------------------------
// MARK: - CHANGE PAGE CONTROL PAGES ON SCROLL
// ------------------------------------------------
func scrollViewDidScroll(_ scrollView: UIScrollView) {
    let pageWidth = containerScrollView.frame.size.width
    let page = Int(floor((containerScrollView.contentOffset.x * 2 + pageWidth) / (pageWidth * 2)))
    pageControl.currentPage = page
}
Replace
containerScrollView.contentSize = CGSize(width: W * CGFloat(counter+2), height: H)
With
containerScrollView.contentSize = CGSize(width: W * CGFloat(photosArray.count), height: H)
because you set a contentSize width that doesn't match the number of photos added: counter ends at photosArray.count - 1, so counter + 2 makes the content one page wider than the photos you actually have.
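For completeness, a sketch of the corrected tail of setupPhotosInScrollView; the alwaysBounceHorizontal line is an extra safeguard and an editor's assumption, not part of the original answer:
// One page now yields contentSize.width == W, so there is nothing to scroll.
containerScrollView.contentSize = CGSize(width: W * CGFloat(photosArray.count), height: H)
// Optional safeguard: don't let a single page bounce sideways.
containerScrollView.alwaysBounceHorizontal = false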

Take a Screenshot of the entire contents of a UICollectionView

The app I'm working on uses collection view cells to display data to the user. I want the user to be able to share the data contained in the cells, but there are usually too many cells to resize and fit into a single iPhone-screen-sized window for a screenshot.
So the problem I'm having is getting an image of all the cells in a collection view, both on-screen and off-screen. I'm aware that off-screen cells don't actually exist, but I'd be interested in a way to fake an image and draw in the data (if that's possible in Swift).
In short, is there a way to programmatically create an image from a collection view and the cells it contains, both on and off screen, with Swift?
Update
If memory is not a concern:
mutating func screenshot(scale: CGFloat) -> UIImage {
    let currentSize = frame.size
    let currentOffset = contentOffset // temp store current offset
    frame.size = contentSize
    setContentOffset(CGPointZero, animated: false)
    // it might need a delay here to allow loading data.
    let rect = CGRect(x: 0, y: 0, width: self.bounds.size.width, height: self.bounds.size.height)
    UIGraphicsBeginImageContextWithOptions(rect.size, false, UIScreen.mainScreen().scale)
    self.drawViewHierarchyInRect(rect, afterScreenUpdates: true)
    let image = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    frame.size = currentSize
    setContentOffset(currentOffset, animated: false)
    return resizeUIImage(image, scale: scale)
}
This works for me:
GitHub link -> contains up-to-date code
getScreenshotRects creates the offsets to scroll to and the frames to capture (the naming is not perfect).
takeScreenshotAtPoint scrolls to the point, waits briefly to allow a redraw, takes the screenshot, and returns it via a completion handler.
stitchImages creates a rect the same size as the content and draws all the images into it.
makeScreenshots uses didSet on a nested array of UIImage and a counter to create all the images while also waiting for completion. When this is done, it fires its own completion handler.
Basic parts:
scroll the collection view -> works
take a screenshot with a delay for a redraw -> works
crop images that are overlapping -> apparently not needed
stitch all images -> works
basic math -> works
maybe freeze or hide the screen while all this is happening (this is not in my answer)
Code:
protocol ScrollViewImager {
    var bounds: CGRect { get }
    var contentSize: CGSize { get }
    var contentOffset: CGPoint { get }
    func setContentOffset(contentOffset: CGPoint, animated: Bool)
    func drawViewHierarchyInRect(rect: CGRect, afterScreenUpdates: Bool) -> Bool
}

extension ScrollViewImager {

    func screenshot(completion: (screenshot: UIImage) -> Void) {
        let pointsAndFrames = getScreenshotRects()
        let points = pointsAndFrames.points
        let frames = pointsAndFrames.frames
        makeScreenshots(points, frames: frames) { (screenshots) -> Void in
            let stitched = self.stitchImages(images: screenshots, finalSize: self.contentSize)
            completion(screenshot: stitched!)
        }
    }

    private func makeScreenshots(points: [[CGPoint]], frames: [[CGRect]], completion: (screenshots: [[UIImage]]) -> Void) {
        var counter: Int = 0
        var images: [[UIImage]] = [] {
            didSet {
                if counter < points.count {
                    makeScreenshotRow(points[counter], frames: frames[counter]) { (screenshot) -> Void in
                        counter += 1
                        images.append(screenshot)
                    }
                } else {
                    completion(screenshots: images)
                }
            }
        }
        makeScreenshotRow(points[counter], frames: frames[counter]) { (screenshot) -> Void in
            counter += 1
            images.append(screenshot)
        }
    }

    private func makeScreenshotRow(points: [CGPoint], frames: [CGRect], completion: (screenshots: [UIImage]) -> Void) {
        var counter: Int = 0
        var images: [UIImage] = [] {
            didSet {
                if counter < points.count {
                    takeScreenshotAtPoint(point: points[counter]) { (screenshot) -> Void in
                        counter += 1
                        images.append(screenshot)
                    }
                } else {
                    completion(screenshots: images)
                }
            }
        }
        takeScreenshotAtPoint(point: points[counter]) { (screenshot) -> Void in
            counter += 1
            images.append(screenshot)
        }
    }

    private func getScreenshotRects() -> (points: [[CGPoint]], frames: [[CGRect]]) {
        let vanillaBounds = CGRect(x: 0, y: 0, width: self.bounds.size.width, height: self.bounds.size.height)
        let xPartial = contentSize.width % bounds.size.width
        let yPartial = contentSize.height % bounds.size.height
        let xSlices = Int((contentSize.width - xPartial) / bounds.size.width)
        let ySlices = Int((contentSize.height - yPartial) / bounds.size.height)
        var currentOffset = CGPoint(x: 0, y: 0)
        var offsets: [[CGPoint]] = []
        var rects: [[CGRect]] = []
        var xSlicesWithPartial: Int = xSlices
        if xPartial > 0 {
            xSlicesWithPartial += 1
        }
        var ySlicesWithPartial: Int = ySlices
        if yPartial > 0 {
            ySlicesWithPartial += 1
        }
        for y in 0..<ySlicesWithPartial {
            var offsetRow: [CGPoint] = []
            var rectRow: [CGRect] = []
            currentOffset.x = 0
            for x in 0..<xSlicesWithPartial {
                if y == ySlices && x == xSlices {
                    let rect = CGRect(x: bounds.width - xPartial, y: bounds.height - yPartial, width: xPartial, height: yPartial)
                    rectRow.append(rect)
                } else if y == ySlices {
                    let rect = CGRect(x: 0, y: bounds.height - yPartial, width: bounds.width, height: yPartial)
                    rectRow.append(rect)
                } else if x == xSlices {
                    let rect = CGRect(x: bounds.width - xPartial, y: 0, width: xPartial, height: bounds.height)
                    rectRow.append(rect)
                } else {
                    rectRow.append(vanillaBounds)
                }
                offsetRow.append(currentOffset)
                if x == xSlices {
                    currentOffset.x = contentSize.width - bounds.size.width
                } else {
                    currentOffset.x = currentOffset.x + bounds.size.width
                }
            }
            if y == ySlices {
                currentOffset.y = contentSize.height - bounds.size.height
            } else {
                currentOffset.y = currentOffset.y + bounds.size.height
            }
            offsets.append(offsetRow)
            rects.append(rectRow)
        }
        return (points: offsets, frames: rects)
    }

    private func takeScreenshotAtPoint(point point_I: CGPoint, completion: (screenshot: UIImage) -> Void) {
        let rect = CGRect(x: 0, y: 0, width: self.bounds.size.width, height: self.bounds.size.height)
        let currentOffset = contentOffset
        setContentOffset(point_I, animated: false)
        delay(0.001) {
            UIGraphicsBeginImageContextWithOptions(rect.size, false, UIScreen.mainScreen().scale)
            self.drawViewHierarchyInRect(rect, afterScreenUpdates: true)
            let image = UIGraphicsGetImageFromCurrentImageContext()
            UIGraphicsEndImageContext()
            self.setContentOffset(currentOffset, animated: false)
            completion(screenshot: image)
        }
    }

    private func delay(delay: Double, closure: () -> ()) {
        dispatch_after(
            dispatch_time(
                DISPATCH_TIME_NOW,
                Int64(delay * Double(NSEC_PER_SEC))
            ),
            dispatch_get_main_queue(), closure)
    }

    private func crop(image image_I: UIImage, toRect rect: CGRect) -> UIImage? {
        guard let imageRef: CGImageRef = CGImageCreateWithImageInRect(image_I.CGImage, rect) else {
            return nil
        }
        return UIImage(CGImage: imageRef)
    }

    private func stitchImages(images images_I: [[UIImage]], finalSize: CGSize) -> UIImage? {
        let finalRect = CGRect(x: 0, y: 0, width: finalSize.width, height: finalSize.height)
        guard images_I.count > 0 else {
            return nil
        }
        UIGraphicsBeginImageContext(finalRect.size)
        var offsetY: CGFloat = 0
        for imageRow in images_I {
            var offsetX: CGFloat = 0
            for image in imageRow {
                let width = image.size.width
                let height = image.size.height
                let rect = CGRect(x: offsetX, y: offsetY, width: width, height: height)
                image.drawInRect(rect)
                offsetX += width
            }
            offsetX = 0
            if let firstimage = imageRow.first {
                offsetY += firstimage.size.height
            } // maybe add error handling here
        }
        let stitchedImages = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return stitchedImages
    }
}

extension UIScrollView: ScrollViewImager {
}
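A usage sketch (editor's addition, matching the Swift 2-era code above): because UICollectionView is a UIScrollView, it picks up the protocol extension directly:
collectionView.screenshot { screenshot in
    // share or save the stitched UIImage here
    print(screenshot.size)
}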
Draw the bitmap data of your UICollectionView into a UIImage using UIKit graphics functions. Then you'll have a UIImage that you could save to disk or do whatever you need with it. Something like this should work:
// your collection view
@IBOutlet weak var myCollectionView: UICollectionView!
//...
let image: UIImage!
// draw your UICollectionView into a UIImage
UIGraphicsBeginImageContext(myCollectionView.frame.size)
myCollectionView.layer.renderInContext(UIGraphicsGetCurrentContext()!)
image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
For Swift 4, to make a screenshot of a UICollectionView:
func makeScreenShotToShare() -> UIImage {
    UIGraphicsBeginImageContextWithOptions(CGSize(width: self.colHistory.contentSize.width, height: self.colHistory.contentSize.height + 84.0), false, 0)
    colHistory.scrollToItem(at: IndexPath(row: 0, section: 0), at: .top, animated: false)
    colHistory.layer.render(in: UIGraphicsGetCurrentContext()!)
    let row = colHistory.numberOfItems(inSection: 0)
    let numberofRowthatShowinscreen = self.colHistory.frame.size.height / (self.arrHistoryData.count == 1 ? 130 : 220)
    let scrollCount = row / Int(numberofRowthatShowinscreen)
    for i in 0..<scrollCount {
        colHistory.scrollToItem(at: IndexPath(row: (i + 1) * Int(numberofRowthatShowinscreen), section: 0), at: .top, animated: false)
        colHistory.layer.render(in: UIGraphicsGetCurrentContext()!)
    }
    let image: UIImage = UIGraphicsGetImageFromCurrentImageContext()!
    UIGraphicsEndImageContext()
    return image
}
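For comparison, a compact modern variant of the first answer's "if memory is not a concern" approach (editor's sketch in Swift 5 using UIGraphicsImageRenderer; it assumes the whole content fits in memory and that a synchronous layoutIfNeeded is enough to realize all cells):
import UIKit

extension UICollectionView {
    func fullContentSnapshot() -> UIImage {
        let savedFrame = frame
        let savedOffset = contentOffset
        // Temporarily grow the frame so every cell is "on screen".
        frame = CGRect(origin: .zero, size: contentSize)
        setContentOffset(.zero, animated: false)
        layoutIfNeeded()
        let image = UIGraphicsImageRenderer(bounds: bounds).image { context in
            layer.render(in: context.cgContext)
        }
        frame = savedFrame
        setContentOffset(savedOffset, animated: false)
        return image
    }
}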

Moving multiple sprite nodes at once in Swift

Can I make an array of SKNodes of which one is selected randomly and brought from the top to the bottom of the screen? For example, say I have 25 or so different platforms that will be falling out of the sky on a portrait iPhone. I need it to randomly select one of the platforms from the array to start and then, after a certain amount of time or pixel distance, randomly select another to continue the same action until reaching the bottom, and so on. I'm new to Swift but have a pretty decent understanding of it. I haven't been able to find out how to create an array of SKSpriteNodes yet either. Could someone help with this?
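As an aside on the array question, a bare-bones sketch (editor's addition in modern Swift; the sizes and count are placeholders):
import SpriteKit

let platforms: [SKSpriteNode] = (0..<25).map { _ in
    SKSpriteNode(color: .black, size: CGSize(width: 60, height: 15))
}
// Pick one platform at random to start falling.
let chosen = platforms[Int(arc4random_uniform(UInt32(platforms.count)))]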
So far, the only way I've been able to get any effect similar to what I want is by placing each of the nodes off screen, adding them to a dictionary, and making them move like this:
class ObstacleStatus {

    var isMoving = false
    var timeGapForNextRun = Int(0)
    var currentInterval = Int(0)

    init(isMoving: Bool, timeGapForNextRun: Int, currentInterval: Int) {
        self.isMoving = isMoving
        self.timeGapForNextRun = timeGapForNextRun
        self.currentInterval = currentInterval
    }

    func shouldRunBlock() -> Bool {
        return self.currentInterval > self.timeGapForNextRun
    }
}
and
func moveBlocks() {
    for (blocks, ObstacleStatus) in self.blockStatuses {
        var thisBlock = self.childNodeWithName(blocks)
        var thisBlock2 = self.childNodeWithName(blocks)
        if ObstacleStatus.shouldRunBlock() {
            ObstacleStatus.timeGapForNextRun = randomNum()
            ObstacleStatus.currentInterval = 0
            ObstacleStatus.isMoving = true
        }
        if ObstacleStatus.isMoving {
            if thisBlock?.position.y > blockMaxY {
                thisBlock?.position.y -= CGFloat(self.fallSpeed)
            } else {
                thisBlock?.position.y = self.origBlockPosistionY
                ObstacleStatus.isMoving = false
            }
        } else {
            ObstacleStatus.currentInterval++
        }
    }
}
using this for the random function:
func randomNum() -> Int {
    return randomInt(50, max: 300)
}

func randomInt(min: Int, max: Int) -> Int {
    return min + Int(arc4random_uniform(UInt32(max - min + 1)))
}
All this has been doing for me is moving the pieces down at randomly timed intervals, often overlapping them, but increasing the min or max of the random numbers doesn't really have an effect on the actual timing of the gaps. I need to be able to specify a distance or time gap.
One of many possible solutions is to create a falling action sequence which calls itself recursively until no more platform nodes are left. You can control the mean "gap time" and the range of its random variation. Here is a working example (assuming the iOS SpriteKit game template):
import SpriteKit

extension Double {
    var cg: CGFloat { return CGFloat(self) }
}

extension Int {
    var cg: CGFloat { return CGFloat(self) }
}

func randomInt(range: Range<Int>) -> Int {
    return range.startIndex + Int(arc4random_uniform(UInt32(range.endIndex - range.startIndex)))
}

extension Array {
    func randomElement() -> Element? {
        switch self.count {
        case 0: return nil
        default: return self[randomInt(0..<self.count)]
        }
    }
    func apply<Ignore>(f: (T) -> (Ignore)) {
        for e in self { f(e) }
    }
}

class GameScene: SKScene {

    var screenWidth: CGFloat { return UIScreen.mainScreen().bounds.size.width }
    var screenHeight: CGFloat { return UIScreen.mainScreen().bounds.size.height }
    let PlatformName = "Platform"
    let FallenPlatformName = "FallenPlatform"

    func createRectangularNode(#x: CGFloat, y: CGFloat, width: CGFloat, height: CGFloat) -> SKShapeNode {
        let rect = CGRect(x: x, y: y, width: width, height: height)
        let path = UIBezierPath(rect: rect)
        let node = SKShapeNode(path: path.CGPath)
        return node
    }

    func createPlatformNodes(numNodes: Int, atHeight: CGFloat) -> [SKShapeNode] {
        var padding = 20.cg
        let width = (screenWidth - padding) / numNodes.cg - padding
        padding = (screenWidth - width * numNodes.cg) / (numNodes.cg + 1)
        let height = width / 4
        var nodes = [SKShapeNode]()
        for x in stride(from: padding, to: numNodes.cg * (width + padding), by: width + padding) {
            let node = createRectangularNode(x: x, y: atHeight, width: width, height: height)
            node.fillColor = SKColor.blackColor()
            node.name = PlatformName
            nodes.append(node)
        }
        return nodes
    }

    func createFallingAction(#by: CGFloat, duration: NSTimeInterval, timeGap: NSTimeInterval, range: NSTimeInterval = 0) -> SKAction {
        let gap = SKAction.waitForDuration(timeGap, withRange: range)
        // let fall = SKAction.moveToY(toHeight, duration: duration) // moveToY appears to have a bug: behaves as moveBy
        let fall = SKAction.moveByX(0, y: -by, duration: duration)
        let next = SKAction.customActionWithDuration(0) { [unowned self]
            node, time in
            node.name = self.FallenPlatformName
            self.fallNextNode()
        }
        return SKAction.sequence([gap, fall, next])
    }

    func fallNextNode() {
        if let nextNode = self[PlatformName].randomElement() as? SKShapeNode {
            let falling = createFallingAction(by: screenHeight * 0.7, duration: 1, timeGap: 2.5, range: 2) // mean time gap and random range
            nextNode.runAction(falling)
        } else {
            self.children.apply { ($0 as? SKShapeNode)?.fillColor = SKColor.redColor() }
        }
    }

    override func didMoveToView(view: SKView) {
        self.backgroundColor = SKColor.whiteColor()
        for platform in createPlatformNodes(7, atHeight: screenHeight * 0.8) {
            self.addChild(platform)
        }
        fallNextNode()
    }
}
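The spacing between falls is governed by the timeGap and range arguments of createFallingAction, since waitForDuration(_:withRange:) varies the wait uniformly by up to half the range on either side of the duration. For example, to drop a platform every 1.5 seconds on average, varying by roughly half a second either way:
let falling = createFallingAction(by: screenHeight * 0.7, duration: 1.2, timeGap: 1.5, range: 1.0)
nextNode.runAction(falling)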