Check if on correct dispatch queue in Swift 3 - swift

I have a few unit tests in which I'd like to test if a callback is called on the correct dispatch queue.
In Swift 2, I compared the label of the current queue to my test queue. However, in Swift 3 the DISPATCH_CURRENT_QUEUE_LABEL constant no longer exists.
I did find the dispatch_assert_queue function, which seems to be what I need, but I'm not sure how to call it.
My Swift 2 code:
let testQueueLabel = "com.example.my-test-queue"
let testQueue = dispatch_queue_create(testQueueLabel, nil)
let currentQueueLabel = String(UTF8String: dispatch_queue_get_label(DISPATCH_CURRENT_QUEUE_LABEL))!
XCTAssertEqual(currentQueueLabel, testQueueLabel, "callback should be called on specified queue")
Update:
I got confused by the lack of autocomplete, but it is possible to use __dispatch_assert_queue:
if #available(iOS 10.0, *) {
__dispatch_assert_queue(test1Queue)
}
While this does work for unit tests, it annoyingly stops the whole process with an EXC_BAD_INSTRUCTION instead of just failing the test.

Use dispatchPrecondition(.onQueue(expectedQueue)), the Swift 3 API replacement for the dispatch_assert_queue() C API.
This was covered in the WWDC 2016 GCD session (21:00, Slide 128):
https://developer.apple.com/videos/play/wwdc2016/720/
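For example, inside a callback under test, a minimal sketch (the queue label here is just illustrative) looks like this:
let expectedQueue = DispatchQueue(label: "com.example.my-test-queue")
expectedQueue.async {
    // Traps if this closure is not running on expectedQueue. Like
    // __dispatch_assert_queue, a failed precondition stops the whole
    // process rather than failing a single test.
    dispatchPrecondition(condition: .onQueue(expectedQueue))
}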

Answering my own question:
Based on KFDoom's comments, I'm now using setSpecific and getSpecific.
This creates a key, sets it on the test queue, and later on, gets it again:
let testQueueLabel = "com.example.my-test-queue"
let testQueue = DispatchQueue(label: testQueueLabel, attributes: [])
let testQueueKey = DispatchSpecificKey<Void>()
testQueue.setSpecific(key: testQueueKey, value: ())
// ... later on, to test:
XCTAssertNotNil(DispatchQueue.getSpecific(key: testQueueKey), "callback should be called on specified queue")
Note that there's no value associated with the key (its type is Void); I'm only interested in the existence of the specific, not in its value.
Important!
Make sure to keep a reference to the key, or clean up after you're done using it. Otherwise a newly created key could use the same memory address, leading to weird behaviour. See: http://tom.lokhorst.eu/2018/02/leaky-abstractions-in-swift-with-dispatchqueue
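One way to do that in a test target is to keep the key alive as a stored property of the test case; a minimal sketch, with the class and property names made up for the example:
import XCTest

final class CallbackQueueTests: XCTestCase {
    // Keeping the key for the lifetime of the test case prevents a newly
    // created key from reusing the same memory address.
    private let testQueueKey = DispatchSpecificKey<Void>()
    private lazy var testQueue: DispatchQueue = {
        let queue = DispatchQueue(label: "com.example.my-test-queue")
        queue.setSpecific(key: self.testQueueKey, value: ())
        return queue
    }()

    func testCallbackRunsOnTestQueue() {
        let done = expectation(description: "callback")
        testQueue.async {
            XCTAssertNotNil(DispatchQueue.getSpecific(key: self.testQueueKey),
                            "callback should be called on specified queue")
            done.fulfill()
        }
        waitForExpectations(timeout: 1, handler: nil)
    }
}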

Tests based on KFDoom's answer:
import XCTest
import Dispatch
class TestQueue: XCTestCase {
func testWithSpecificKey() {
let queue = DispatchQueue(label: "label")
let key = DispatchSpecificKey<Void>()
queue.setSpecific(key: key, value: ())
let expectation1 = expectation(description: "main")
let expectation2 = expectation(description: "queue")
DispatchQueue.main.async {
if (DispatchQueue.getSpecific(key: key) == nil) {
expectation1.fulfill()
}
}
queue.async {
if (DispatchQueue.getSpecific(key: key) != nil) {
expectation2.fulfill()
}
}
waitForExpectations(timeout: 1, handler: nil)
}
func testWithPrecondition() {
let queue = DispatchQueue(label: "label")
let expectation1 = expectation(description: "main")
let expectation2 = expectation(description: "queue")
DispatchQueue.main.async {
dispatchPrecondition(condition: .notOnQueue(queue))
expectation1.fulfill()
}
queue.async {
dispatchPrecondition(condition: .onQueue(queue))
expectation2.fulfill()
}
waitForExpectations(timeout: 1, handler: nil)
}
}

One option is to set "specific" key/value data on the queue with setSpecific and retrieve it later with getSpecific. Alternatively, you can use a precondition to check directly whether you're on a given queue, which should fulfill the "get current queue" need. src: https://github.com/duemunk/Async/blob/feature/Swift_3.0/AsyncTest/AsyncTests.swift
and
https://github.com/apple/swift/blob/master/stdlib/public/SDK/Dispatch/Dispatch.swift

One related option is to set a Main Queue / UI Queue precondition:
dispatchPrecondition(condition: .onQueue(DispatchQueue.main))
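For example, a method that must only ever run on the main queue can assert that at the top (the method and the statusLabel property below are made up for the illustration):
func show(_ message: String) {
    // Traps immediately if a background queue calls this by mistake.
    dispatchPrecondition(condition: .onQueue(DispatchQueue.main))
    statusLabel.text = message   // statusLabel is a hypothetical UILabel outlet
}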

/*
Dispatch queue and NSOperations in Swift 3 Xcode 8
*/
protocol Container {
associatedtype ItemType
var count: Int { get }
mutating func pop()
mutating func push(item: ItemType)
mutating func append(item: ItemType)
subscript(i: Int) -> ItemType { get }
}
//Generic stack conforming to Container
struct GenericStack<Element> : Container {
mutating internal func push(item: Element) {
items.append(item)
}
mutating internal func pop() {
items.removeLast()
}
var items = [ItemType]()
internal subscript(i: Int) -> Element {
return items[i]
}
mutating internal func append(item: Element) {
self.push(item: item)
}
internal var count: Int { return items.count }
typealias ItemType = Element
}
var myGenericStack = GenericStack<String>()
myGenericStack.append(item: "Narendra")
myGenericStack.append(item: "Bade")
myGenericStack.count
myGenericStack.pop()
myGenericStack.count
//Some NSOperation
class ExploreOperationAndThread {
func performOperation() {
//Create queue
let queue = OperationQueue()
let operation1 = BlockOperation {
var count = myGenericStack.count
while count > 0 {
myGenericStack.pop()
count -= 1
}
}
operation1.completionBlock = {
print("Operation 1")
}
let operation2 = BlockOperation {
var count = 0
while count < 10 {
myGenericStack.append(item: "ItemAdded")
count += 1
}
}
operation2.completionBlock = {
print("Operation 2")
print(myGenericStack.items)
}
//Suppose operation 3 is related to UI
let operation3 = BlockOperation {
//run on main thread
DispatchQueue.main.async {
print(myGenericStack.items.count)
}
}
operation3.completionBlock = {
print("Operation 3")
print(myGenericStack.items.count)
}
//add operation into queue
queue.addOperation(operation3)
queue.addOperation(operation1)
queue.addOperation(operation2)
//Limit number of concurrent operations in queue
queue.maxConcurrentOperationCount = 1
//add dependencies
operation1.addDependency(operation2)
operation2.addDependency(operation3)
if myGenericStack.items.count == 0 {
//remove dependency
operation1.removeDependency(operation2)
}
}
}
//Other ways of using queues
DispatchQueue.global(qos: .userInitiated).async {
ExploreOperationAndThread().performOperation()
}
DispatchQueue.main.async {
print("I am performing operation on main theread asynchronously")
}
OperationQueue.main.addOperation {
var count = 0
while count < 10 {
myGenericStack.append(item: "Narendra")
count += 1
}
}
DispatchQueue.main.asyncAfter(deadline: .now() + 1.5 , execute: {
ExploreOperationAndThread().performOperation()
})
let queue2 = DispatchQueue(label: "queue2") //Default is serial queue
queue2.async {
print("asynchronously")
}

Related

Swift: Thread-safe dictionary access via cocoa-bindings

I have a class and I need to bind a few NSTextFields to some values of a dictionary that will be changed by a thread.
class Test: NSObject {
@objc dynamic var dict : [String:Int] = [:]
let queue = DispatchQueue(label: "myQueue", attributes: .concurrent)
func changeValue() {
queue.async(flags: .barrier) {
self.dict["Key1"] = Int.random(in: 1..<100)
}
}
func readValue() -> Int? {
queue.sync {
return self.dict["Key1"]
}
}
}
As far as I understand, this is the way to do it (not accessing the variable directly, but going through a function that handles the queue).
But what about when I try to bind an NSTextField to "Key1" of the dict using Cocoa bindings?
Binding to the variable "dict" directly works in my tests, but I'm not sure this is thread safe (I'm quite sure it isn't).
What would be the correct way to do this?
Edit:
This code example looks legit but fails for some reason
class Test: NSObject {
var _dict : [String:Int] = ["Key1":1]
let queue = DispatchQueue(label: "myQueue", attributes: .concurrent)
@objc dynamic var dict:[String:Int] {
get {
queue.sync { return self._dict }
}
set {
queue.async(flags: .barrier) { self._dict = newValue }
}
}
func changeValue() {
queue.async(flags: .barrier) {
// This change will not be visible in the bound object
self._dict["Key1"] = Int.random(in: 1..<100)
// This causes a crash
self.dict = ["Key1":2]
}
}
}

How do you aggregate data from DispatchQueue.concurrentPerform() using GCD?

How is one supposed to aggregate data when using Grand Central Dispatch's concurrentPerform()?
I am doing what is in the code below, but resultDictionary seems to lose all its data when the notify() block ends. Thus all I get is an empty dictionary that is returned from the function.
I am not sure why this is happening, because when I print or set a breakpoint I can see there is something in the resultDictionary before the block ends.
let getCVPDispatchQueue = DispatchQueue(label: "blarg",
qos: .userInitiated,
attributes: .concurrent)
let getCVPDispatchGroup = DispatchGroup()
var resultDictionary = dataIDToSRLParticleDictionary()
getCVPDispatchQueue.async { [weak self] in
guard let self = self else { return }
DispatchQueue.concurrentPerform(iterations: self.dataArray.count) { [weak self] (index) in
guard let self = self else { return }
let data = self.dataArray[index]
getCVPDispatchGroup.enter()
let theResult = data.runPartcleFilterForClosestParticleAndMaybeStopAudio()
switch theResult {
case .success(let CVParticle):
// If there was a CVP found, add it to the set.
if let theCVParticle = CVParticle {
self.dataIDsToCVPDictionary.addTodataIDToCVPDict(key: data.ID,
value: theCVParticle)
}
case .failure(let error):
os_log(.error, log: self.logger, "rundatasProcessing error: %s", error.localizedDescription)
self._isActive = false
}
getCVPDispatchGroup.leave()
}
getCVPDispatchGroup.notify(queue: .main) { [weak self] in
guard let self = self else { return }
print("DONE with \(self.dataIDsToCVPDictionary.getDictionary.count)")
resultDictionary = self.dataIDsToCVPDictionary.getDictionary
print("resultDictionary has \(self.dataIDsToCVPDictionary.getDictionary.count)")
}
}
print("Before Return with \(resultDictionary.count)")
return resultDictionary
}
Not sure if this will help, but this is a simple class I made to make accessing the dictionary thread safe.
class DATASynchronizedIDToParticleDictionary {
var unsafeDictionary: DATAIDToDATAParticleDictionary = DATAIDToDATAParticleDictionary()
let accessQueue = DispatchQueue(label: "blarg2",
qos: .userInitiated,
attributes: .concurrent)
var getDictionary: DATAIDToDATAParticleDictionary {
get {
var dictionaryCopy: DATAIDToDATAParticleDictionary!
accessQueue.sync {
dictionaryCopy = unsafeDictionary
}
return dictionaryCopy
}
}
func addToDATAIDToCVPDict(key: String, value: DATAParticle) {
accessQueue.async(flags: .barrier) { [weak self] in
guard let self = self else { return }
self.unsafeDictionary[key] = value
}
}
func clearDictionary() {
accessQueue.async(flags: .barrier) { [weak self] in
guard let self = self else { return }
self.unsafeDictionary.removeAll()
}
}
}
You said:
I am doing what is in the code below, but resultDictionary seems to lose all its data when the notify() block ends. Thus all I get is an empty dictionary that is returned from the function.
The issue is that you’re trying to return a value that is calculated asynchronously. You likely want to shift to a completion block pattern.
As an aside, the dispatch group is not necessary. Somewhat ironically, the concurrentPerform is synchronous (i.e. it doesn’t proceed until the parallelized for loop is finished). So there’s no point in using notify if you know that you won’t get to the line after the concurrentPerform until all the iterations are done.
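To illustrate that point, here is a minimal standalone sketch (not from the original code) showing that the line after concurrentPerform only runs once every iteration has finished:
import Foundation

let start = Date()
DispatchQueue.concurrentPerform(iterations: 4) { index in
    Thread.sleep(forTimeInterval: 1)   // simulate one second of work per iteration
    print("iteration \(index) finished")
}
// Reached only after all four iterations are done; with enough cores the
// elapsed time is close to one second rather than four.
print("elapsed: \(Date().timeIntervalSince(start))")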
I’d also discourage having the concurrentPerform loop update properties. It exposes you to a variety of problems. E.g. what if the main thread was interacting with that object at the same time? Sure, you can synchronize your access, but it may be incomplete. It’s probably safer to have it update local variables only, and have the caller do the property update in its completion handler block. Obviously, you can go ahead and update properties (esp if you want to update your UI to reflect the in-flight progress), but it adds an additional wrinkle to the code that might not be necessary. Below, I’ve assumed it wasn’t necessary.
Also, while I appreciate the intent behind all of these [weak self] references, they're really not needed, especially in your synchronization class DATASynchronizedIDToParticleDictionary. We often use weak references to avoid strong reference cycles. But if you don't have a strong reference cycle, they just add overhead unless you have some other compelling need.
OK, so let’s dive into the code.
First, I’d retire the specialized DATASynchronizedIDToParticleDictionary with a general-purpose generic:
class SynchronizedDictionary<Key: Hashable, Value> {
private var _dictionary: [Key: Value]
private let queue = DispatchQueue(label: Bundle.main.bundleIdentifier! + ".dictionary", qos: .userInitiated, attributes: .concurrent)
init(_ dictionary: [Key: Value] = [:]) {
_dictionary = dictionary
}
var dictionary: [Key: Value] {
queue.sync { _dictionary }
}
subscript(key: Key) -> Value? {
get { queue.sync { _dictionary[key] } }
set { queue.async(flags: .barrier) { self._dictionary[key] = newValue } }
}
func removeAll() {
queue.async(flags: .barrier) {
self._dictionary.removeAll()
}
}
}
Note, I’ve removed the unnecessary weak references. I’ve also renamed addToDATAIDToCVPDict and clearDictionary with a more natural subscript operator and a removeAll method that more closely mirrors the interface of the underlying Dictionary type. It results in more natural looking code. (And because this is a generic, we can use it for any dictionary that needs this sort of low level synchronization.)
Anyway, you can now declare a synchronized rendition of the dictionary like so:
let particles = SynchronizedDictionary(dataIDToSRLParticleDictionary())
And when I want to update the dictionary with some value, you can do:
particles[data.ID] = theCVParticle
And when I want retrieve actual underlying, wrapped dictionary, I can do:
let finalResult = particles.dictionary
While we’re at it, since we might want to keep track of an array of errors that needs to be synchronized, I might add an array equivalent type:
class SynchronizedArray<Value> {
private var _array: [Value]
private let queue = DispatchQueue(label: Bundle.main.bundleIdentifier! + ".array", qos: .userInitiated, attributes: .concurrent)
init(_ array: [Value] = []) {
_array = array
}
var array: [Value] {
queue.sync { _array }
}
subscript(index: Int) -> Value {
get { queue.sync { _array[index] } }
set { queue.async(flags: .barrier) { self._array[index] = newValue } }
}
func append(_ value: Value) {
queue.async(flags: .barrier) {
self._array.append(value)
}
}
func removeAll() {
queue.async(flags: .barrier) {
self._array.removeAll()
}
}
}
We can now turn our attention to the main routine. So rather than returning a value, we’ll instead give it an #escaping completion handler. And, as discussed above, we’d retire the unnecessary dispatch group:
func calculateAllClosestParticles(completion: @escaping ([String: CVParticle], [Error]) -> Void) {
let queue = DispatchQueue(label: "blarg", qos: .userInitiated, attributes: .concurrent)
let particles = SynchronizedDictionary(dataIDToSRLParticleDictionary())
let errors = SynchronizedArray<Error>()
queue.async {
DispatchQueue.concurrentPerform(iterations: self.dataArray.count) { index in
let data = self.dataArray[index]
let result = data.runPartcleFilterForClosestParticleAndMaybeStopAudio()
switch result {
case .success(let cvParticle):
// If there was a CVP found, add it to the set.
if let cvParticle = cvParticle {
particles[data.ID] = cvParticle
}
case .failure(let error):
errors.append(error)
}
}
DispatchQueue.main.async {
completion(particles.dictionary, errors.array)
}
}
}
Now, I don't know what the right types were for the dictionary, so you might need to adjust the parameters of the completion. And you didn't provide the rest of the routines, so I may have some details wrong here. Don't get lost in the details; just note the scrupulous avoidance of properties within the concurrentPerform and the passing of the results back in the completion handler.
You’d call it like so:
calculateAllClosestParticles { dictionary, errors in
guard errors.isEmpty else { return }
// you can access the dictionary and update the model and UI here
self.someProperty = dictionary
self.tableView.reloadData()
}
// but don't try to access the dictionary here, because the asynchronous code hasn't finished yet
//
FWIW, while I used the reader-writer pattern you did in your example, in my experience, NSLock is actually more performant for quick synchronizations, especially when you are using concurrentPerform that might tie up all of the cores on your CPU, e.g.
class SynchronizedDictionary<Key: Hashable, Value> {
private var _dictionary: [Key: Value]
private let lock = NSLock()
init(_ dictionary: [Key: Value] = [:]) {
_dictionary = dictionary
}
var dictionary: [Key: Value] {
lock.synchronized { _dictionary }
}
subscript(key: Key) -> Value? {
get { lock.synchronized { _dictionary[key] } }
set { lock.synchronized { _dictionary[key] = newValue } }
}
func removeAll() {
lock.synchronized {
_dictionary.removeAll()
}
}
}
Where
extension NSLocking {
func synchronized<T>(_ closure: () throws -> T) rethrows -> T {
lock()
defer { unlock() }
return try closure()
}
}
Bottom line, you don’t want to force context switches for synchronization if you don’t have to.
When doing concurrent perform, if you have many dataPoints and if the time required by each call to runPartcleFilterForClosestParticleAndMaybeStopAudio is modest, you might want to consider "striding", doing several data points in each iteration. It's beyond the scope of this question, but just an FYI.
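A rough sketch of what striding can look like, with a stand-in dataArray and process function in place of the real work:
import Foundation

let dataArray = Array(0 ..< 1_000)                 // stand-in for the real data points
func process(_ value: Int) { _ = value * value }   // stand-in for the per-element work

let stride = 8   // elements handled per iteration; tune to the per-element cost
let chunks = (dataArray.count + stride - 1) / stride
DispatchQueue.concurrentPerform(iterations: chunks) { chunk in
    let start = chunk * stride
    let end = min(start + stride, dataArray.count)
    for index in start ..< end {
        process(dataArray[index])
    }
}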
Not exactly sure what I did, but I moved the
resultDictionary = self.dataIDsToCVPDictionary.getDictionary
outside the first async block, and that seemed to allow the data to be retained for the function return.

Swift Combine: Check if Subject has observer?

In RxSwift we can check if a *Subject has any observer, using hasObserver. How can I do this in Combine, e.g. on a PassthroughSubject?
Some time after posting my question I wrote this simple extension. It's much simpler than @Asperi's solution; I'm not sure about the advantages/disadvantages between the two solutions besides the simplicity of mine.
private enum CounterChange: Int, Equatable {
case increased = 1
case decreased = -1
}
extension Publisher {
func trackNumberOfSubscribers(
_ notifyChange: @escaping (Int) -> Void
) -> AnyPublisher<Output, Failure> {
var counter = NSNumber.init(value: 0)
let nsLock = NSLock()
func updateCounter(_ change: CounterChange, notify: (Int) -> Void) {
nsLock.lock()
counter = NSNumber(value: counter.intValue + change.rawValue)
notify(counter.intValue)
nsLock.unlock()
}
return handleEvents(
receiveSubscription: { _ in updateCounter(.increased, notify: notifyChange) },
receiveCompletion: { _ in updateCounter(.decreased, notify: notifyChange) },
receiveCancel: { updateCounter(.decreased, notify: notifyChange) }
).eraseToAnyPublisher()
}
}
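As a quick illustration outside of a test, using the extension above (the names below are made up for the example):
import Combine

let subject = PassthroughSubject<String, Never>()
let tracked = subject.trackNumberOfSubscribers { count in
    print("subscriber count is now \(count)")
}
let token = tracked.sink { value in print("received \(value)") }   // prints "subscriber count is now 1"
subject.send("hello")                                              // prints "received hello"
token.cancel()                                                     // prints "subscriber count is now 0"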
Here are some tests:
import XCTest
import Combine
final class PublisherTrackNumberOfSubscribersTest: XCTestCase {
func test_four_subscribers_complete_by_finish() {
doTest { publisher in
publisher.send(completion: .finished)
}
}
func test_four_subscribers_complete_by_error() {
doTest { publisher in
publisher.send(completion: .failure(.init()))
}
}
}
private extension PublisherTrackNumberOfSubscribersTest {
struct EmptyError: Swift.Error {}
func doTest(_ line: UInt = #line, complete: (PassthroughSubject<Int, EmptyError>) -> Void) {
let publisher = PassthroughSubject<Int, EmptyError>()
var numberOfSubscriptions = [Int]()
let trackable = publisher.trackNumberOfSubscribers { counter in
numberOfSubscriptions.append(counter)
}
func subscribe() -> Cancellable {
return trackable.sink(receiveCompletion: { _ in }, receiveValue: { _ in })
}
let cancellable1 = subscribe()
let cancellable2 = subscribe()
let cancellable3 = subscribe()
let cancellable4 = subscribe()
XCTAssertNotNil(cancellable1, line: line)
XCTAssertNotNil(cancellable2, line: line)
XCTAssertNotNil(cancellable3, line: line)
XCTAssertNotNil(cancellable4, line: line)
cancellable1.cancel()
cancellable2.cancel()
complete(publisher)
XCTAssertEqual(numberOfSubscriptions, [1, 2, 3, 4, 3, 2, 1, 0], line: line)
}
}
I've never once needed this... Apple does not provide an API for it, and I actually do not recommend such a thing, because it is like manually checking the value of retainCount in pre-ARC Objective-C to make some decision in code.
Anyway, it is possible. Let's consider it a lab exercise. I hope someone finds this helpful.
Disclaimer: the code below was not tested with all publishers and is not safe enough for a real-world project. It is just a demo of the approach.
Since there are many kinds of publishers, all of them final and private, and they may also arrive via a type eraser, we need something generic that applies to any publisher, hence an operator:
extension Publisher {
public func countingSubscribers(_ callback: ((Int) -> Void)? = nil)
-> Publishers.SubscribersCounter<Self> {
return Publishers.SubscribersCounter<Self>(upstream: self, callback: callback)
}
}
The operator lets us inject at any point in a publisher chain and report the interesting value via a callback. The interesting value in our case is the count of subscribers.
As the operator is injected into both the upstream and the downstream, we need a bidirectional custom pipe implementation, i.e. a custom publisher, a custom subscriber, and a custom subscription. In our case they must be transparent, as we don't need to modify the streams... it is effectively a Combine proxy.
Possible usage:
1) When the SubscribersCounter publisher is last in the chain, the numberOfSubscribers property can be used directly:
let publisher = NotificationCenter.default
.publisher(for: UIApplication.didBecomeActiveNotification)
.countingSubscribers()
...
publisher.numberOfSubscribers
2) When it is somewhere in the middle of the chain, receive a callback whenever the subscriber count changes:
let publisher = URLSession.shared
.dataTaskPublisher(for: URL(string: "https://www.google.com")!)
.countingSubscribers({ count in print("Observers: \(count)") })
.receive(on: DispatchQueue.main)
.map { _ in "Data received" }
.replaceError(with: "An error occurred")
Here is the implementation:
import Combine
extension Publishers {
public class SubscribersCounter<Upstream> : Publisher where Upstream : Publisher {
private(set) var numberOfSubscribers = 0
public typealias Output = Upstream.Output
public typealias Failure = Upstream.Failure
public let upstream: Upstream
public let callback: ((Int) -> Void)?
public init(upstream: Upstream, callback: ((Int) -> Void)?) {
self.upstream = upstream
self.callback = callback
}
public func receive<S>(subscriber: S) where S : Subscriber,
Upstream.Failure == S.Failure, Upstream.Output == S.Input {
self.increase()
upstream.receive(subscriber: SubscribersCounterSubscriber<S>(counter: self, subscriber: subscriber))
}
fileprivate func increase() {
numberOfSubscribers += 1
self.callback?(numberOfSubscribers)
}
fileprivate func decrease() {
numberOfSubscribers -= 1
self.callback?(numberOfSubscribers)
}
// own subscriber is needed to intercept upstream/downstream events
private class SubscribersCounterSubscriber<S> : Subscriber where S: Subscriber {
let counter: SubscribersCounter<Upstream>
let subscriber: S
init (counter: SubscribersCounter<Upstream>, subscriber: S) {
self.counter = counter
self.subscriber = subscriber
}
deinit {
Swift.print(">> Subscriber deinit")
}
func receive(subscription: Subscription) {
subscriber.receive(subscription: SubscribersCounterSubscription<Upstream>(counter: counter, subscription: subscription))
}
func receive(_ input: S.Input) -> Subscribers.Demand {
return subscriber.receive(input)
}
func receive(completion: Subscribers.Completion<S.Failure>) {
subscriber.receive(completion: completion)
}
typealias Input = S.Input
typealias Failure = S.Failure
}
// own subscription is needed to handle cancel and decrease
private class SubscribersCounterSubscription<Upstream>: Subscription where Upstream: Publisher {
let counter: SubscribersCounter<Upstream>
let wrapped: Subscription
private var cancelled = false
init(counter: SubscribersCounter<Upstream>, subscription: Subscription) {
self.counter = counter
self.wrapped = subscription
}
deinit {
Swift.print(">> Subscription deinit")
if !cancelled {
counter.decrease()
}
}
func request(_ demand: Subscribers.Demand) {
wrapped.request(demand)
}
func cancel() {
wrapped.cancel()
if !cancelled {
cancelled = true
counter.decrease()
}
}
}
}
}
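And a small sketch of exercising it with a PassthroughSubject (all names below are illustrative):
import Combine

let subject = PassthroughSubject<Int, Never>()
let counted = subject.countingSubscribers { count in
    print("Observers: \(count)")
}
let first = counted.sink { print("first received \($0)") }     // Observers: 1
let second = counted.sink { print("second received \($0)") }   // Observers: 2
subject.send(42)                       // both sinks receive 42
first.cancel()                         // Observers: 1 (via the custom subscription's cancel())
print(counted.numberOfSubscribers)     // 1
second.cancel()                        // Observers: 0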

Working with Swift completion handlers for chained functions

I am chaining some functions together and I can't figure out how to call a completion handler with a return value once all the functions are done running.
class AirQualityProvider {
var aBlock: ((Int?) -> Void)?
func getAirQuality(completion: @escaping (Int?) -> Void) {
aBlock = completion
callAPI()
}
private func callAPI() {
let data = Data()
parseDataForAQI(data: data)
}
private func parseDataForAQI(data: Data) {
for d in data {
dosomeMath(d)
}
}
private func dosomeMath(data: Int) {
// HERE IS WHERE I WANT IT TO SUM UP ALL THE NUMBERS
// THEN ONLY RETURN ONE VALUE using a completion handler.
// Currently, it returns the average as it is being generated.
}
}
Almost got it working with help from Alexander. The code Alexander supplied works perfectly; it is amazing. The issue is, when I run the taskRunner inside Alamofire it returns empty. Outside Alamofire it works as usual. I need to run this inside Alamofire.
func A(json : JSON){
for (key,subJson) in json{
if subJson["free"].doubleValue > 0.0 {
B(asset: subJson["asset"].stringValue, json: subJson)
}
}
print(taskRunner.getResults())
}
func B(asset : String, json : JSON){
//OUTSIDE ALAMOFIRE WORKS
self.taskRunner.execute{
return 100
}
Alamofire.request(url).responseJSON { response in
//INSIDE ALAMOFIRE DOESN'T WORK. Returns []
self.taskRunner.execute{
return 100
}
}
}
I would use a dispatch queue to synchronize the aggregation of results (by synchronizing Array.append(_:) calls, and the subsequent reading of the array). Here's a simple example:
import Dispatch
import Foundation
class ParallelTaskRunner<Result> {
private var results = [Result]()
private let group = DispatchGroup()
private let resultAggregatorQueue = DispatchQueue(label: "Result Aggregator")
func execute(_ closure: (@escaping (Result) -> Void) -> Void) {
group.enter() // Register that a new task is in-flight
closure { result in
self.resultAggregatorQueue.sync { // Synchronize access to the array
self.results.append(result) // Record the result
}
self.group.leave() // This task is done
}
}
func getResults() -> [Result] {
group.wait() // Make sure all in-flight tasks are done
return resultAggregatorQueue.sync { return results }
}
}
let taskQueue = DispatchQueue(label: "Task Queue", attributes: .concurrent)
let taskRunner = ParallelTaskRunner<Int>()
for i in 0...100 {
taskRunner.execute { completionHandler in
taskQueue.async { // Simulated async computation
let randomTime = 3.0
print("Sleeping for \(randomTime)")
Thread.sleep(forTimeInterval: randomTime) // Simulates intensive computation
let result = i // Simulate a result
completionHandler(result)
}
}
}
print(taskRunner.getResults()) // Oh look, all the results are here! :D
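For the Alamofire case from the follow-up, the key is to call the completion handler from inside the asynchronous response callback instead of returning a value synchronously. A sketch against the question's code (Alamofire and url as in the question, and taskRunner assumed to be the ParallelTaskRunner<Int> above):
self.taskRunner.execute { completionHandler in
    Alamofire.request(url).responseJSON { response in
        // Hand the result to the runner only once the network call has finished.
        completionHandler(100)
    }
}
Note that getResults() blocks until every registered completion handler has been called, so it must not be called on the queue Alamofire delivers its responses on (the main queue by default), or the two will deadlock.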

Block not really executing?

I'm trying to use a subclass of NSThread to run some commands. Before you recommend NSOperation or GCD, YES I need to use threads.
Below is my code and my output. The block is being created and added to the array and supposedly is being dequeued and run by the thread, but I don't see any output resulting from the running of my block. Why not?
import UIKit
class ViewController: UIViewController {
private let _queue = dispatch_queue_create("com.AaronLBratcher.ALBQueue", nil)
private let _thread = TestThread()
private let _lock = NSCondition()
override func viewDidLoad() {
super.viewDidLoad()
_thread.start()
var openSuccessful = false
dispatch_sync(_queue) {[unowned self] () -> Void in
self._lock.lock()
self._thread.openFile("file path here", completion: { (successful) -> Void in
print("completion block running...")
openSuccessful = successful
self._lock.signal()
self._lock.unlock()
})
self._lock.wait()
}
print("open operation complete")
print(openSuccessful)
}
final class TestThread:NSThread {
var _iterations = 0
var _lock = NSCondition()
var _blocks = [Any]()
func openFile(FilePath:String, completion:(successful:Bool) -> Void) {
print("queueing openFile...")
let block = {[unowned self] in
self._iterations = self._iterations + 1
print("opening file...")
completion(successful: true)
}
addBlock(block)
}
func addBlock(block:Any) {
_lock.lock()
_blocks.append(block)
_lock.signal()
_lock.unlock()
}
override func main() {
_lock.lock()
while true {
while _blocks.count == 0 {
print("waiting...")
_lock.wait()
}
print("extracting block...")
if let block = _blocks.first {
_blocks.removeFirst()
_lock.unlock()
print("running block...")
block;
}
_lock.lock()
}
}
}
}
Output:
queueing openFile...
waiting...
extracting block...
running block...
waiting...
The block isn't running because you just have block. You need block(), e.g.:
if let block = _blocks.first as? ()->() {
_blocks.removeFirst()
_lock.unlock()
print("running block...")
block()
}
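A related cleanup worth considering (not part of the original answer): store the blocks with a concrete closure type, so no cast is needed and a block of an unexpected type can't be silently skipped. A sketch of the affected pieces of TestThread:
var _blocks = [() -> Void]()        // typed storage instead of [Any]

func addBlock(block: () -> Void) {
    _lock.lock()
    _blocks.append(block)
    _lock.signal()
    _lock.unlock()
}

// In main(), no conditional cast is needed:
if let block = _blocks.first {
    _blocks.removeFirst()
    _lock.unlock()
    print("running block...")
    block()
}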