Swift pthread read/write lock taking a while to release the lock

I am trying to implement a read/write lock in Swift with the pthread APIs, and I have come across a strange issue.
My implementation is largely based on the following, with the addition of a timeout for attempted read locks:
http://swiftweb.johnholdsworth.com/Deferred/html/ReadWriteLock.html
Here is my implementation:
public final class ReadWriteLock {
private var lock = pthread_rwlock_t()
public init() {
let status = pthread_rwlock_init(&lock, nil)
assert(status == 0)
}
deinit {
let status = pthread_rwlock_destroy(&lock)
assert(status == 0)
}
@discardableResult
public func withReadLock<Result>(_ body: () throws -> Result) rethrows -> Result {
pthread_rwlock_rdlock(&lock)
defer { pthread_rwlock_unlock(&lock) }
return try body()
}
@discardableResult
public func withAttemptedReadLock<Result>(_ body: () throws -> Result) rethrows -> Result? {
guard pthread_rwlock_tryrdlock(&lock) == 0 else { return nil }
defer { pthread_rwlock_unlock(&lock) }
return try body()
}
@discardableResult
public func withAttemptedReadLock<Result>(_ timeout: Timeout = .now, body: () throws -> Result) rethrows -> Result? {
guard timeout != .now else { return try withAttemptedReadLock(body) }
let expiry = DispatchTime.now().uptimeNanoseconds + timeout.rawValue.uptimeNanoseconds
var ts = Timeout.interval(1).timespec
var result: Int32
repeat {
result = pthread_rwlock_tryrdlock(&lock)
guard result != 0 else { break }
nanosleep(&ts, nil)
} while DispatchTime.now().uptimeNanoseconds < expiry
// If the lock was not acquired
if result != 0 {
// Try to grab the lock once more
result = pthread_rwlock_tryrdlock(&lock)
}
guard result == 0 else { return nil }
defer { pthread_rwlock_unlock(&lock) }
return try body()
}
@discardableResult
public func withWriteLock<Return>(_ body: () throws -> Return) rethrows -> Return {
pthread_rwlock_wrlock(&lock)
defer { pthread_rwlock_unlock(&lock) }
return try body()
}
}
/// An amount of time to wait for an event.
public enum Timeout {
/// Do not wait at all.
case now
/// Wait indefinitely.
case forever
/// Wait for a given number of milliseconds.
case interval(UInt64)
}
public extension Timeout {
public var timespec: timespec {
let nano = rawValue.uptimeNanoseconds
return Darwin.timespec(tv_sec: Int(nano / NSEC_PER_SEC), tv_nsec: Int(nano % NSEC_PER_SEC))
}
public var rawValue: DispatchTime {
switch self {
case .now:
return DispatchTime.now()
case .forever:
return DispatchTime.distantFuture
case .interval(let milliseconds):
return DispatchTime(uptimeNanoseconds: milliseconds * NSEC_PER_MSEC)
}
}
}
extension Timeout : Equatable { }
public func ==(lhs: Timeout, rhs: Timeout) -> Bool {
switch (lhs, rhs) {
case (.now, .now):
return true
case (.forever, .forever):
return true
case (let .interval(ms1), let .interval(ms2)):
return ms1 == ms2
default:
return false
}
}
Here is my unit test:
func testReadWrite() {
let rwLock = ReadWriteLock()
let queue = OperationQueue()
queue.maxConcurrentOperationCount = 2
queue.qualityOfService = .userInteractive
queue.isSuspended = true
var enterWrite: Double = 0
var exitWrite: Double = 0
let writeWait: UInt64 = 500
// Get write lock
queue.addOperation {
enterWrite = Double(Timeout.now.rawValue.uptimeNanoseconds) / Double(NSEC_PER_MSEC)
rwLock.withWriteLock {
// Sleep for writeWait milliseconds (500 ms)
var ts = Timeout.interval(writeWait).timespec
var result: Int32
repeat { result = nanosleep(&ts, &ts) } while result == -1
}
exitWrite = Double(Timeout.now.rawValue.uptimeNanoseconds) / Double(NSEC_PER_MSEC)
}
var entered = false
var enterRead: Double = 0
var exitRead: Double = 0
let readWait = writeWait + 50
// Get read lock
queue.addOperation {
enterRead = Double(Timeout.now.rawValue.uptimeNanoseconds) / Double(NSEC_PER_MSEC)
rwLock.withAttemptedReadLock(.interval(readWait)) {
print("**** Entered! ****")
entered = true
}
exitRead = Double(Timeout.now.rawValue.uptimeNanoseconds) / Double(NSEC_PER_MSEC)
}
queue.isSuspended = false
queue.waitUntilAllOperationsAreFinished()
let startDifference = abs(enterWrite - enterRead)
let totalWriteTime = abs(exitWrite - enterWrite)
let totalReadTime = abs(exitRead - enterRead)
print("Start Difference: \(startDifference)")
print("Total Write Time: \(totalWriteTime)")
print("Total Read Time: \(totalReadTime)")
XCTAssert(totalWriteTime >= Double(writeWait))
XCTAssert(totalReadTime >= Double(readWait))
XCTAssert(totalReadTime >= totalWriteTime)
XCTAssert(entered)
}
Finally, the output of my unit test is the following:
Start Difference: 0.00136399269104004
Total Write Time: 571.76081609726
Total Read Time: 554.105705976486
Of course, the test is failing because the write lock is not released in time. Given that my wait time is only half a second (500ms), why is it taking roughly 570ms for the write lock to execute and release?
I have tried executing with optimizations both on and off to no avail.
I was under the impression that nanosleep is a high-resolution sleep timer, so I would expect a resolution of at least 5-10 milliseconds here for the lock timeout.
Can anyone shed some light here?

It turns out Foundation was performing some kind of optimization with the OperationQueue due to the long sleep in my unit test.
Replacing the single long nanosleep with a loop that sleeps in 1 ms increments until the total time is exceeded seems to have fixed the problem (the interval.sleep() helper used below is sketched after the snippet).
// Get write lock
queue.addOperation {
enterWrite = Double(Timeout.now.rawValue.uptimeNanoseconds) / Double(NSEC_PER_MSEC)
rwLock.withWriteLock {
let expiry = DispatchTime.now().uptimeNanoseconds + Timeout.interval(writeWait).rawValue.uptimeNanoseconds
let interval = Timeout.interval(1)
repeat {
interval.sleep()
} while DispatchTime.now().uptimeNanoseconds < expiry
}
exitWrite = Double(Timeout.now.rawValue.uptimeNanoseconds) / Double(NSEC_PER_MSEC)
}
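The interval.sleep() call above relies on a helper that is not part of the Timeout type shown earlier. A minimal sketch of what such a helper might look like, assuming .interval is expressed in milliseconds (matching rawValue above) and that it simply wraps usleep:
import Foundation
public extension Timeout {
    /// Hypothetical helper: block the current thread for this timeout.
    /// Only meaningful for .interval; .now and .forever return immediately here.
    func sleep() {
        guard case let .interval(milliseconds) = self else { return }
        // usleep takes microseconds (fine for the short 1 ms slices used above).
        usleep(useconds_t(milliseconds * 1_000))
    }
}
With that in place, the write operation sleeps in 1 ms slices instead of issuing one long nanosleep, which avoids the oversleep observed in the original test.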

Unexpected race condition with arrays

I am trying to design a simple coffee machine system that uses three resources: milk, coffee, and water.
For each order I create a new thread, and for each order thread I create another three threads, one per resource, so that the order does not have to wait on a free resource by accessing them serially.
The resources are stored in a simple array, and each index (representing one of the three resources) is protected by its own lock, but a race condition is reported when I turn on TSan.
I am pretty sure that no two threads access the same index at the same time, and to confirm this I changed the resources from an array to separate variables and no race occurred. So why does accessing the same array, while protecting each index with a lock, result in a race condition?
The following code is the version with the resources array (the one that has the race condition):
import Foundation
enum Resources: Int {
case milk
case coffe
case water
}
struct Order {
var id: Int
var requiredMilk: Int
var requiredCoffe: Int
var requiredWater: Int
init(requiredMilk: Int, requiredCoffe: Int, requiredWater: Int) {
id = Int.random(in: 0...Int.max)
self.requiredMilk = requiredMilk
self.requiredCoffe = requiredCoffe
self.requiredWater = requiredWater
}
}
var resources: [Int] = []
let dispatchGroup = DispatchGroup()
let queue = DispatchQueue(label: "Ordering Queue", attributes: .concurrent)
let locks = [NSLock(), NSLock(), NSLock()]
func initiateResources(initialMilk: Int, initialCoffe: Int, initialWater: Int) {
resources.append(contentsOf: [initialMilk, initialCoffe, initialWater])
}
func getAvailableMilk() -> Int {
return resources[Resources.milk.rawValue]
}
func getAvailableCoffe() -> Int {
return resources[Resources.coffe.rawValue]
}
func getAvailableWater() -> Int {
return resources[Resources.water.rawValue]
}
func handleOrder(_ order: Order) {
dispatchGroup.enter()
// pushing the current order onto the queue so it is handled concurrently
queue.async {
let done = prepareOrder(order)
if done {
//print("Order with id \(order.id) prepared successfully")
} else {
//print("No sufficient resources for order with id \(order.id)")
}
dispatchGroup.leave()
}
}
func prepareOrder(_ order: Order) -> Bool {
var can = true
let internalQueue = DispatchQueue(label: "internal queue", attributes: .concurrent)
let internalDispatchGroup = DispatchGroup()
let internalLock = NSLock()
// opening a new thread for each type of resource so we don't wait on a free resource
if order.requiredMilk != 0 {
internalDispatchGroup.enter()
internalQueue.async {
locks[Resources.milk.rawValue].lock()
if getAvailableMilk() < order.requiredMilk {
locks[Resources.milk.rawValue].unlock()
internalLock.lock()
can = false
internalLock.unlock()
} else {
//Thread.sleep(forTimeInterval: 3)
resources[Resources.milk.rawValue] -= order.requiredMilk
locks[Resources.milk.rawValue].unlock()
}
internalDispatchGroup.leave()
}
}
if order.requiredCoffe != 0 {
internalDispatchGroup.enter()
internalQueue.async {
locks[Resources.coffe.rawValue].lock()
if getAvailableCoffe() < order.requiredCoffe {
locks[Resources.coffe.rawValue].unlock()
internalLock.lock()
can = false
internalLock.unlock()
} else {
//Thread.sleep(forTimeInterval: 2)
resources[Resources.coffe.rawValue] -= order.requiredCoffe
locks[Resources.coffe.rawValue].unlock()
}
internalDispatchGroup.leave()
}
}
if order.requiredWater != 0 {
internalDispatchGroup.enter()
internalQueue.async {
locks[Resources.water.rawValue].lock()
if getAvailableWater() < order.requiredWater {
locks[Resources.water.rawValue].unlock()
internalLock.lock()
can = false
internalLock.unlock()
} else {
//Thread.sleep(forTimeInterval: 1)
resources[Resources.water.rawValue] -= order.requiredWater
locks[Resources.water.rawValue].unlock()
}
internalDispatchGroup.leave()
}
}
// making sure that the three resource threads finished successfully
internalDispatchGroup.wait()
return can
}
func endWork() {
dispatchGroup.wait()
}
// note: Thread.sleep is commented out so the sample runs faster, but each resource should have a different serving time
initiateResources(initialMilk: 10000, initialCoffe: 10000, initialWater: 10000)
for _ in 0..<10 {
handleOrder(Order(requiredMilk: 5, requiredCoffe: 5, requiredWater: 5))
}
endWork()
print(resources[0])
print(resources[1])
print(resources[2])
/*
running 90 threads each needing 5 units from each resource ==> total of 450 units
So remaining resources should be 9550 of each type if no conflicts occurred
*/
The following is the same code but with separate variables for each resource (no race condition):
import Foundation
enum Resources: Int {
case milk
case coffe
case water
}
struct Order {
var id: Int
var requiredMilk: Int
var requiredCoffe: Int
var requiredWater: Int
init(requiredMilk: Int, requiredCoffe: Int, requiredWater: Int) {
id = Int.random(in: 0...Int.max)
self.requiredMilk = requiredMilk
self.requiredCoffe = requiredCoffe
self.requiredWater = requiredWater
}
}
var milkRsource = 0, coffeResource = 0, waterResource = 0
let dispatchGroup = DispatchGroup()
let queue = DispatchQueue(label: "Ordering Queue", attributes: .concurrent)
let milkLock = NSLock()
let coffeLock = NSLock()
let waterLock = NSLock()
func initiateResources(initialMilk: Int, initialCoffe: Int, initialWater: Int) {
milkRsource = initialMilk
coffeResource = initialCoffe
waterResource = initialWater
}
func getAvailableMilk() -> Int {
return milkRsource
}
func getAvailableCoffe() -> Int {
return coffeResource
}
func getAvailableWater() -> Int {
return waterResource
}
func handleOrder(_ order: Order) {
dispatchGroup.enter()
// pushing the current order onto the queue so it is handled concurrently
queue.async {
let done = prepareOrder(order)
if done {
//print("Order with id \(order.id) prepared successfully")
} else {
//print("No sufficient resources for order with id \(order.id)")
}
dispatchGroup.leave()
}
}
func prepareOrder(_ order: Order) -> Bool {
var can = true
let internalQueue = DispatchQueue(label: "internal queue", attributes: .concurrent)
let internalDispatchGroup = DispatchGroup()
let internalLock = NSLock()
// opening a new thread for each type of resource so we don't wait on a free resource
if order.requiredMilk != 0 {
internalDispatchGroup.enter()
internalQueue.async {
milkLock.lock()
if getAvailableMilk() < order.requiredMilk {
milkLock.unlock()
internalLock.lock()
can = false
internalLock.unlock()
} else {
//Thread.sleep(forTimeInterval: 3)
milkRsource -= order.requiredMilk
milkLock.unlock()
}
internalDispatchGroup.leave()
}
}
if order.requiredCoffe != 0 {
internalDispatchGroup.enter()
internalQueue.async {
coffeLock.lock()
if getAvailableCoffe() < order.requiredCoffe {
coffeLock.unlock()
internalLock.lock()
can = false
internalLock.unlock()
} else {
//Thread.sleep(forTimeInterval: 2)
coffeResource -= order.requiredCoffe
coffeLock.unlock()
}
internalDispatchGroup.leave()
}
}
if order.requiredWater != 0 {
internalDispatchGroup.enter()
internalQueue.async {
waterLock.lock()
if getAvailableWater() < order.requiredWater {
waterLock.unlock()
internalLock.lock()
can = false
internalLock.unlock()
} else {
//Thread.sleep(forTimeInterval: 1)
waterResource -= order.requiredWater
waterLock.unlock()
}
internalDispatchGroup.leave()
}
}
// making sure that the three resource threads finished successfully
internalDispatchGroup.wait()
return can
}
func endWork() {
dispatchGroup.wait()
}
// note: Thread.sleep is commented out so the sample runs faster, but each resource should have a different serving time
initiateResources(initialMilk: 10000, initialCoffe: 10000, initialWater: 10000)
for _ in 0..<10 {
handleOrder(Order(requiredMilk: 5, requiredCoffe: 5, requiredWater: 5))
}
endWork()
print(milkRsource)
print(coffeResource)
print(waterResource)
/*
running 90 threads each needing 5 units from each resource ==> total of 450 units
So remaining resources should be 9550 of each type if no conflicts occurred
*/
In prepareOrder, you are mutating resources while only synchronizing with the local lock (i.e., synchronizing within the current call). But if prepareOrder is called multiple times (which it is), there is nothing to prevent concurrent interaction with resources. Your synchronization mechanism should live at the same level as the variable it is synchronizing. E.g., a local variable can be synchronized with a local lock or queue (or other synchronization mechanism), while a property should be synchronized with a lock or queue (or whatever) that is also a property of the same type, not a local.
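A minimal sketch of that idea, assuming a single shared lock declared at the same global scope as resources and guarding every access to it (the names here are illustrative, not from the original code):
import Foundation
// Shared state and the lock that guards it are declared at the same (global) scope.
var resources = [10_000, 10_000, 10_000]
let resourcesLock = NSLock()
/// Atomically checks availability and deducts `amount` of the resource at `index`.
/// Returns false if there is not enough of that resource.
func consume(_ amount: Int, at index: Int) -> Bool {
    resourcesLock.lock()
    defer { resourcesLock.unlock() }
    guard resources[index] >= amount else { return false }
    resources[index] -= amount
    return true
}
Because every read and write of resources goes through the same lock, calls cannot interleave no matter how many prepareOrder invocations run concurrently.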

Swift PropertyWrapper for waiting on a value to be set for the first time not working

I intend for the getter to wait until the value has been set for the first time and only then return. But I am getting the wrong result the first time. Here is the code:
import Foundation
import OSLog
@propertyWrapper
struct WaitTillSet<T> {
private var value: T
private let group: DispatchGroup
init(wrappedValue value: T) {
self.group = DispatchGroup()
self.value = value
group.enter()
}
var wrappedValue: T {
get { getValue() }
set { setValue(newValue: newValue) }
}
mutating func setValue(newValue: T) {
value = newValue
group.leave()
}
func getValue() -> T {
group.wait()
return value
}
}
let logger = Logger()
func testFunc() {
@WaitTillSet var data = 0
DispatchQueue.global(qos: .default).asyncAfter(deadline: .now() + 2) {
logger.info("Setting value to 10")
data = 10
}
logger.info("data = \(data)")
logger.info("dataAgain = \(data)")
}
testFunc()
And here is the output
2022-08-23 10:57:39.867967-0700 Test[68117:4747009] Setting value to 10
2022-08-23 10:57:39.868923-0700 Test[68117:4746644] data = 0
2022-08-23 10:57:39.869045-0700 Test[68117:4746644] dataAgain = 10
Program ended with exit code: 0
I also tried DispatchSemaphore, which gives the same result. If I use a class in a similar way instead of a property wrapper, I see the same problem. There must be something fundamentally wrong with this.

Protobuf-based Spring Boot endpoint giving no output to Swift client

I have a Spring Boot-based microservice endpoint which produces protobuf. Here is the .proto definition:
syntax = "proto3";
package TournamentFilterPackage;
import "google/protobuf/any.proto";
option java_package = "com.mycompany.service.tournament.proto";
option java_outer_classname = "TournamentCompleteData";
message TournamentData {
repeated TournamentRecord tournamentRecords = 1;
}
message TournamentRecord {
int64 id = 1;
Date start = 2;
float prize = 3;
string name = 4;
string speed = 5;
string type = 6;
float buyIn = 7;
int32 noOfParticipants = 8;
string status = 9;
}
message Date {
int32 year = 1;
int32 month = 2;
int32 day = 3;
}
message PokerResponseProto {
repeated int32 errorCodes = 1;
google.protobuf.Any responseObject = 2;
}
Here is my rest controller:
@ApiOperation(value="Complete Tournament Data", response = PokerResponseProto.class)
@GetMapping(value="/tournaments/completedata", produces = "application/x-protobuf")
public ResponseEntity<PokerResponseProto> getCompleteTournamentdata() {
if (logger.isInfoEnabled()) {
logger.info("BEGIN::/lobby/api/v1//tournaments/completedata/ " + "GET ::");
}
List<TournamentTypeResponseDto> tournamentTypeResponseDtos = new ArrayList<>();
List<TournamentRecord> tournamentRecords = new ArrayList<>();
ResponseEntity<PokerResponseProto> pokerResponse = null;
try {
tournamentTypeResponseDtos =
tournamentTypeDataService.getCompleteTournamentList();
for(TournamentTypeResponseDto t:tournamentTypeResponseDtos) {
tournamentRecords.add(controllerUtils.buildTournamentRecord(t));
}
TournamentData.Builder tournamentDataBuilder =
TournamentData.newBuilder().addAllTournamentRecords(tournamentRecords);
pokerResponse = new ResponseEntity<>(PokerResponseProto.newBuilder()
.setResponseObject(Any.pack(tournamentDataBuilder.build()))
.build(),
HttpStatus.OK);
logger.info("pokerResponse: {}", pokerResponse.toString());
} catch (PokerException pe) {
if (this.logger.isErrorEnabled()) {
this.logger.error(pe.getMessage(), pe);
}
List<Integer> errorCodeValue = controllerUtils
.convertErrorCodesToInt(pe.getErrorCodes());
pokerResponse = new ResponseEntity<>(PokerResponseProto.newBuilder()
.addAllErrorCodes(errorCodeValue)
.build(), HttpStatus.INTERNAL_SERVER_ERROR);
}
if (logger.isInfoEnabled()) {
logger.info("RETURN::/lobby/api/v1//tournaments/completedata/ " + "GET :: {}",
pokerResponse.toString());
}
return pokerResponse;
}
Here is the code snippet from ControllerUtils class:
public TournamentRecord buildTournamentRecord(TournamentTypeResponseDto dto) {
Instant tournamentStart = dto.getTournamentStartDate();
LocalDate localDate1 = LocalDateTime.ofInstant(tournamentStart,
ZoneOffset.UTC).toLocalDate();
Date.Builder dateBuilder1 = Date.newBuilder();
dateBuilder1.setYear(localDate1.getYear());
dateBuilder1.setMonth(localDate1.getMonthValue());
dateBuilder1.setDay(localDate1.getDayOfMonth());
TournamentCompleteData.Date trnamntStartDate = dateBuilder1.build();
TournamentRecord tr = TournamentRecord.newBuilder()
.setId(dto.getTournamentTypeId())
.setStart(trnamntStartDate)
.setPrize(dto.getFirstPrize())
.setName(dto.getTournamentName())
.setStatus(dto.getStatusName())
.build();
return tr;
}
I have written a test case which can access this endpoint and print the tournaments, but the Swift client gets null when it invokes this endpoint. Already existing protobuf endpoints work fine with the Swift client, and I have a ProtobufHttpMessageConverter configured:
@Bean
ProtobufHttpMessageConverter protobufHttpMessageConverter() {
return new ProtobufHttpMessageConverter();
}
And when the Swift client invokes the endpoint, log messages get printed, so the client is reaching the endpoint.
Here is the Swift client code snippet for LobbyAPI:
import Foundation
class LobbyAPI: Api {
func getTournammentTableList(completion: @escaping ((TournamentFilterPackage_TournamentData?, Error?) -> Void)) {
getData(WithUrl: (APIConstants.lobbyURL) + "tournaments/completedata/", APIKey: APIConstants.apiKey, completion: completion)
}
And for TournamentTableListVM:
import Cocoa
import RealmSwift
class TournamentTableListVM: NSObject {
private weak var tblListView: LobbyTableListView!
private var tblPreviewVM: TablePreviewVM!
var arrFilterTableList: [LobbyFilterPackage_LobbyTableRecord] = []
var arrTableList: [LobbyFilterPackage_LobbyTableRecord] = []
var timer = Timer()
var didSelectTable: ((_ tableRecord: LobbyFilterPackage_LobbyTableRecord,_ isObserver:Bool) -> Void)!
var getFilterCallBack:(() -> Void)?
var currentSelectedMoneyType: (() -> TabMenuMoneyToggleView.Money) = { .play }{
didSet{
tblPreviewVM.currentSelectedMoneyType = currentSelectedMoneyType
}
}
var didTakeActionFromTablePreview: ((TablePreviewVM.TablePreviewPlayerActionType, Int) -> Void)?
//For sorting
var currentSortKey: String?
var currentSortAscending: Bool?
var aUserData: APPokerProto_PlayerProfileProto!
var observClick = false
func getCompleteTableData() {
LobbyAPI().getTournammentTableList() {[weak self] (aTournammentData, aError) in
guard let self = self else { return }
guard aError == nil, let tableData = aTournammentData, tableData.lobbyTableRecords.count > 0 else {
self.emptyTableAndPreview()
LoginManager.shared.didToggleNotify()
return
}
LoginManager.shared.didToggleNotify()
self.arrFilterTableList = tableData.lobbyTableRecords
self.arrTableList = tableData.lobbyTableRecords
self.tblListView.tblList.reloadData()
self.tblListView.tblList.selectRowIndexes(NSIndexSet.init(index: 0) as IndexSet, byExtendingSelection: true)
self.timeStamp = String(tableData.timeStamp.seconds)
}
}
In the above code, "aTournammentData" is coming back as nil.
Update: I found that on the front end, the response was being mapped to the wrong generated Swift struct. With the help of the https://github.com/apple/swift-protobuf plugin, the TournamentCompleteData.pb.swift file was generated as below (a sketch of the corrected client mapping follows after the generated file):
// DO NOT EDIT.
// swift-format-ignore-file
//
// Generated by the Swift generator plugin for the protocol buffer compiler.
// Source: TournamentCompleteData.proto
//
// For information on using the generated types, please see the documentation:
// https://github.com/apple/swift-protobuf/
import Foundation
import SwiftProtobuf
// If the compiler emits an error on this type, it is because this file
// was generated by a version of the `protoc` Swift plug-in that is
// incompatible with the version of SwiftProtobuf to which you are linking.
// Please ensure that you are building against the same version of the API
// that was used to generate this file.
fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck {
struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {}
typealias Version = _2
}
struct TournamentFilterPackage_TournamentData {
// SwiftProtobuf.Message conformance is added in an extension below. See the
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
// methods supported on all messages.
var tournamentRecords: [TournamentFilterPackage_TournamentRecord] = []
var unknownFields = SwiftProtobuf.UnknownStorage()
init() {}
}
struct TournamentFilterPackage_TournamentRecord {
// SwiftProtobuf.Message conformance is added in an extension below. See the
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
// methods supported on all messages.
var id: Int64 = 0
var start: TournamentFilterPackage_Date {
get {return _start ?? TournamentFilterPackage_Date()}
set {_start = newValue}
}
/// Returns true if `start` has been explicitly set.
var hasStart: Bool {return self._start != nil}
/// Clears the value of `start`. Subsequent reads from it will return its default value.
mutating func clearStart() {self._start = nil}
var prize: Float = 0
var name: String = String()
var speed: String = String()
var type: String = String()
var buyIn: Float = 0
var noOfParticipants: Int32 = 0
var status: String = String()
var unknownFields = SwiftProtobuf.UnknownStorage()
init() {}
fileprivate var _start: TournamentFilterPackage_Date? = nil
}
struct TournamentFilterPackage_Date {
// SwiftProtobuf.Message conformance is added in an extension below. See the
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
// methods supported on all messages.
/// Year of the date. Must be from 1 to 9999, or 0 to specify a date without
/// a year.
var year: Int32 = 0
/// Month of a year. Must be from 1 to 12, or 0 to specify a year without a
/// month and day.
var month: Int32 = 0
/// Day of a month. Must be from 1 to 31 and valid for the year and month, or 0
/// to specify a year by itself or a year and month where the day isn't
/// significant.
var day: Int32 = 0
var unknownFields = SwiftProtobuf.UnknownStorage()
init() {}
}
// MARK: - Code below here is support for the SwiftProtobuf runtime.
fileprivate let _protobuf_package = "TournamentFilterPackage"
extension TournamentFilterPackage_TournamentData: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
static let protoMessageName: String = _protobuf_package + ".TournamentData"
static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
1: .same(proto: "tournamentRecords"),
]
mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
while let fieldNumber = try decoder.nextFieldNumber() {
// The use of inline closures is to circumvent an issue where the compiler
// allocates stack space for every case branch when no optimizations are
// enabled. https://github.com/apple/swift-protobuf/issues/1034
switch fieldNumber {
case 1: try { try decoder.decodeRepeatedMessageField(value: &self.tournamentRecords) }()
default: break
}
}
}
func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
if !self.tournamentRecords.isEmpty {
try visitor.visitRepeatedMessageField(value: self.tournamentRecords, fieldNumber: 1)
}
try unknownFields.traverse(visitor: &visitor)
}
static func ==(lhs: TournamentFilterPackage_TournamentData, rhs: TournamentFilterPackage_TournamentData) -> Bool {
if lhs.tournamentRecords != rhs.tournamentRecords {return false}
if lhs.unknownFields != rhs.unknownFields {return false}
return true
}
}
extension TournamentFilterPackage_TournamentRecord: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
static let protoMessageName: String = _protobuf_package + ".TournamentRecord"
static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
1: .same(proto: "id"),
2: .same(proto: "start"),
3: .same(proto: "prize"),
4: .same(proto: "name"),
5: .same(proto: "speed"),
6: .same(proto: "type"),
7: .same(proto: "buyIn"),
8: .same(proto: "noOfParticipants"),
9: .same(proto: "status"),
]
mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
while let fieldNumber = try decoder.nextFieldNumber() {
// The use of inline closures is to circumvent an issue where the compiler
// allocates stack space for every case branch when no optimizations are
// enabled. https://github.com/apple/swift-protobuf/issues/1034
switch fieldNumber {
case 1: try { try decoder.decodeSingularInt64Field(value: &self.id) }()
case 2: try { try decoder.decodeSingularMessageField(value: &self._start) }()
case 3: try { try decoder.decodeSingularFloatField(value: &self.prize) }()
case 4: try { try decoder.decodeSingularStringField(value: &self.name) }()
case 5: try { try decoder.decodeSingularStringField(value: &self.speed) }()
case 6: try { try decoder.decodeSingularStringField(value: &self.type) }()
case 7: try { try decoder.decodeSingularFloatField(value: &self.buyIn) }()
case 8: try { try decoder.decodeSingularInt32Field(value: &self.noOfParticipants) }()
case 9: try { try decoder.decodeSingularStringField(value: &self.status) }()
default: break
}
}
}
func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
// The use of inline closures is to circumvent an issue where the compiler
// allocates stack space for every if/case branch local when no optimizations
// are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
// https://github.com/apple/swift-protobuf/issues/1182
if self.id != 0 {
try visitor.visitSingularInt64Field(value: self.id, fieldNumber: 1)
}
try { if let v = self._start {
try visitor.visitSingularMessageField(value: v, fieldNumber: 2)
} }()
if self.prize != 0 {
try visitor.visitSingularFloatField(value: self.prize, fieldNumber: 3)
}
if !self.name.isEmpty {
try visitor.visitSingularStringField(value: self.name, fieldNumber: 4)
}
if !self.speed.isEmpty {
try visitor.visitSingularStringField(value: self.speed, fieldNumber: 5)
}
if !self.type.isEmpty {
try visitor.visitSingularStringField(value: self.type, fieldNumber: 6)
}
if self.buyIn != 0 {
try visitor.visitSingularFloatField(value: self.buyIn, fieldNumber: 7)
}
if self.noOfParticipants != 0 {
try visitor.visitSingularInt32Field(value: self.noOfParticipants, fieldNumber: 8)
}
if !self.status.isEmpty {
try visitor.visitSingularStringField(value: self.status, fieldNumber: 9)
}
try unknownFields.traverse(visitor: &visitor)
}
static func ==(lhs: TournamentFilterPackage_TournamentRecord, rhs: TournamentFilterPackage_TournamentRecord) -> Bool {
if lhs.id != rhs.id {return false}
if lhs._start != rhs._start {return false}
if lhs.prize != rhs.prize {return false}
if lhs.name != rhs.name {return false}
if lhs.speed != rhs.speed {return false}
if lhs.type != rhs.type {return false}
if lhs.buyIn != rhs.buyIn {return false}
if lhs.noOfParticipants != rhs.noOfParticipants {return false}
if lhs.status != rhs.status {return false}
if lhs.unknownFields != rhs.unknownFields {return false}
return true
}
}
extension TournamentFilterPackage_Date: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
static let protoMessageName: String = _protobuf_package + ".Date"
static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
1: .same(proto: "year"),
2: .same(proto: "month"),
3: .same(proto: "day"),
]
mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
while let fieldNumber = try decoder.nextFieldNumber() {
// The use of inline closures is to circumvent an issue where the compiler
// allocates stack space for every case branch when no optimizations are
// enabled. https://github.com/apple/swift-protobuf/issues/1034
switch fieldNumber {
case 1: try { try decoder.decodeSingularInt32Field(value: &self.year) }()
case 2: try { try decoder.decodeSingularInt32Field(value: &self.month) }()
case 3: try { try decoder.decodeSingularInt32Field(value: &self.day) }()
default: break
}
}
}
func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
if self.year != 0 {
try visitor.visitSingularInt32Field(value: self.year, fieldNumber: 1)
}
if self.month != 0 {
try visitor.visitSingularInt32Field(value: self.month, fieldNumber: 2)
}
if self.day != 0 {
try visitor.visitSingularInt32Field(value: self.day, fieldNumber: 3)
}
try unknownFields.traverse(visitor: &visitor)
}
static func ==(lhs: TournamentFilterPackage_Date, rhs: TournamentFilterPackage_Date) -> Bool {
if lhs.year != rhs.year {return false}
if lhs.month != rhs.month {return false}
if lhs.day != rhs.day {return false}
if lhs.unknownFields != rhs.unknownFields {return false}
return true
}
}
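With the generated types above, the fix on the client is to type the callback against TournamentFilterPackage_TournamentData and read tournamentRecords, rather than the LobbyFilterPackage types used in the view model. A rough sketch of the corrected call, where everything except the generated type and field names is an illustrative assumption:
func getCompleteTournamentData() {
    LobbyAPI().getTournammentTableList { [weak self] (tournamentData, error) in
        guard let self = self else { return }
        // Map against the generated TournamentFilterPackage types, not the Lobby ones.
        guard error == nil,
              let data = tournamentData,
              !data.tournamentRecords.isEmpty else { return }
        // `records` is an illustrative property; store or display the records as needed.
        self.records = data.tournamentRecords
    }
}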

How to convert DispatchQueue debounce to Swift Concurrency task?

I have an existing debouncer utility using DispatchQueue. It accepts a closure and executes it once the time threshold has elapsed since the last call. It can be used like this:
let limiter = Debouncer(limit: 5)
var value = ""
func sendToServer() {
limiter.execute {
print("\(Date.now.timeIntervalSince1970): Fire! \(value)")
}
}
value.append("h")
sendToServer() // Waits until 5 seconds
value.append("e")
sendToServer() // Waits until 5 seconds
value.append("l")
sendToServer() // Waits until 5 seconds
value.append("l")
sendToServer() // Waits until 5 seconds
value.append("o")
sendToServer() // Waits until 5 seconds
print("\(Date.now.timeIntervalSince1970): Last operation called")
// 1635691696.482115: Last operation called
// 1635691701.859087: Fire! hello
Notice it does not call Fire! multiple times, but just once, 5 seconds after the last call, with the value from the last task. The Debouncer instance is configured to hold the last task in its queue for 5 seconds no matter how many times it is called. The closure is passed into the execute(block:) method:
final class Debouncer {
private let limit: TimeInterval
private let queue: DispatchQueue
private var workItem: DispatchWorkItem?
private let syncQueue = DispatchQueue(label: "Debouncer", attributes: [])
init(limit: TimeInterval, queue: DispatchQueue = .main) {
self.limit = limit
self.queue = queue
}
@objc func execute(block: @escaping () -> Void) {
syncQueue.async { [weak self] in
if let workItem = self?.workItem {
workItem.cancel()
self?.workItem = nil
}
guard let queue = self?.queue, let limit = self?.limit else { return }
let workItem = DispatchWorkItem(block: block)
queue.asyncAfter(deadline: .now() + limit, execute: workItem)
self?.workItem = workItem
}
}
}
How can I convert this into a concurrent operation so it can be called like below:
let limiter = Debouncer(limit: 5)
func sendToServer() {
await limiter.waitUntilFinished
print("\(Date.now.timeIntervalSince1970): Fire! \(value)")
}
sendToServer()
sendToServer()
sendToServer()
However, this wouldn't debounce the tasks but suspend them until the next one gets called. Instead it should cancel the previous task and hold the current task until the debounce time. Can this be done with Swift Concurrency or is there a better approach to do this?
Tasks have the ability to use isCancelled or checkCancellation, but for the sake of a debounce routine, where you want to wait for a period of time, you might just use the throwing rendition of Task.sleep(nanoseconds:), whose documentation says:
If the task is canceled before the time ends, this function throws CancellationError.
Thus, this effectively debounces for 2 seconds.
var task: Task<(), Never>?
func debounced(_ string: String) {
task?.cancel()
task = Task {
do {
try await Task.sleep(nanoseconds: 2_000_000_000)
logger.log("result \(string)")
} catch {
logger.log("canceled \(string)")
}
}
}
Note, Apple's swift-async-algorithms has a debounce for asynchronous sequences.
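For completeness, a rough sketch of that approach, assuming the swift-async-algorithms package is added as a dependency and the values arrive through an AsyncStream; the stream setup is illustrative, AsyncStream.makeStream(of:) needs Swift 5.9+, and debounce(for:) requires the newer Clock APIs (iOS 16/macOS 13):
import AsyncAlgorithms
// Values pushed into the stream faster than every 2 seconds are coalesced;
// only the last value in each quiet period is delivered.
let (stream, continuation) = AsyncStream.makeStream(of: String.self)
Task {
    for await value in stream.debounce(for: .seconds(2)) {
        print("result \(value)")
    }
}
continuation.yield("h")
continuation.yield("he")
continuation.yield("hello")   // only "hello" is printed, about 2 seconds after the last yield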
Based on @Rob's great answer, here's a sample using an actor and Task:
actor Limiter {
enum Policy {
case throttle
case debounce
}
private let policy: Policy
private let duration: TimeInterval
private var task: Task<Void, Never>?
init(policy: Policy, duration: TimeInterval) {
self.policy = policy
self.duration = duration
}
nonisolated func callAsFunction(task: @escaping () async -> Void) {
Task {
switch policy {
case .throttle:
await throttle(task: task)
case .debounce:
await debounce(task: task)
}
}
}
private func throttle(task: @escaping () async -> Void) {
guard self.task?.isCancelled ?? true else { return }
Task {
await task()
}
self.task = Task {
try? await sleep()
self.task?.cancel()
self.task = nil
}
}
private func debounce(task: @escaping () async -> Void) {
self.task?.cancel()
self.task = Task {
do {
try await sleep()
guard !Task.isCancelled else { return }
await task()
} catch {
return
}
}
}
private func sleep() async throws {
try await Task.sleep(nanoseconds: UInt64(duration * 1_000_000_000))
}
}
The tests pass inconsistently, so I think my assumption about the order in which the tasks fire is incorrect, but I think the sample is a good start:
final class LimiterTests: XCTestCase {
func testThrottler() async throws {
// Given
let promise = expectation(description: "Ensure first task fired")
let throttler = Limiter(policy: .throttle, duration: 1)
var value = ""
var fulfillmentCount = 0
promise.expectedFulfillmentCount = 2
func sendToServer(_ input: String) {
throttler {
value += input
// Then
switch fulfillmentCount {
case 0:
XCTAssertEqual(value, "h")
case 1:
XCTAssertEqual(value, "hwor")
default:
XCTFail()
}
promise.fulfill()
fulfillmentCount += 1
}
}
// When
sendToServer("h")
sendToServer("e")
sendToServer("l")
sendToServer("l")
sendToServer("o")
await sleep(2)
sendToServer("wor")
sendToServer("ld")
wait(for: [promise], timeout: 10)
}
func testDebouncer() async throws {
// Given
let promise = expectation(description: "Ensure last task fired")
let limiter = Limiter(policy: .debounce, duration: 1)
var value = ""
var fulfillmentCount = 0
promise.expectedFulfillmentCount = 2
func sendToServer(_ input: String) {
limiter {
value += input
// Then
switch fulfillmentCount {
case 0:
XCTAssertEqual(value, "o")
case 1:
XCTAssertEqual(value, "old")
default:
XCTFail()
}
promise.fulfill()
fulfillmentCount += 1
}
}
// When
sendToServer("h")
sendToServer("e")
sendToServer("l")
sendToServer("l")
sendToServer("o")
await sleep(2)
sendToServer("wor")
sendToServer("ld")
wait(for: [promise], timeout: 10)
}
func testThrottler2() async throws {
// Given
let promise = expectation(description: "Ensure throttle before duration")
let throttler = Limiter(policy: .throttle, duration: 1)
var end = Date.now + 1
promise.expectedFulfillmentCount = 2
func test() {
// Then
XCTAssertLessThan(.now, end)
promise.fulfill()
}
// When
throttler(task: test)
throttler(task: test)
throttler(task: test)
throttler(task: test)
throttler(task: test)
await sleep(2)
end = .now + 1
throttler(task: test)
throttler(task: test)
throttler(task: test)
await sleep(2)
wait(for: [promise], timeout: 10)
}
func testDebouncer2() async throws {
// Given
let promise = expectation(description: "Ensure debounce after duration")
let debouncer = Limiter(policy: .debounce, duration: 1)
var end = Date.now + 1
promise.expectedFulfillmentCount = 2
func test() {
// Then
XCTAssertGreaterThan(.now, end)
promise.fulfill()
}
// When
debouncer(task: test)
debouncer(task: test)
debouncer(task: test)
debouncer(task: test)
debouncer(task: test)
await sleep(2)
end = .now + 1
debouncer(task: test)
debouncer(task: test)
debouncer(task: test)
await sleep(2)
wait(for: [promise], timeout: 10)
}
private func sleep(_ duration: TimeInterval) async {
await Task.sleep(UInt64(duration * 1_000_000_000))
}
}

Swift completion handler in a for loop to be performed once instead of 10 times

I have a loop with a Firestore query in it that is repeated 10 times. I need to call the (completion: block) after all 10 queries have completed. Currently my code performs the (completion: block) per query, but this would be too heavy on the server and the user's phone. How can I change the code below to accomplish what I just described?
static func getSearchedProducts(fetchingNumberToStart: Int, sortedProducts: [Int : [String : Int]], handler: @escaping (_ products: [Product], _ lastFetchedNumber: Int?) -> Void) {
var lastFetchedNumber:Int = 0
var searchedProducts:[Product] = []
let db = Firestore.firestore()
let block : FIRQuerySnapshotBlock = ({ (snap, error) in
guard error == nil, let snapshot = snap else {
debugPrint(error?.localizedDescription)
return
}
var products = snapshot.documents.map { Product(data: $0.data()) }
if !UserService.current.isGuest {
db.collection(DatabaseRef.Users).document(Auth.auth().currentUser?.uid ?? "").collection(DatabaseRef.Cart).getDocuments { (cartSnapshot, error) in
guard error == nil, let cartSnapshot = cartSnapshot else {
return
}
cartSnapshot.documents.forEach { document in
var product = Product(data: document.data())
guard let index = products.firstIndex(of: product) else { return }
let cartCount: Int = document.exists ? document.get(DatabaseRef.cartCount) as? Int ?? 0 : 0
product.cartCount = cartCount
products[index] = product
}
handler(products, lastFetchedNumber)
}
}
else {
handler(products, lastFetchedNumber)
}
})
if lastFetchedNumber == fetchingNumberToStart {
for _ in 0 ..< 10 {
//change the fetching number each time in the loop
lastFetchedNumber = lastFetchedNumber + 1
let productId = sortedProducts[lastFetchedNumber]?.keys.first ?? ""
if productId != "" {
db.collection(DatabaseRef.products).whereField(DatabaseRef.id, isEqualTo: productId).getDocuments(completion: block)
}
}
}
}
As you can see, at the very end I am looping 10 times over this query because of for _ in 0 ..< 10:
if productId != "" {
db.collection(DatabaseRef.products).whereField(DatabaseRef.id, isEqualTo: productId).getDocuments(completion: block)
}
So I need the completion: block handler to be called only once instead of 10 times here.
Use a DispatchGroup. You can enter the dispatch group each time you call the async code and then leave each time it's done. Then when everything is finished it will call the notify block and you can call your handler. Here's a quick example of what that would look like:
let dispatchGroup = DispatchGroup()
let array: [Int] = [1, 2, 3]
for _ in array {
dispatchGroup.enter()
somethingAsync() {
dispatchGroup.leave()
}
}
dispatchGroup.notify(queue: .main) {
handler()
}
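Applied to the question's loop, a rough sketch might look like the following. It assumes the per-query completion is simplified to just accumulate results (the cart-count merge from the original block is omitted), and it relies on Firestore delivering completion handlers on the main queue so the appends to searchedProducts do not race:
if lastFetchedNumber == fetchingNumberToStart {
    let dispatchGroup = DispatchGroup()
    for _ in 0 ..< 10 {
        lastFetchedNumber = lastFetchedNumber + 1
        let productId = sortedProducts[lastFetchedNumber]?.keys.first ?? ""
        if productId != "" {
            dispatchGroup.enter()
            db.collection(DatabaseRef.products)
                .whereField(DatabaseRef.id, isEqualTo: productId)
                .getDocuments { snap, error in
                    defer { dispatchGroup.leave() }
                    guard error == nil, let snapshot = snap else { return }
                    // Accumulate instead of calling the handler once per query.
                    searchedProducts.append(contentsOf: snapshot.documents.map { Product(data: $0.data()) })
                }
        }
    }
    dispatchGroup.notify(queue: .main) {
        // Called exactly once, after all ten queries have finished.
        handler(searchedProducts, lastFetchedNumber)
    }
}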