I am using the Twilio Video iOS SDK to connect to a group room.
Below is the code that runs when the connect-room button is tapped:
let recorder = RPScreenRecorder.shared()
recorder.isMicrophoneEnabled = false
recorder.isCameraEnabled = false

// The source produces either downscaled buffers with smoother motion, or an HD screen recording.
videoSource = ReplayKitVideoSource(isScreencast: true, telecineOptions: ReplayKitVideoSource.TelecineOptions.disabled)
screenTrack = LocalVideoTrack(source: videoSource!,
                              enabled: true,
                              name: "Screen")

recorder.startCapture(handler: { (sampleBuffer, type, error) in
    if error != nil {
        print("Capture error: ", error as Any)
        return
    }
    switch type {
    case RPSampleBufferType.video:
        self.videoSource?.processFrame(sampleBuffer: sampleBuffer)
    case RPSampleBufferType.audioApp:
        break
    case RPSampleBufferType.audioMic:
        // We use `TVIDefaultAudioDevice` to capture and playback audio for conferencing.
        break
    }
}) { (error) in
    if error != nil {
        print("Screen capture error: ", error as Any)
    } else {
        print("Screen capture started.")
    }
}

if accessToken == "TWILIO_ACCESS_TOKEN" {
    do {
        accessToken = try TokenUtils.fetchToken(url: tokenUrl)
    } catch {
        let message = "Failed to fetch access token"
        logMessage(messageText: message)
        return
    }
}

// Prepare local media which we will share with Room Participants.
self.prepareLocalMedia()

// Preparing the connect options with the access token that we fetched (or hardcoded).
let connectOptions = ConnectOptions(token: accessToken) { (builder) in
    // Use the local media that we prepared earlier.
    builder.audioTracks = self.localAudioTrack != nil ? [self.localAudioTrack!] : [LocalAudioTrack]()
    builder.videoTracks = self.localVideoTrack != nil ? [self.localVideoTrack!, self.screenTrack!] : [LocalVideoTrack]()

    // Use the preferred audio codec
    if let preferredAudioCodec = Settings.shared.audioCodec {
        builder.preferredAudioCodecs = [preferredAudioCodec]
    }

    // Use the preferred video codec
    if let preferredVideoCodec = Settings.shared.videoCodec {
        builder.preferredVideoCodecs = [preferredVideoCodec]
    }

    // Use the preferred encoding parameters
    if let encodingParameters = Settings.shared.getEncodingParameters() {
        builder.encodingParameters = encodingParameters
    }

    // Use the preferred signaling region
    if let signalingRegion = Settings.shared.signalingRegion {
        builder.region = signalingRegion
    }

    builder.roomName = self.roomTextField.text
}

// Connect to the Room using the options we provided.
room = TwilioVideoSDK.connect(options: connectOptions, delegate: self)
logMessage(messageText: "Attempting to connect to room \(String(describing: self.roomTextField.text))")
Once connected to the group room with a remote participant, I want to share my screen with them.
To implement this feature I referred to the "ReplayKitExample" with the in-app capture method, but I have not been able to get it working: the remote participant never sees the screen-share content. Nothing related to screen sharing happens with this code, and I am looking for input on how to implement it.
It's happening because you are trying to send data from both the camera source and the screen video source at the same time; you have to unpublish the camera track before publishing the screen track.
Here's my code for reference:
// MARK: - Screen Sharing via ReplayKit
extension CallRoomViewController: RPScreenRecorderDelegate {
    func broadCastButtonTapped() {
        guard screenRecorder.isAvailable else {
            print("Not able to Broadcast")
            return
        }
        print("Can Broadcast")

        if self.videoSource != nil {
            self.stopConference()
        } else {
            self.startConference()
        }
    }

    func publishVideoTrack() {
        if let participant = self.room?.localParticipant,
           let videoTrack = self.localVideoTrack {
            participant.publishVideoTrack(videoTrack)
        }
    }

    func unpublishVideoTrack() {
        if let participant = self.room?.localParticipant,
           let videoTrack = self.localVideoTrack {
            participant.unpublishVideoTrack(videoTrack)
        }
    }

    func stopConference() {
        self.unpublishVideoTrack()
        self.localVideoTrack = nil
        self.videoSource = nil
        self.localVideoTrack = LocalVideoTrack(source: cameraSource!, enabled: true, name: "Camera")

        screenRecorder.stopCapture { (captureError) in
            if let error = captureError {
                print("Screen capture stop error: ", error as Any)
            } else {
                print("Screen capture stopped.")
                self.publishVideoTrack()
            }
        }
    }

    func startConference() {
        self.unpublishVideoTrack()
        self.localVideoTrack = nil

        // We are only using ReplayKit to capture the screen.
        // Use a LocalAudioTrack to capture the microphone for sharing audio in the room.
        screenRecorder.isMicrophoneEnabled = false
        // Use a LocalVideoTrack with a CameraSource to capture the camera for sharing camera video in the room.
        screenRecorder.isCameraEnabled = false

        // The source produces either downscaled buffers with smoother motion, or an HD screen recording.
        self.videoSource = ReplayKitVideoSource(isScreencast: true,
                                                telecineOptions: ReplayKitVideoSource.TelecineOptions.p60to24or25or30)
        self.localVideoTrack = LocalVideoTrack(source: videoSource!,
                                               enabled: true,
                                               name: "Screen")

        let videoCodec = Settings.shared.videoCodec ?? Vp8Codec()!
        let (_, outputFormat) = ReplayKitVideoSource.getParametersForUseCase(codec: videoCodec,
                                                                             isScreencast: true,
                                                                             telecineOptions: ReplayKitVideoSource.TelecineOptions.p60to24or25or30)
        self.videoSource?.requestOutputFormat(outputFormat)

        screenRecorder.startCapture(handler: { (sampleBuffer, type, error) in
            if error != nil {
                print("Capture error: ", error as Any)
                return
            }
            switch type {
            case RPSampleBufferType.video:
                self.videoSource?.processFrame(sampleBuffer: sampleBuffer)
            case RPSampleBufferType.audioApp:
                break
            case RPSampleBufferType.audioMic:
                // We use `TVIDefaultAudioDevice` to capture and playback audio for conferencing.
                break
            default:
                print(error?.localizedDescription ?? "screenRecorder error")
            }
        }) { (error) in
            if error != nil {
                print("Screen capture error: ", error as Any)
            } else {
                print("Screen capture started.")
                self.publishVideoTrack()
            }
        }
    }
}
And you can connect to the room from viewDidLoad():
func connectToChatRoom() {
    // Configure the access token either from the server or manually.
    // If the default wasn't changed, try fetching it from the server.
    accessToken = self.callRoomDetail.charRoomAccessToken
    guard accessToken != "TWILIO_ACCESS_TOKEN" else {
        let message = "Failed to fetch access token"
        print(message)
        return
    }

    // Prepare local media which we will share with Room Participants.
    self.prepareLocalMedia()

    // Preparing the connect options with the access token that we fetched (or hardcoded).
    let connectOptions = ConnectOptions(token: accessToken) { (builder) in
        // The name of the Room where the Client will attempt to connect to. Please note that if you pass an empty
        // Room `name`, the Client will create one for you. You can get the name or sid from any connected Room.
        builder.roomName = self.callRoomDetail.chatRoomName

        // Use the local media that we prepared earlier.
        if let audioTrack = self.localAudioTrack {
            builder.audioTracks = [audioTrack]
        }
        if let videoTrack = self.localVideoTrack {
            builder.videoTracks = [videoTrack]
        }

        // Use the preferred audio codec
        if let preferredAudioCodec = Settings.shared.audioCodec {
            builder.preferredAudioCodecs = [preferredAudioCodec]
        }

        // Use the preferred video codec
        if let preferredVideoCodec = Settings.shared.videoCodec {
            builder.preferredVideoCodecs = [preferredVideoCodec]
        }

        // Use the preferred encoding parameters
        let videoCodec = Settings.shared.videoCodec ?? Vp8Codec()!
        let (encodingParams, _) = ReplayKitVideoSource.getParametersForUseCase(codec: videoCodec,
                                                                               isScreencast: true,
                                                                               telecineOptions: ReplayKitVideoSource.TelecineOptions.p60to24or25or30)
        builder.encodingParameters = encodingParams

        // Use the preferred signaling region
        if let signalingRegion = Settings.shared.signalingRegion {
            builder.region = signalingRegion
        }

        builder.isAutomaticSubscriptionEnabled = true
        builder.isNetworkQualityEnabled = true
        builder.networkQualityConfiguration = NetworkQualityConfiguration(localVerbosity: .minimal,
                                                                          remoteVerbosity: .minimal)
    }

    // Connect to the Room using the options we provided.
    room = TwilioVideoSDK.connect(options: connectOptions, delegate: self)
    print("Attempting to connect to room \(self.callRoomDetail.chatRoomName ?? "")")
    self.showRoomUI(inRoom: true)
}
You can get the ReplayKitVideoSource file and the other helper files from the Twilio repository: https://github.com/twilio/video-quickstart-ios/tree/master/ReplayKitExample
I work at Twilio and I can confirm that you should be able to publish video tracks for both camera and screen at the same time without issue.
It is difficult to identify why this is not working for you without a completely functional example app.
However, I have tested this using one of our reference apps and confirmed that it is working. More details are here: https://github.com/twilio/video-quickstart-ios/issues/650#issuecomment-1178232542
Hopefully this is a useful example for how to publish both camera and screen video at the same time.
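For a rough idea of what that looks like in code, here is a minimal sketch assembled from the snippets in this thread (not the reference app itself); it assumes cameraSource (a CameraSource) and videoSource (a ReplayKitVideoSource) are already configured as above, and roomName/accessToken are placeholders:
// Sketch: camera and screen tracks published side by side.
let cameraTrack = LocalVideoTrack(source: cameraSource!, enabled: true, name: "Camera")
let screenTrack = LocalVideoTrack(source: videoSource!, enabled: true, name: "Screen")

// Either include both tracks in ConnectOptions before connecting...
let options = ConnectOptions(token: accessToken) { builder in
    builder.videoTracks = [cameraTrack!, screenTrack!]
    builder.roomName = roomName
}

// ...or publish the screen track later, while already connected.
if let participant = room?.localParticipant {
    participant.publishVideoTrack(screenTrack!)
}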
Related
I am trying to implement speech recognition using the Azure Speech SDK in an iOS project using Swift, and I ran into the problem that the speech recognition completion function (stopContinuousRecognition()) blocks the app UI for a few seconds, although there is no memory or processor load, and no leak. I tried moving this function to DispatchQueue.main.async {}, but it made no difference. Has anyone faced this problem? Is it necessary to put this in a separate thread, and why does the function take so long to finish?
Edit:
It is very hard to provide a working example, but basically I am calling this function on a button press:
private func startListenAzureRecognition(lang: String) {
    let audioFormat = SPXAudioStreamFormat.init(usingPCMWithSampleRate: 8000, bitsPerSample: 16, channels: 1)
    azurePushAudioStream = SPXPushAudioInputStream(audioFormat: audioFormat!)
    let audioConfig = SPXAudioConfiguration(streamInput: azurePushAudioStream!)!

    var speechConfig: SPXSpeechConfiguration?
    do {
        let sub = "enter your code here"
        let region = "enter your region here"
        try speechConfig = SPXSpeechConfiguration(subscription: sub, region: region)
        speechConfig!.enableDictation()
        speechConfig?.speechRecognitionLanguage = lang
    } catch {
        print("error \(error) happened")
        speechConfig = nil
    }

    self.azureRecognition = try! SPXSpeechRecognizer(speechConfiguration: speechConfig!, audioConfiguration: audioConfig)

    self.azureRecognition!.addRecognizingEventHandler() { reco, evt in
        if evt.result.text != nil && evt.result.text != "" {
            print(evt.result.text ?? "no result")
        }
    }

    self.azureRecognition!.addRecognizedEventHandler() { reco, evt in
        if evt.result.text != nil && evt.result.text != "" {
            print(evt.result.text ?? "no result")
        }
    }

    do {
        try self.azureRecognition?.startContinuousRecognition()
    } catch {
        print("error \(error) happened")
    }
}
And when I press the button again to stop recognition, I am calling this function:
private func stopListenAzureRecognition() {
    DispatchQueue.main.async {
        print("start")
        // app blocks here
        try! self.azureRecognition?.stopContinuousRecognition()
        self.azurePushAudioStream!.close()
        self.azureRecognition = nil
        self.azurePushAudioStream = nil
        print("stop")
    }
}
Also, I am using raw audio data from the mic (recognizeOnce works perfectly for the first phrase, so the audio data itself is fine).
Try closing the stream first and then stopping the continuous recognition:
azurePushAudioStream!.close()
try! azureRecognition?.stopContinuousRecognition()
azureRecognition = nil
azurePushAudioStream = nil
You don't even need to do it asynchronously.
At least this worked for me.
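Applied to the stop function from the question, the reordering would look roughly like this (a sketch reusing the questioner's azureRecognition and azurePushAudioStream properties):
private func stopListenAzureRecognition() {
    // Close the push stream first so the recognizer's read loop can finish,
    // then stop recognition; as noted above, no async dispatch is needed.
    azurePushAudioStream?.close()
    do {
        try azureRecognition?.stopContinuousRecognition()
    } catch {
        print("error \(error) happened")
    }
    azureRecognition = nil
    azurePushAudioStream = nil
}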
I am making an app that activates a VPN connection based on OpenVPN: it retrieves a certificate from the database and opens a tunnel using NEPacketTunnelProvider and NetworkExtension.
I used the following repository, and my VPN is now working fine.
The problem is that I want to allow only one app (WhatsApp, precisely) to use this VPN when it is enabled, and to restrict all other apps from using it.
On Android this is possible by giving the bundle identifiers of the allowed apps to the PackageManager.
Can you please help me?
This is my PacketTunnelProvider class:
import NetworkExtension
import OpenVPNAdapter

extension NEPacketTunnelFlow: OpenVPNAdapterPacketFlow {}

class PacketTunnelProvider: NEPacketTunnelProvider {
    lazy var vpnAdapter: OpenVPNAdapter = {
        let adapter = OpenVPNAdapter()
        adapter.delegate = self
        return adapter
    }()

    let vpnReachability = OpenVPNReachability()

    var startHandler: ((Error?) -> Void)?
    var stopHandler: (() -> Void)?

    override func startTunnel(options: [String: NSObject]?, completionHandler: @escaping (Error?) -> Void) {
        // There are many ways to provide OpenVPN settings to the tunnel provider. For instance,
        // you can use the `options` argument of `startTunnel(options:completionHandler:)` or get
        // settings from the `protocolConfiguration.providerConfiguration` property of the
        // `NEPacketTunnelProvider` class. You may also provide just the content of an .ovpn file,
        // or use key:value pairs provided exclusively or in addition to the file content.

        // In our case we need the providerConfiguration dictionary to retrieve the content
        // of the OpenVPN configuration file. Other options related to the tunnel
        // provider can also be stored there.
        print("started!")
        guard
            let protocolConfiguration = protocolConfiguration as? NETunnelProviderProtocol,
            let providerConfiguration = protocolConfiguration.providerConfiguration
        else {
            fatalError()
        }

        guard let ovpnFileContent: Data = providerConfiguration["ovpn"] as? Data else {
            fatalError()
        }

        let configuration = OpenVPNConfiguration()
        configuration.fileContent = ovpnFileContent
        // configuration.settings = [
        //     // Additional parameters as key:value pairs may be provided here
        // ]

        // Uncomment this line if you want to keep the TUN interface active during pauses or reconnections
        // configuration.tunPersist = true

        // Apply the OpenVPN configuration
        let evaluation: OpenVPNConfigurationEvaluation
        do {
            evaluation = try vpnAdapter.apply(configuration: configuration)
        } catch {
            completionHandler(error)
            return
        }

        // Provide credentials if needed
        if !evaluation.autologin {
            // If your VPN configuration requires user credentials you can provide them through
            // the `protocolConfiguration.username` and `protocolConfiguration.passwordReference`
            // properties. It is recommended to use a persistent keychain reference to a keychain
            // item containing the password.
            guard let username: String = protocolConfiguration.username else {
                fatalError()
            }

            // Retrieve a password from the keychain
            // guard let password: String = ... {
            //     fatalError()
            // }

            let credentials = OpenVPNCredentials()
            credentials.username = username
            // credentials.password = password

            do {
                try vpnAdapter.provide(credentials: credentials)
            } catch {
                completionHandler(error)
                return
            }
        }

        // Checking reachability. In some cases after switching from cellular to
        // WiFi the adapter still uses cellular data. Changing reachability forces
        // reconnection so the adapter will use the actual connection.
        vpnReachability.startTracking { [weak self] status in
            guard status == .reachableViaWiFi else { return }
            self?.vpnAdapter.reconnect(afterTimeInterval: 5)
        }

        // Establish the connection and wait for the .connected event
        startHandler = completionHandler
        vpnAdapter.connect(using: packetFlow)
    }

    override func stopTunnel(with reason: NEProviderStopReason, completionHandler: @escaping () -> Void) {
        stopHandler = completionHandler

        if vpnReachability.isTracking {
            vpnReachability.stopTracking()
        }

        vpnAdapter.disconnect()
    }
}
extension PacketTunnelProvider: OpenVPNAdapterDelegate {
    // OpenVPNAdapter calls this delegate method to configure a VPN tunnel.
    // The `completionHandler` callback requires an object conforming to the `OpenVPNAdapterPacketFlow`
    // protocol if the tunnel is configured without errors; otherwise send nil.
    // `OpenVPNAdapterPacketFlow` method signatures are similar to `NEPacketTunnelFlow`, so
    // you can just extend that class to adopt the `OpenVPNAdapterPacketFlow` protocol and
    // send `self.packetFlow` to the `completionHandler` callback.
    func openVPNAdapter(_ openVPNAdapter: OpenVPNAdapter, configureTunnelWithNetworkSettings networkSettings: NEPacketTunnelNetworkSettings?, completionHandler: @escaping (Error?) -> Void) {
        // In order to direct all DNS queries first to the VPN DNS servers before the primary DNS servers,
        // send an empty string to NEDNSSettings.matchDomains
        networkSettings?.dnsSettings?.matchDomains = [""]

        // Set the network settings for the current tunneling session.
        setTunnelNetworkSettings(networkSettings, completionHandler: completionHandler)
    }

    // Process events returned by the OpenVPN library
    func openVPNAdapter(_ openVPNAdapter: OpenVPNAdapter, handleEvent event: OpenVPNAdapterEvent, message: String?) {
        switch event {
        case .connected:
            if reasserting {
                reasserting = false
            }
            guard let startHandler = startHandler else { return }
            startHandler(nil)
            self.startHandler = nil
        case .disconnected:
            guard let stopHandler = stopHandler else { return }
            if vpnReachability.isTracking {
                vpnReachability.stopTracking()
            }
            stopHandler()
            self.stopHandler = nil
        case .reconnecting:
            reasserting = true
        default:
            break
        }
    }

    // Handle errors thrown by the OpenVPN library
    func openVPNAdapter(_ openVPNAdapter: OpenVPNAdapter, handleError error: Error) {
        // Handle only fatal errors
        guard let fatal = (error as NSError).userInfo[OpenVPNAdapterErrorFatalKey] as? Bool, fatal == true else {
            return
        }

        if vpnReachability.isTracking {
            vpnReachability.stopTracking()
        }

        if let startHandler = startHandler {
            startHandler(error)
            self.startHandler = nil
        } else {
            cancelTunnelWithError(error)
        }
    }

    // Use this method to process any log message returned by the OpenVPN library.
    func openVPNAdapter(_ openVPNAdapter: OpenVPNAdapter, handleLogMessage logMessage: String) {
        // Handle log messages
        print(logMessage)
    }
}
This is the function used in my VPN View Model to start a tunnel:
func configureVPN(serverAddress: String, username: String, password: String) {
    var configData: Data = Data()
    self.getCertificate { certificate in
        configData = certificate!
        guard
            // If we want to read from a file:
            // let configData = self.readFile(name: "vtest2"),
            let providerManager = self.providerManager
        else {
            return
        }
        self.providerManager?.loadFromPreferences { error in
            if error == nil {
                let tunnelProtocol = NETunnelProviderProtocol()
                tunnelProtocol.username = username
                tunnelProtocol.serverAddress = serverAddress
                tunnelProtocol.providerBundleIdentifier = self.providerId // bundle id of the network extension target
                tunnelProtocol.providerConfiguration = ["ovpn": configData]
                tunnelProtocol.disconnectOnSleep = false
                providerManager.protocolConfiguration = tunnelProtocol
                providerManager.localizedDescription = "Slyfone Guard" // the title of the VPN profile shown in Settings
                providerManager.isEnabled = true
                providerManager.saveToPreferences(completionHandler: { (error) in
                    if error == nil {
                        providerManager.loadFromPreferences(completionHandler: { (error) in
                            do {
                                try providerManager.connection.startVPNTunnel(options: nil) // starts the VPN tunnel
                            } catch let error {
                                print(error.localizedDescription)
                            }
                        })
                    }
                })
            }
        }
    }
}
As an engineer from Apple said:
The way to do it is to use Per-App VPN. See the Per-App VPN On Demand section in the NETunnelProviderManager documentation.
With NEPacketTunnelProvider on macOS (as of 10.15.4) you can set this up yourself with NEAppRule. A very generic example of setting up Safari to trigger the VPN would be:
let perAppManager = NETunnelProviderManager.forPerAppVPN()
/* ... */
perAppManager.loadFromPreferences(completionHandler: { error in
    precondition(Thread.isMainThread)
    /* ... */
    let proto = (perAppManager.protocolConfiguration as? NETunnelProviderProtocol) ?? NETunnelProviderProtocol()
    proto.serverAddress = "server.vpn.com"
    proto.providerBundleIdentifier = "com.perapp-vpn.macOSPacketTunnel.PacketTunnelTest"

    var appRules = [NEAppRule]()
    let appRule = NEAppRule(signingIdentifier: "com.apple.Safari", designatedRequirement: "identifier \"com.apple.Safari\" and anchor apple")
    appRule.matchDomains = ["example.com"]
    appRules.append(appRule)

    perAppManager.appRules = appRules
    perAppManager.isOnDemandEnabled = true
    perAppManager.protocolConfiguration = proto
    perAppManager.isEnabled = true
    perAppManager.localizedDescription = "Testing Per-App VPN"

    perAppManager.saveToPreferences { saveError in
        /* Proceed to connect */
    }
})
That was a very generic case, and forPerAppVPN() is only available on macOS. A more real-world approach for iOS would be to create this configuration through MDM. That entire flow is explained in the documentation mentioned previously. I would start by creating a configuration profile in Apple Configurator 2 and testing it out.
No idea if it works with OpenVPN.
I have tried a couple of different things, and at this point I am stumped. I simply want to access the user's email to present it in a view, but I have not been able to successfully present, much less retrieve, this information. Here are the two pieces of code I have tried:
func getUsername() -> String? {
    if self.isAuth {
        return AWSMobileClient.default().username
    } else {
        return nil
    }
}
and
func getUserEmail() -> String {
    var returnValue = String()
    AWSMobileClient.default().getUserAttributes { (attributes, error) in
        if error != nil {
            print("ERROR: \(String(describing: error))")
        } else {
            if let attributesDict = attributes {
                //print(attributesDict["email"])
                self.name = attributesDict["name"]!
                returnValue = attributesDict["name"]!
            }
        }
    }
    print("return value: \(returnValue)")
    return returnValue
}
Does anyone know why this is not working?
After sign-in, try this:
AWSMobileClient.default().getTokens { (tokens, error) in
    if let error = error {
        print("error \(error)")
    } else if let tokens = tokens {
        let claims = tokens.idToken?.claims
        print("claims \(String(describing: claims))")
        print("email? \(claims?["email"] as? String ?? "No email")")
    }
}
I've tried getting the user attributes using AWSMobileClient's getUserAttributes with no success, and also using AWSCognitoIdentityPool's getDetails, with no success. It might be an error in AWSMobileClient, but we can still get the attributes from the ID token, as seen above.
If you are using the Hosted UI, remember to give it the correct scopes, for example:
let hostedUIOptions = HostedUIOptions(scopes: ["openid", "email", "profile"], identityProvider: "Google")
It is because it is an async function: the outer function returns before the callback delivers the value. The only way I found around it was placing a while loop and then using an if condition.
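For what it's worth, rather than busy-waiting, the usual pattern is to give the wrapper its own completion handler. A sketch based on the getUserEmail function from the question (the "email" attribute key is an assumption here):
func getUserEmail(completion: @escaping (String?) -> Void) {
    AWSMobileClient.default().getUserAttributes { (attributes, error) in
        if let error = error {
            print("ERROR: \(error)")
            completion(nil)
        } else {
            completion(attributes?["email"])
        }
    }
}

// Usage: act on the value only once it arrives.
getUserEmail { email in
    DispatchQueue.main.async {
        self.name = email ?? ""
    }
}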
Sorry, my subject isn't very specific.
I'm dealing with managing multiple users in my Realm and having some problems. A user registers with their email and name; then, when Realm logs in and creates a SyncUser, I create a new YpbUser object with the email and name, and with SyncUser.current.identity in YpbUser.id.
When a user logs in, I want to use their email to look up whether there is an existing YpbUser for that email. I'm doing it this way so that in the future, if someone uses username/password and later uses Google auth (not yet implemented), those SyncUsers can resolve to the same YpbUser.
Below is my code for checking for an existing YpbUser, along with my Realm setup code. The problem I'm having is that it usually, but not always, fails to find a YpbUser even when that user exists (I'm looking at the YpbUser list in Realm Studio, and the user with that email is definitely there!).
Inspecting from within existingUser(for:in:), users is usually empty, but it is sometimes non-empty.
I assume the issue lies in the fact that I'm pretty much just guessing at how to use SyncSubscription.observe.
Please help?
fileprivate func openRealmWithUser(user: SyncUser) {
    DispatchQueue.main.async { [weak self] in
        let config = user.configuration(realmURL: RealmConstants.realmURL, fullSynchronization: false, enableSSLValidation: true, urlPrefix: nil)
        self?.realm = try! Realm(configuration: config)

        let songSub = self?.realm?.objects(Song.self).subscribe()
        let usersSub = self?.realm?.objects(YpbUser.self).subscribe()
        self?.usersToken = usersSub?.observe(\.state, options: .initial) { state in
            if !(self?.proposedUser.email.isEmpty)! {
                self?.findYpbUser(in: (self?.realm)!)
            }
        }
        self?.performSegue(withIdentifier: Storyboard.LoginToNewRequestSegue, sender: nil)
    }
}

fileprivate func findYpbUser(in realm: Realm) {
    if proposedUser != YpbUser() { // self.proposedUser.email gets set from the login method
        let existingUser = YpbUser.existingUser(for: proposedUser, in: realm)
        guard existingUser == nil else { // If we find the YpbUser, set as current:
            try! realm.write {
                pr("YpbUser found: \(existingUser!)")
                YpbUser.current = existingUser
            }
            return
        }
        pr("YpbUser not found.") // i.e., SyncUser set, but no YpbUser found. i.e., creating a new YpbUser
        createNewYpbUser(for: proposedUser, in: realm)
    }
}

extension YpbUser {
    class func existingUser(for proposedUser: YpbUser, in realm: Realm) -> YpbUser? {
        let users = realm.objects(YpbUser.self)
        let usersWithThisEmail = users.filter("email = %@", proposedUser.email)
        if let emailUser = usersWithThisEmail.first {
            return emailUser
        }
        return nil
    }
}
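One guess worth testing (an assumption about the query-based sync API, since the observe usage is the part in doubt): only run the lookup once the subscription reports .complete, because before that the local Realm may not yet contain the synced YpbUser objects:
// Sketch: defer the user lookup until the subscription has finished syncing.
self?.usersToken = usersSub?.observe(\.state, options: .initial) { state in
    guard case .complete = state else { return } // results may be empty before this
    if let self = self, !self.proposedUser.email.isEmpty {
        self.findYpbUser(in: self.realm!)
    }
}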
I want to do something simple in Swift. I have to retrieve some settings from a device and then initialize some UI controls with those settings. The retrieval may take a few seconds to complete, so I don't want the code to continue until after the retrieval finishes (it is async).
I have read countless posts on many websites, including this one, and read many tutorials. None seem to work for me.
Also, in the interest of encapsulation, I want to keep the details within the device object.
When I run the app, I see the print from the initializing method before I see the print from the retrieval method.
// Initializing method
brightnessLevel = 100
device.WhatIsTheBrightnessLevel(level: &brightnessLevel)
print("The brightness level is \(brightnessLevel)")
// method with the data retrieval code
func WhatIsTheBrightnessLevel(level brightness: inout Int) -> CResults
{
    var currentBrightness: Int // renamed locally so it doesn't shadow the inout parameter
    var characteristic: HMCharacteristic
    var name: String
    var results: CResults
    var timeout: DispatchTime
    var timeoutResult: DispatchTimeoutResult

    // Refresh the value by querying the lightbulb
    name = m_lightBulbName
    characteristic = m_brightnessCharacteristic!
    currentBrightness = 100
    timeout = DispatchTime.now() + .seconds(CLightBulb.READ_VALUE_TIMEOUT)
    timeoutResult = .success
    results = CResults()
    results.SetResult(code: CResults.code.success)

    let dispatchGroup = DispatchGroup()
    DispatchQueue.global(qos: .userInteractive).async
    {
        dispatchGroup.enter()
        characteristic.readValue(completionHandler:
        { (error) in
            if error != nil
            {
                results.SetResult(code: CResults.code.homeKitError)
                results.SetHomeKitDescription(text: error!.localizedDescription)
                print("Error in reading the brightness level for \(name): \(error!.localizedDescription)")
            }
            else
            {
                currentBrightness = characteristic.value as! Int
                print("CLightBulb: -->Read the brightness level. It is \(currentBrightness) at " + Date().description(with: Locale.current))
            }
            dispatchGroup.leave()
        })
        timeoutResult = dispatchGroup.wait(timeout: timeout)
        if timeoutResult == .timedOut
        {
            results.SetResult(code: CResults.code.timedOut)
        }
        else
        {
            print("CLightBulb: (After wait) The brightness level is \(currentBrightness) at " + Date().description(with: Locale.current))
            self.m_brightnessLevel = currentBrightness
        }
    }
    return results
}
Thank you!
If you're going to wrap an async function with your own function, it's generally best to give your wrapper function a completion handler as well. Notice the call to your completion handler. This is where you'd pass the resulting values (i.e. within the closure):
func getBrightness(characteristic: HMCharacteristic, completion: @escaping (Int?, Error?) -> Void) {
    characteristic.readValue { (error) in
        // Program flows here second
        if error == nil {
            completion(characteristic.value as? Int, nil)
        } else {
            completion(nil, error)
        }
    }
    // Program flows here first
}
Then when you call your function, you just need to make sure that you're handling the results within the completion handler (i.e. closure):
getBrightness(characteristic: characteristic) { (value, error) in
    // Program flows here second
    if error == nil {
        if let value = value {
            print(value)
        }
    } else {
        print("an error occurred: \(error.debugDescription)")
    }
}
// Program flows here first
Always keep in mind that the code will flow through before the async function completes, so you have to structure your code so that anything depending on the returned value or error doesn't get executed before completion.
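As a side note, if the project can target Swift concurrency (a sketch under that assumption, not part of the original answer), the same wrapper can be bridged to async/await so the caller simply awaits the value instead of nesting closures:
// Sketch: bridging the completion-handler API to async/await.
func readBrightness(from characteristic: HMCharacteristic) async throws -> Int? {
    try await withCheckedThrowingContinuation { continuation in
        characteristic.readValue { error in
            if let error = error {
                continuation.resume(throwing: error)
            } else {
                continuation.resume(returning: characteristic.value as? Int)
            }
        }
    }
}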