One-to-many WebRTC - Swift

I want to create a "one to many" WebRTC setup (with a maximum of 3 devices). I have one main device, and the other devices connect to it. Think of it like a walkie-talkie: one device that all the others connect to.
I have this code that works for a one-to-one connection.
import AVFoundation
import UIKit
import WebRTC
import SocketIO
import CoreTelephony
import ReachabilitySwift
let TAG = "ViewController"
let AUDIO_TRACK_ID = TAG + "AUDIO"
let LOCAL_MEDIA_STREAM_ID = TAG + "STREAM"
class ViewController: UIViewController, RTCPeerConnectionDelegate, RTCDataChannelDelegate {
var mediaStream: RTCMediaStream!
var localAudioTrack: RTCAudioTrack!
var remoteAudioTrack: RTCAudioTrack!
var dataChannel: RTCDataChannel!
var dataChannelRemote: RTCDataChannel!
var roomName: String!
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
initWebRTC();
sigConnect(wsUrl: "http://192.168.1.69:3000");
localAudioTrack = peerConnectionFactory.audioTrack(withTrackId: AUDIO_TRACK_ID)
mediaStream = peerConnectionFactory.mediaStream(withStreamId: LOCAL_MEDIA_STREAM_ID)
mediaStream.addAudioTrack(localAudioTrack)
}
func getRoomName() -> String {
return (roomName == nil || roomName.isEmpty) ? "_defaultroom": roomName;
}
// webrtc
var peerConnectionFactory: RTCPeerConnectionFactory! = nil
var peerConnection: RTCPeerConnection! = nil
var mediaConstraints: RTCMediaConstraints! = nil
var socket: SocketIOClient! = nil
var wsServerUrl: String! = nil
var peerStarted: Bool = false
func initWebRTC() {
RTCInitializeSSL()
peerConnectionFactory = RTCPeerConnectionFactory()
let mandatoryConstraints = ["OfferToReceiveAudio": "true", "OfferToReceiveVideo": "false"]
let optionalConstraints = [ "DtlsSrtpKeyAgreement": "true", "RtpDataChannels" : "true", "internalSctpDataChannels" : "true"]
mediaConstraints = RTCMediaConstraints.init(mandatoryConstraints: mandatoryConstraints, optionalConstraints: optionalConstraints)
}
func connect() {
if (!peerStarted) {
sendOffer()
peerStarted = true
}
}
func hangUp() {
sendDisconnect()
stop()
}
func stop() {
if (peerConnection != nil) {
peerConnection.close()
peerConnection = nil
peerStarted = false
}
}
func prepareNewConnection() -> RTCPeerConnection {
var iceServers: [RTCIceServer] = []
iceServers.append(RTCIceServer(urlStrings: ["stun:stun.l.google.com:19302"], username:"",credential: ""))
let rtcConfig: RTCConfiguration = RTCConfiguration()
rtcConfig.tcpCandidatePolicy = RTCTcpCandidatePolicy.disabled
rtcConfig.bundlePolicy = RTCBundlePolicy.maxBundle
rtcConfig.rtcpMuxPolicy = RTCRtcpMuxPolicy.require
rtcConfig.iceServers = iceServers;
peerConnection = peerConnectionFactory.peerConnection(with: rtcConfig, constraints: mediaConstraints, delegate: self)
peerConnection.add(mediaStream);
let tt = RTCDataChannelConfiguration();
tt.isOrdered = false;
self.dataChannel = peerConnection.dataChannel(forLabel: "testt", configuration: tt)
self.dataChannel.delegate = self
print("Make datachannel")
return peerConnection;
}
// RTCPeerConnectionDelegate - begin [ ///////////////////////////////////////////////////////////////////////////////
/** Called when the SignalingState changed. */
public func peerConnection(_ peerConnection: RTCPeerConnection, didChange stateChanged: RTCSignalingState){
print("signal state: \(stateChanged.rawValue)")
}
/** Called when media is received on a new stream from remote peer. */
public func peerConnection(_ peerConnection: RTCPeerConnection, didAdd stream: RTCMediaStream){
if (peerConnection == nil) {
return
}
if (stream.audioTracks.count > 1) {
print("Weird-looking stream: " + stream.description)
return
}
}
/** Called when a remote peer closes a stream. */
public func peerConnection(_ peerConnection: RTCPeerConnection, didRemove stream: RTCMediaStream){}
/** Called when negotiation is needed, for example ICE has restarted. */
public func peerConnectionShouldNegotiate(_ peerConnection: RTCPeerConnection){}
/** Called any time the IceConnectionState changes. */
public func peerConnection(_ peerConnection: RTCPeerConnection, didChange newState: RTCIceConnectionState){}
/** Called any time the IceGatheringState changes. */
public func peerConnection(_ peerConnection: RTCPeerConnection, didChange newState: RTCIceGatheringState){}
/** New ice candidate has been found. */
public func peerConnection(_ peerConnection: RTCPeerConnection, didGenerate candidate: RTCIceCandidate){
print("iceCandidate: " + candidate.description)
let json:[String: AnyObject] = [
"type" : "candidate" as AnyObject,
"sdpMLineIndex" : candidate.sdpMLineIndex as AnyObject,
"sdpMid" : candidate.sdpMid as AnyObject,
"candidate" : candidate.sdp as AnyObject
]
sigSendIce(msg: json as NSDictionary)
}
/** Called when a group of local Ice candidates have been removed. */
public func peerConnection(_ peerConnection: RTCPeerConnection, didRemove candidates: [RTCIceCandidate]){}
/** New data channel has been opened. */
public func peerConnection(_ peerConnection: RTCPeerConnection, didOpen dataChannel: RTCDataChannel){
print("Datachannel is open, name: \(dataChannel.label)")
dataChannel.delegate = self
self.dataChannelRemote = dataChannel
}
// RTCPeerConnectionDelegate - end ]/////////////////////////////////////////////////////////////////////////////////
public func dataChannel(_ dataChannel: RTCDataChannel, didReceiveMessageWith buffer: RTCDataBuffer){
print("iets ontvangen");
}
public func dataChannelDidChangeState(_ dataChannel: RTCDataChannel){
print("channel.state \(dataChannel.readyState.rawValue)");
}
func sendData(message: String) {
let newData = message.data(using: String.Encoding.utf8)
let dataBuff = RTCDataBuffer(data: newData!, isBinary: false)
self.dataChannel.sendData(dataBuff)
}
func onOffer(sdp:RTCSessionDescription) {
print("on offer shizzle")
setOffer(sdp: sdp)
sendAnswer()
peerStarted = true;
}
func onAnswer(sdp:RTCSessionDescription) {
setAnswer(sdp: sdp)
}
func onCandidate(candidate:RTCIceCandidate) {
peerConnection.add(candidate)
}
func sendSDP(sdp:RTCSessionDescription) {
print("Converting sdp...")
let json:[String: AnyObject] = [
"type" : sdp.type.rawValue as AnyObject,
"sdp" : sdp.sdp.description as AnyObject
]
sigSend(msg: json as NSDictionary);
}
func sendOffer() {
peerConnection = prepareNewConnection();
peerConnection.offer(for: mediaConstraints) { (sdp, error) in
if (error == nil) {
print("send offer")
self.peerConnection.setLocalDescription(sdp!, completionHandler: { (error) in
print("Sending: SDP")
print(sdp as Any)
self.sendSDP(sdp: sdp!)
})
} else {
print("sdp creation error: \(String(describing: error))")
}
}
}
func setOffer(sdp:RTCSessionDescription) {
if (peerConnection != nil) {
print("peer connection already exists")
}
peerConnection = prepareNewConnection();
peerConnection.setRemoteDescription(sdp) { (Error) in
}
}
func sendAnswer() {
print("sending Answer. Creating remote session description...")
if (peerConnection == nil) {
print("peerConnection NOT exist!")
return
}
peerConnection.answer(for: mediaConstraints) { (sdp, error) in
print("creating answer")
if (error == nil) {
self.peerConnection.setLocalDescription(sdp!, completionHandler: { (error) in
print("Sending: SDP")
print(sdp as Any)
self.sendSDP(sdp: sdp!)
})
} else {
print("sdp creation error: \(String(describing: error))")
}
}
}
func setAnswer(sdp:RTCSessionDescription) {
if (peerConnection == nil) {
print("peerConnection NOT exist!")
return
}
peerConnection.setRemoteDescription(sdp) { (Error) in
print("remote description")
}
}
func sendDisconnect() {
let json:[String: AnyObject] = [
"type" : "user disconnected" as AnyObject
]
sigSend(msg: json as NSDictionary);
}
// websocket related operations
func sigConnect(wsUrl:String) {
wsServerUrl = wsUrl;
print("connecting to " + wsServerUrl)
socket = SocketIOClient(socketURL: NSURL(string: wsServerUrl)! as URL)
socket.on("connect") { data in
print("WebSocket connection opened to: " + self.wsServerUrl);
self.sigEnter();
}
socket.on("disconnect") { data in
print("WebSocket connection closed.")
}
socket.on("message") { (data, emitter) in
if (data.count == 0) {
return
}
let json = data[0] as! NSDictionary
print("WSS->C: " + json.description);
let type = json["type"] as! Int
if (type == RTCSdpType.offer.rawValue) {
print("Received offer, set offer, sending answer....");
let sdp = RTCSessionDescription(type: RTCSdpType(rawValue: type)!, sdp: json["sdp"] as! String)
self.onOffer(sdp: sdp);
} else if (type == RTCSdpType.answer.rawValue && self.peerStarted) {
print("Received answer, setting answer SDP");
let sdp = RTCSessionDescription(type: RTCSdpType(rawValue: type)!, sdp: json["sdp"] as! String)
self.onAnswer(sdp: sdp);
} else {
print("Unexpected websocket message");
}
}
socket.on("ice") { (data, emitter) in
if (data.count == 0) {
return
}
let json = data[0] as! NSDictionary
print("WSS->C: " + json.description);
let type = json["type"] as! String
if (type == "candidate" && self.peerStarted) {
print("Received ICE candidate...");
let candidate = RTCIceCandidate(
sdp: json["candidate"] as! String,
sdpMLineIndex: Int32(json["sdpMLineIndex"] as! Int),
sdpMid: json["sdpMid"] as? String)
self.onCandidate(candidate: candidate);
} else {
print("Unexpected websocket message");
}
}
socket.connect();
}
func sigReconnect() {
socket.disconnect();
socket.connect();
}
func sigEnter() {
let roomName = getRoomName();
print("Entering room: " + roomName);
socket.emit("enter", roomName);
}
func sigSend(msg:NSDictionary) {
socket.emit("message", msg)
}
func sigSendIce(msg:NSDictionary) {
socket.emit("ice", msg)
}
}
So I think I need an array of peer connections, and the mediaStream, localAudioTrack and dataChannel should be shared between them, because the local audio is the same for every peer? Are there good solutions for this? I don't know how to implement it properly.
I am investigating different questions and projects that deal with a multi-call WebRTC setup.
I saw this (website) WebRTC setup on GitHub:
https://github.com/anoek/webrtc-group-chat-example/blob/master/client.html
I'm going to try to reverse-engineer it into Swift :). Any help is really appreciated.

I would suggest against a one-to-many architecture in which a single device needs to send its media to all the others. It breaks down awfully fast (after about 2-3 devices it needs to connect to).
The reason is that uplinks are usually limited in capacity, and even when they aren't, devices aren't really geared towards streaming that much data to many other devices.
To do what you want at "scale", use a server component that routes the media to the other devices. Look at https://jitsi.org/ and http://www.kurento.org/ for starting points.

What you're trying to achieve can be done with multiple peer connections: one client creates a separate peer connection with each of the others, which is a mesh topology from the application's point of view. This is the most straightforward way, and if you have recently learned WebRTC and know how to implement a single peer connection, you can try handling multiple peer connections; a hedged sketch follows below. In practice, though, once you have dozens of clients it becomes hard, because it eats up hardware resources and bandwidth. In that case, people maintain a shared server that all the clients connect to; the server mixes the individual streams and distributes them, which is a star topology. There are some well-known services for this, such as TokBox and Jitsi Meet.
You may also want to look into the SFU model: https://webrtcglossary.com/sfu/
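To make the mesh idea concrete against the question's own code, here is a minimal sketch written as methods on the ViewController above. The peers dictionary, the peerId parameter and the "to" routing field are assumptions (your signaling server would have to tag and route messages per peer); peerConnectionFactory, mediaConstraints, mediaStream and sigSend are the names from the question.
// One RTCPeerConnection per remote peer; the same local mediaStream
// (and therefore the same localAudioTrack) is added to every connection.
var peers: [String: RTCPeerConnection] = [:]

func connection(for peerId: String) -> RTCPeerConnection {
    if let existing = peers[peerId] { return existing }
    let config = RTCConfiguration()
    config.iceServers = [RTCIceServer(urlStrings: ["stun:stun.l.google.com:19302"])]
    let pc = peerConnectionFactory.peerConnection(with: config,
                                                  constraints: mediaConstraints,
                                                  delegate: self)
    pc.add(mediaStream) // shared local audio
    peers[peerId] = pc
    return pc
}

// Handling an incoming offer from one specific peer.
func onOffer(from peerId: String, sdp: RTCSessionDescription) {
    let pc = connection(for: peerId)
    pc.setRemoteDescription(sdp) { _ in
        pc.answer(for: self.mediaConstraints) { answer, error in
            guard let answer = answer, error == nil else { return }
            pc.setLocalDescription(answer) { _ in
                // "to" is an assumed field that the signaling server uses for routing.
                self.sigSend(msg: ["type": answer.type.rawValue,
                                   "sdp": answer.sdp,
                                   "to": peerId] as NSDictionary)
            }
        }
    }
}
Since every RTCPeerConnectionDelegate callback hands you the RTCPeerConnection that fired it, you can look up the matching peer id in peers when forwarding ICE candidates.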

Related

iOS screen sharing (ReplayKit) using WebRTC in swift

I implemented a WebRTC SDK for video calling and it's working fine. During a video call a user can share their screen with another user.
I am using ReplayKit for screen sharing.
Here is my code:
class SampleHandler: RPBroadcastSampleHandler {
var peerConnectionFactory: RTCPeerConnectionFactory?
var localVideoSource: RTCVideoSource?
var videoCapturer: RTCVideoCapturer?
var peerConnection: RTCPeerConnection?
var localVideoTrack: RTCVideoTrack?
var disconnectSemaphore: DispatchSemaphore?
var videodelegate:VideoViewExtensionDelegate?
var signalClient: SignalingClient? = nil
let config = Config.default
let peerConnectionfactory: RTCPeerConnectionFactory = {
RTCInitializeSSL()
let videoEncoderFactory = RTCDefaultVideoEncoderFactory()
let videoDecoderFactory = RTCDefaultVideoDecoderFactory()
return RTCPeerConnectionFactory(encoderFactory: videoEncoderFactory, decoderFactory: videoDecoderFactory)
}()
private let mediaConstrains = [kRTCMediaConstraintsOfferToReceiveAudio: kRTCMediaConstraintsValueFalse,
kRTCMediaConstraintsOfferToReceiveVideo: kRTCMediaConstraintsValueTrue]
static let kAudioSampleType = RPSampleBufferType.audioMic
override func broadcastStarted(withSetupInfo setupInfo: [String : NSObject]?) {
self.SetupVideo()
}
override func broadcastPaused() {
// User has requested to pause the broadcast. Samples will stop being delivered.
// self.audioTrack?.isEnabled = false
// self.screenTrack?.isEnabled = false
}
override func broadcastResumed() {
// User has requested to resume the broadcast. Samples delivery will resume.
// self.audioTrack?.isEnabled = true
// self.screenTrack?.isEnabled = true
}
override func broadcastFinished() {
// User has requested to finish the broadcast.
}
func SetupVideo() {
if #available(iOS 13.0, *) {
let webSocketProvider: WebSocketProvider
webSocketProvider = NativeWebSocket(url: self.config.signalingServerUrl)
self.signalClient = SignalingClient(webSocket: webSocketProvider)
let config = RTCConfiguration()
// config.iceServers = [RTCIceServer(urlStrings: iceServers)]
config.iceServers = [RTCIceServer(urlStrings:["// TURN & STUN server URL"],
username:"// username",
credential:"// password")]
// Unified Plan is preferred over the older Plan B
// config.sdpSemantics = .unifiedPlan
// gatherContinually lets WebRTC listen for network changes and send any new candidates to the other client
config.continualGatheringPolicy = .gatherContinually
let screenSharefactory = self.peerConnectionfactory
let constraints = RTCMediaConstraints(mandatoryConstraints: nil,
optionalConstraints: ["DtlsSrtpKeyAgreement":kRTCMediaConstraintsValueTrue])
self.peerConnection = screenSharefactory.peerConnection(with: config, constraints: constraints, delegate: nil)
self.peerConnection?.delegate = self
self.localVideoSource = screenSharefactory.videoSource()
self.videoCapturer = RTCVideoCapturer(delegate: self.localVideoSource!)
self.localVideoTrack = screenSharefactory.videoTrack(with: self.localVideoSource!, trackId:"video0")
// let videoSender = newpeerConnection.sender(withKind: kRTCMediaStreamTrackKindVideo, streamId: "stream")
// videoSender.track = videoTrack
let mediaStream: RTCMediaStream = (screenSharefactory.mediaStream(withStreamId: "1"))
mediaStream.addVideoTrack(self.localVideoTrack!)
self.peerConnection?.add(mediaStream)
self.offer(peerconnection: self.peerConnection!) { (sdp) in
self.signalClient?.send(sdp: sdp)
}
}
}
func offer(peerconnection: RTCPeerConnection, completion: @escaping (_ sdp: RTCSessionDescription) -> Void) {
let constrains = RTCMediaConstraints(mandatoryConstraints: self.mediaConstrains,
optionalConstraints: nil)
peerconnection.offer(for: constrains) { (sdp, error) in
guard let sdp = sdp else {
return
}
peerconnection.setLocalDescription(sdp, completionHandler: { (error) in
completion(sdp)
})
}
}
override func processSampleBuffer(_ sampleBuffer: CMSampleBuffer, with sampleBufferType: RPSampleBufferType) {
switch sampleBufferType {
case RPSampleBufferType.video:
guard let imageBuffer: CVImageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
break
}
let rtcpixelBuffer = RTCCVPixelBuffer(pixelBuffer: imageBuffer)
let timeStampNs: Int64 = Int64(CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)) * 1000000000)
let videoFrame = RTCVideoFrame(buffer: rtcpixelBuffer, rotation: RTCVideoRotation._0, timeStampNs: timeStampNs)
print(videoFrame)
self.localVideoSource?.capturer(self.videoCapturer!, didCapture: videoFrame)
break
case RPSampleBufferType.audioApp:
if (SampleHandler.kAudioSampleType == RPSampleBufferType.audioApp) {
// ExampleCoreAudioDeviceCapturerCallback(audioDevice, sampleBuffer)
}
break
case RPSampleBufferType.audioMic:
if (SampleHandler.kAudioSampleType == RPSampleBufferType.audioMic) {
}
break
@unknown default:
return
}
}
}
extension SampleHandler: RTCPeerConnectionDelegate {
func peerConnection(_ peerConnection: RTCPeerConnection, didChange stateChanged: RTCSignalingState) {
debugPrint("peerConnection new signaling state: \(stateChanged)")
}
func peerConnection(_ peerConnection: RTCPeerConnection, didAdd stream: RTCMediaStream) {
debugPrint("peerConnection did add stream")
}
func peerConnection(_ peerConnection: RTCPeerConnection, didRemove stream: RTCMediaStream) {
debugPrint("peerConnection did remote stream")
}
func peerConnectionShouldNegotiate(_ peerConnection: RTCPeerConnection) {
debugPrint("peerConnection should negotiate")
}
func peerConnection(_ peerConnection: RTCPeerConnection, didChange newState: RTCIceConnectionState) {
debugPrint("peerConnection new connection state: \(newState)")
}
func peerConnection(_ peerConnection: RTCPeerConnection, didChange newState: RTCIceGatheringState) {
debugPrint("peerConnection new gathering state: \(newState)")
}
func peerConnection(_ peerConnection: RTCPeerConnection, didGenerate candidate: RTCIceCandidate) {
debugPrint("peerConnection did Generate")
}
func peerConnection(_ peerConnection: RTCPeerConnection, didRemove candidates: [RTCIceCandidate]) {
debugPrint("peerConnection did remove candidate(s)")
}
func peerConnection(_ peerConnection: RTCPeerConnection, didOpen dataChannel: RTCDataChannel) {
debugPrint("peerConnection did open data channel")
// self.remoteDataChannel = dataChannel
}
}
extension SampleHandler: RTCDataChannelDelegate {
func dataChannelDidChangeState(_ dataChannel: RTCDataChannel) {
debugPrint("dataChannel did change state: \(dataChannel.readyState)")
}
func dataChannel(_ dataChannel: RTCDataChannel, didReceiveMessageWith buffer: RTCDataBuffer) {
}
}
I am using this WebRTC project: https://github.com/stasel/WebRTC-iOS
I am getting the CMSampleBuffer data and the RTCVideoFrame and passing them correctly.
CMSampleBuffer data for reference:
CMSampleBuffer 0x100918370 retainCount: 5 allocator: 0x1e32175e0
invalid = NO
dataReady = YES
makeDataReadyCallback = 0x0
makeDataReadyRefcon = 0x0
formatDescription = <CMAudioFormatDescription 0x282bf0e60 [0x1e32175e0]> {
mediaType:'soun'
mediaSubType:'lpcm'
mediaSpecific: {
ASBD: {
mSampleRate: 44100.000000
mFormatID: 'lpcm'
mFormatFlags: 0xe
mBytesPerPacket: 4
mFramesPerPacket: 1
mBytesPerFrame: 4
mChannelsPerFrame: 2
mBitsPerChannel: 16 }
cookie: {(null)}
ACL: {(null)}
FormatList Array: {
Index: 0
ChannelLayoutTag: 0x650002
ASBD: {
mSampleRate: 44100.000000
mFormatID: 'lpcm'
mFormatFlags: 0xe
mBytesPerPacket: 4
mFramesPerPacket: 1
mBytesPerFrame: 4
mChannelsPerFrame: 2
mBitsPerChannel: 16 }}
}
extensions: {(null)}
}
sbufToTrackReadiness = 0x0
numSamples = 1024
outputPTS = {190371138262458/1000000000 = 190371.138}(based on cachedOutputPresentationTimeStamp)
sampleTimingArray[1] = {
{PTS = {190371138262458/1000000000 = 190371.138}, DTS = {INVALID}, duration = {1/44100 = 0.000}},
}
dataBuffer = 0x2828f1050
I am stuck here and don't know what is wrong with my code. Any help is highly appreciated.
WebRTC is a peer-to-peer connection. If you want to share your screen with someone else, you have to create a CVPixelBuffer from the screen, feed it into WebRTC (typically through a custom RTCVideoCapturer), and create a WebRTC client to connect with the other device. (For a simpler setup, keep the screen-share WebRTC client separate; a hedged sketch follows.)
You cannot connect 3 devices with a single peer connection.
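As a sketch of that capture step (ScreenSampleCapturer and push(_:) are hypothetical names; the delegate is assumed to be the RTCVideoSource the video track was created from, as in the question's code):
import CoreMedia
import WebRTC

// A custom capturer that forwards ReplayKit sample buffers into WebRTC.
final class ScreenSampleCapturer: RTCVideoCapturer {
    // Call this from processSampleBuffer(_:with:) for .video buffers.
    func push(_ sampleBuffer: CMSampleBuffer) {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        let rtcBuffer = RTCCVPixelBuffer(pixelBuffer: pixelBuffer)
        let timeStampNs = Int64(CMTimeGetSeconds(
            CMSampleBufferGetPresentationTimeStamp(sampleBuffer)) * 1_000_000_000)
        let frame = RTCVideoFrame(buffer: rtcBuffer,
                                  rotation: ._0,
                                  timeStampNs: timeStampNs)
        // The delegate is the RTCVideoSource the track was created from.
        delegate?.capturer(self, didCapture: frame)
    }
}
The capturer would be created with the video source as its delegate (RTCVideoCapturer(delegate:)) and kept alive for the whole broadcast.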

How to pass data from delegate method to the observable's onNext method in RxSwift?

I have a manager class which connects to a Bluetooth device and manages its data and state.
The manager class conforms to IWDeviceManagerDelegate and has a method which delivers the weight data: func onReceiveWeightData(_ device: IWDevice!, data: IWWeightData!).
Once I call listenToWeight() from any controller, I want to deliver the data using an Observable.
How do I fire an onNext event with the data from onReceiveWeightData on listenToWeight's observable?
Below is the code.
class WeightMachineManager: NSObject {
func setup() {
IWDeviceManager.shared()?.delegate = self
IWDeviceManager.shared()?.initMgr()
}
func listenToWeight() -> Observable<IWWeightData> {
let tag = WeightMachineManager.tag
if let connectedDevice = connectedDevice {
IWDeviceManager.shared()?.add(connectedDevice, callback: { (device, code) in
if code == .success {
print("\(tag)[SUCCESS] Device added successfully.")
} else {
print("\(tag)[FAILURE] Failed to add device.")
}
})
} else {
print("\(tag)[FAILURE] Couldn't find any device to connect.")
}
}
}
extension WeightMachineManager: IWDeviceManagerDelegate {
func onReceiveWeightData(_ device: IWDevice!, data: IWWeightData!) {
// TODO:- Pass this data in the onNext event of listenToWeight's observable.
}
}
I've made a lot of assumptions in the below, but the result should look something like this:
class WeightMachineManager {
var connectedDevice: IWDevice?
func setup() {
IWDeviceManager.shared()?.initMgr()
}
func listenToWeight() -> Observable<IWWeightData> {
if let connectedDevice = connectedDevice, let deviceManager = IWDeviceManager.shared() {
return deviceManager.rx.add(connectedDevice)
.flatMap { deviceManager.rx.receivedWeightData() } // maybe this should be flatMapLatest or flatMapFirst. It depends on what is calling listenToWeight() and when.
}
else {
return .error(NSError.init(domain: "WeightMachineManager", code: -1, userInfo: nil))
}
}
}
extension IWDeviceManager: HasDelegate {
public typealias Delegate = IWDeviceManagerDelegate
}
class IWDeviceManagerDelegateProxy
: DelegateProxy<IWDeviceManager, IWDeviceManagerDelegate>
, DelegateProxyType
, IWDeviceManagerDelegate {
init(parentObject: IWDeviceManager) {
super.init(parentObject: parentObject, delegateProxy: IWDeviceManagerDelegateProxy.self)
}
public static func registerKnownImplementations() {
self.register { IWDeviceManagerDelegateProxy(parentObject: $0) }
}
}
extension Reactive where Base: IWDeviceManager {
var delegate: IWDeviceManagerDelegateProxy {
return IWDeviceManagerDelegateProxy.proxy(for: base)
}
func add(_ device: IWDevice) -> Observable<Void> {
return Observable.create { observer in
self.base.add(device, callback: { device, code in
if code == .success {
observer.onNext(())
observer.onCompleted()
}
else {
observer.onError(NSError.init(domain: "IWDeviceManager", code: -1, userInfo: nil))
}
})
return Disposables.create()
}
}
func receivedWeightData() -> Observable<IWWeightData> {
return delegate.methodInvoked(#selector(IWDeviceManagerDelegate.onReceiveWeightData(_:data:)))
.map { $0[1] as! IWWeightData }
}
}
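If it helps, a usage sketch from the caller's side (someDevice is an assumed placeholder for whatever discovery step produces the connected IWDevice, and the print bodies are illustrative):
let disposeBag = DisposeBag()
let manager = WeightMachineManager()
manager.setup()
manager.connectedDevice = someDevice // assumed to come from a scan/discovery step

manager.listenToWeight()
    .observeOn(MainScheduler.instance) // deliver weight updates on the main thread
    .subscribe(
        onNext: { weightData in print("weight received: \(weightData)") },
        onError: { error in print("weight stream failed: \(error)") }
    )
    .disposed(by: disposeBag)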

AVCaptureMetadataObjectDelegate not receiving callback

I am making a QR scanner. My code works when all of it is written in one place inside the ViewController, but since I modularised it I am not getting the callback in AVCaptureMetadataOutputObjectsDelegate.
import Foundation
import UIKit
import AVFoundation
class CameraSource : NSObject {
private var session : AVCaptureSession?
private var inputDevice : AVCaptureDeviceInput?
private var videoPreviewLayer : AVCaptureVideoPreviewLayer?
private var captureMetadataOutput : AVCaptureMetadataOutput?
func setCaptureMetadataOutput() {
self.captureMetadataOutput = nil
self.captureMetadataOutput = AVCaptureMetadataOutput()
}
func getCaptureMetadataOutput() -> AVCaptureMetadataOutput? {
return self.captureMetadataOutput
}
func setInputDevice(inputDevice : AVCaptureDeviceInput?) {
self.inputDevice = inputDevice
}
func getInputDevice() -> AVCaptureDeviceInput? {
return self.inputDevice
}
func setSession(session : AVCaptureSession?) {
self.session = session
}
func getSession() -> AVCaptureSession? {
return self.session
}
func setMetadataObjects(metaObjects : [AVMetadataObject.ObjectType], delegate : AVCaptureMetadataOutputObjectsDelegate) {
assert(self.captureMetadataOutput != nil)
self.captureMetadataOutput!.setMetadataObjectsDelegate(delegate, queue: DispatchQueue.main)
self.captureMetadataOutput!.metadataObjectTypes = metaObjects
}
func initViewoPreviewLayer(videoGravity : AVLayerVideoGravity, orientation : AVCaptureVideoOrientation) {
assert(session != nil)
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: session!)
videoPreviewLayer!.videoGravity = videoGravity
videoPreviewLayer!.connection!.videoOrientation = orientation
}
func addVideoLayerToImageView(imageView : UIImageView) {
assert(self.videoPreviewLayer != nil)
imageView.layer.addSublayer(self.videoPreviewLayer!)
self.videoPreviewLayer!.frame = imageView.bounds
}
func startSession() {
assert(session != nil)
self.session!.startRunning()
}
/*==========================================================================
STATIC FUNCTIONS
==========================================================================*/
static func getBackCamera() -> AVCaptureDevice {
return AVCaptureDevice.default(.builtInWideAngleCamera, for: AVMediaType.video, position: .back)!
}
static func getFrontCamera() -> AVCaptureDevice {
return AVCaptureDevice.default(.builtInWideAngleCamera, for: AVMediaType.video, position: .front)!
}
static func isCameraAvailable() -> Bool {
if #available(iOS 10.0, *) {
let count : Int = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera],
mediaType: AVMediaType.video,
position: .unspecified).devices.count
if count > 0 { return true }
}
else {
let count = AVCaptureDevice.devices(for: AVMediaType.video).count
if count > 0 { return true }
}
return false
}
/*==========================================================================
CAMERA BUILDER CLASS
==========================================================================*/
class Builder {
var cameraSource : CameraSource
init() {
cameraSource = CameraSource()
}
func createSession() -> Builder {
if (cameraSource.getSession() != nil) {
cameraSource.setSession(session: nil)
}
cameraSource.setSession(session: AVCaptureSession())
return self
}
func setSessionPreset(preset : AVCaptureSession.Preset) -> Builder {
assert(cameraSource.getSession() != nil)
cameraSource.getSession()!.sessionPreset = preset
return self
}
func attachInputDevice(camera : AVCaptureDevice) throws -> Builder {
try self.prepareInputDevice(camera: camera)
try self.addInputToSession()
assert(cameraSource.inputDevice != nil)
return self
}
func addOutputToSessionForMetaData() throws -> CameraSource {
cameraSource.setCaptureMetadataOutput()
assert(cameraSource.getSession() != nil && cameraSource.getCaptureMetadataOutput() != nil)
if !cameraSource.getSession()!.canAddOutput(cameraSource.getCaptureMetadataOutput()!) {
throw AppErrorCode.cameraError("Unable to attach output to camera session")
}
cameraSource.getSession()!.addOutput(cameraSource.getCaptureMetadataOutput()!)
return self.cameraSource
}
/*==========================================================================
BUILDER PRIVATE FUNCTIONS
==========================================================================*/
private func prepareInputDevice(camera : AVCaptureDevice) throws {
do {
let inputDevice = try AVCaptureDeviceInput(device: camera)
cameraSource.setInputDevice(inputDevice: inputDevice)
} catch let error as NSError {
print(error.localizedDescription)
throw AppErrorCode.cameraError("Unable to attach input to camera session")
}
}
private func addInputToSession() throws {
if(cameraSource.getSession() == nil) {
throw AppErrorCode.cameraError("Unable to create camera session")
}
assert(cameraSource.getInputDevice() != nil && cameraSource.getSession()!.canAddInput(cameraSource.getInputDevice()!))
cameraSource.getSession()!.addInput(cameraSource.getInputDevice()!)
}
}
}
My QR scanner code looks like this:
import UIKit
import Foundation
import AVFoundation
protocol QRScannerDelegate {
func scannedData(_ scannedString : String)
}
class QRScanner : NSObject {
private var cameraSource : CameraSource?
var delegate : QRScannerDelegate?
func prepareCamera (delegate : QRScannerDelegate) throws -> QRScanner {
do {
self.delegate = delegate
self.cameraSource = try CameraSource
.Builder()
.createSession()
.setSessionPreset(preset: .photo)
.attachInputDevice(camera: CameraSource.getBackCamera())
.addOutputToSessionForMetaData()
self.cameraSource!.setMetadataObjects(metaObjects: [.qr], delegate: self as AVCaptureMetadataOutputObjectsDelegate)
} catch let err as NSError {
print(err.localizedDescription)
self.cameraSource = nil
throw AppErrorCode.cameraError("Unable to process camera with one or more issue")
}
return self
}
func initViewoPreviewLayer(videoGravity : AVLayerVideoGravity, orientation : AVCaptureVideoOrientation) -> QRScanner{
assert(cameraSource != nil)
self.cameraSource!.initViewoPreviewLayer(videoGravity: videoGravity, orientation: orientation)
return self
}
func addVideoLayerToImageView(imageView : UIImageView) -> QRScanner{
assert(cameraSource != nil)
self.cameraSource!.addVideoLayerToImageView(imageView: imageView)
return self
}
func startSession() {
assert(cameraSource != nil)
self.cameraSource!.startSession()
}
}
extension QRScanner : AVCaptureMetadataOutputObjectsDelegate {
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
print("Delegate called")
if metadataObjects.count == 0 {
self.delegate?.scannedData("No Data")
} else {
let metadataObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
if metadataObj.type == AVMetadataObject.ObjectType.qr {
if metadataObj.stringValue != nil {
print("Scanner Getting data: \(metadataObj.stringValue!)")
self.delegate?.scannedData(metadataObj.stringValue!)
}
}
}
}
}
I have implemented QRScannerDelegate in my ViewController but I am not getting anything there. Moreover, I am not even getting the callback inside AVCaptureMetadataOutputObjectsDelegate.
I tried passing the ViewController instance as the AVCaptureMetadataOutputObjectsDelegate, and then I was getting the callback with the scanned info.
So my question is: why is this happening?
1) When I pass a normal class as the AVCaptureMetadataOutputObjectsDelegate, I am not getting the callback. But
2) When I pass a UIViewController instance as the AVCaptureMetadataOutputObjectsDelegate, I am able to get the callback.
UPDATE
This is how I am calling prepareCamera from my ViewController:
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
do {
try QRScanner().prepareCamera(delegate: self)
.initViewoPreviewLayer(videoGravity: .resizeAspectFill, orientation: .portrait)
.addVideoLayerToImageView(imageView: self.qrScannerImageView)
.startSession()
} catch {
print("Some Camera Error")
}
self.createOverlay()
}
It's hard to say for sure without knowing how you called prepareCamera, as that is what triggers setMetadataObjectsDelegate, but it looks like you may not be keeping a strong reference to QRScanner in your ViewController (instantiating it as an instance variable). That would explain why the callback is hit when your ViewController is the AVCaptureMetadataOutputObjectsDelegate, as the ViewController is still in memory.
It's also worth noting that if the ViewController is your QRScannerDelegate, you will want to declare the delegate as weak var delegate : QRScannerDelegate? to prevent a memory leak.
EDIT:
Change
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
do {
try QRScanner().prepareCamera(delegate: self)
.initViewoPreviewLayer(videoGravity: .resizeAspectFill, orientation: .portrait)
.addVideoLayerToImageView(imageView: self.qrScannerImageView)
.startSession()
} catch {
print("Some Camera Error")
}
self.createOverlay()
}
to
var qrScanner = QRScanner()
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
do {
try self.qrScanner.prepareCamera(delegate: self)
.initViewoPreviewLayer(videoGravity: .resizeAspectFill, orientation: .portrait)
.addVideoLayerToImageView(imageView: self.qrScannerImageView)
.startSession()
} catch {
print("Some Camera Error")
}
self.createOverlay()
}
and change
protocol QRScannerDelegate {
func scannedData(_ scannedString : String)
}
to
protocol QRScannerDelegate: class {
func scannedData(_ scannedString : String)
}
to allow a weak delegate.
AVCaptureMetadataOutputObjectsDelegate is tough, but you can do some really cool stuff with it, so keep at it!
I pulled some QRScanner code I wrote a while ago and put it into a gist for you if you want to check it out. It's a bit more stripped down than what you have, but you may find it helpful:
https://gist.github.com/aChase55/733ea89af1bfa80c65971d3bc691f0b2

xmppStreamDidConnect not getting called in "swift framework"

I'm creating a framework in Swift. Using CocoaPods I have added XMPPFramework, but somehow I'm not able to connect to my host.
My setup:
class XMPPController: NSObject ,XMPPStreamDelegate{
var XMPP_HOST = "**************";
var userJid:XMPPJID = XMPPJID();
var password = "";
var xmppStream:XMPPStream;
init(jid: String, password: String) {
if let userjabberid = XMPPJID(string: jid) {
self.userJid = userjabberid;
}
self.password = password;
self.xmppStream = XMPPStream();
self.xmppStream.hostName = XMPP_HOST;
self.xmppStream.hostPort = 5222;
self.xmppStream.startTLSPolicy = XMPPStreamStartTLSPolicy.allowed;
self.xmppStream.myJID = self.userJid;
super.init();
self.xmppStream.addDelegate(self, delegateQueue: DispatchQueue.main)
}
and my connect method:
func connect() {
if !self.xmppStream.isDisconnected {
return
}
do {
try self.xmppStream.connect(withTimeout: XMPPStreamTimeoutNone);
} catch let err {
print(err);
}
}
and my delegate methods:
func xmppStreamWillConnect(_ sender: XMPPStream) {
print("will connect");
}
func xmppStream(_ sender: XMPPStream, socketDidConnect socket: GCDAsyncSocket) {
print("socket")
}
func xmppStreamDidStartNegotiation(_ sender: XMPPStream) {
print("negotiate")
}
func xmppStream(_ sender: XMPPStream, didReceiveError error: DDXMLElement) {
print(error);
}
func xmppStreamDidDisconnect(_ sender: XMPPStream, withError error: Error?) {
print("disconnected");
}
func xmppStreamDidConnect(_ sender: XMPPStream) {
print("connected");
try! sender.authenticate(withPassword: self.password);
}
func xmppStreamDidAuthenticate(_ sender: XMPPStream) {
print("authenticated");
}
func xmppStream(_ sender: XMPPStream, didNotAuthenticate error: DDXMLElement) {
print("Stream: Fail to Authenticate");
}
Here, only xmppStreamWillConnect gets called; none of the other delegate methods are called.
You need to init XMPPController as below and call the connect function:
self.xmppController = XMPPController(jid: jid, password: password)
self.xmppController.xmppStream.addDelegate(self, delegateQueue: DispatchQueue.main)
self.xmppController.connect()
Solved by making a singleton of my class as follows:
static let sharedInstance = XMPPController();
And calling it as:
XMPPController.sharedInstance.connect(Withjid: "***#dev.****.com", Andpassword: "password");
I too was having the same issue; later I discovered that I had not started the MongooseIM server.
Download from here
Step 1: Start the server
mongooseimctl start
Step 2: Check the status
mongooseimctl status
Step 3: Create/register a user
mongooseimctl register itsyourusername localhost itsapassword
Step 4: Use these credentials in the XMPP client framework.
Official Docs

Where is the audio stream added in libjingle?

I can't find where the audio stream is added to the 'speaker'. Is it possible for me to modify the stream and add it myself later? I have the feeling that libjingle is handling the stream and adding it.
I have included the libjingle part of my code:
import AVFoundation
import UIKit
let TAG = "ViewController"
let AUDIO_TRACK_ID = TAG + "AUDIO"
let LOCAL_MEDIA_STREAM_ID = TAG + "STREAM"
class ViewController: UIViewController, RTCSessionDescriptionDelegate, RTCPeerConnectionDelegate {
var mediaStream: RTCMediaStream!
var localAudioTrack: RTCAudioTrack!
var remoteAudioTrack: RTCAudioTrack!
var renderer: RTCEAGLVideoView!
var renderer_sub: RTCEAGLVideoView!
var roomName: String!
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
initWebRTC();
sigConnect(wsUrl: "http://192.168.1.59:3000");
localAudioTrack = peerConnectionFactory.audioTrack(withID: AUDIO_TRACK_ID)
mediaStream = peerConnectionFactory.mediaStream(withLabel: LOCAL_MEDIA_STREAM_ID)
mediaStream.addAudioTrack(localAudioTrack)
}
var peerConnectionFactory: RTCPeerConnectionFactory! = nil
var peerConnection: RTCPeerConnection! = nil
var pcConstraints: RTCMediaConstraints! = nil
var audioConstraints: RTCMediaConstraints! = nil
var mediaConstraints: RTCMediaConstraints! = nil
var wsServerUrl: String! = nil
var peerStarted: Bool = false
func initWebRTC() {
RTCPeerConnectionFactory.initializeSSL()
peerConnectionFactory = RTCPeerConnectionFactory()
pcConstraints = RTCMediaConstraints()
audioConstraints = RTCMediaConstraints()
mediaConstraints = RTCMediaConstraints(
mandatoryConstraints: [
RTCPair(key: "OfferToReceiveAudio", value: "true"),
],
optionalConstraints: nil)
}
func prepareNewConnection() -> RTCPeerConnection {
var iceServers: [RTCICEServer] = []
iceServers.append(RTCICEServer(uri: NSURL(string: "stun:stun.l.google.com:19302") as URL!, username: "",
password: ""))
let rtcConfig: RTCConfiguration = RTCConfiguration()
rtcConfig.tcpCandidatePolicy = RTCTcpCandidatePolicy.disabled
rtcConfig.bundlePolicy = RTCBundlePolicy.maxBundle
rtcConfig.rtcpMuxPolicy = RTCRtcpMuxPolicy.require
peerConnection = peerConnectionFactory.peerConnection(withICEServers: iceServers, constraints: pcConstraints, delegate: self)
peerConnection.add(mediaStream);
return peerConnection;
}
func peerConnection(_ peerConnection: RTCPeerConnection!, signalingStateChanged stateChanged: RTCSignalingState) {
}
func peerConnection(_ peerConnection: RTCPeerConnection!, iceConnectionChanged newState: RTCICEConnectionState) {
}
func peerConnection(_ peerConnection: RTCPeerConnection!, iceGatheringChanged newState: RTCICEGatheringState) {
}
func peerConnection(_ peerConnection: RTCPeerConnection!, gotICECandidate candidate: RTCICECandidate!) {
if (candidate != nil) {
print("iceCandidate: " + candidate.description)
let json:[String: AnyObject] = [
"type" : "candidate" as AnyObject,
"sdpMLineIndex" : candidate.sdpMLineIndex as AnyObject,
"sdpMid" : candidate.sdpMid as AnyObject,
"candidate" : candidate.sdp as AnyObject
]
sigSend(msg: json as NSDictionary)
} else {
print("End of candidates. -------------------")
}
}
func peerConnection(_ peerConnection: RTCPeerConnection!, addedStream stream: RTCMediaStream!) {
if (peerConnection == nil) {
return
}
if (stream.audioTracks.count > 1) {
print("Weird-looking stream: " + stream.description)
return
}
}
func peerConnection(_ peerConnection: RTCPeerConnection!, removedStream stream: RTCMediaStream!) {
}
func peerConnection(_ peerConnection: RTCPeerConnection!, didOpen dataChannel: RTCDataChannel!) {
}
func peerConnection(onRenegotiationNeeded peerConnection: RTCPeerConnection!) {
}
}
My thought is that I can catch the audio stream in the function below. Is that correct? In addition, can I add the stream manually to the speaker?
func peerConnection(_ peerConnection: RTCPeerConnection!, addedStream stream: RTCMediaStream!) {
if (peerConnection == nil) {
return
}
if (stream.audioTracks.count > 1) {
print("Weird-looking stream: " + stream.description)
return
}
}
When the WebRTC call is connected, the WebRTC stack uses platform APIs to play or record the audio. You can only control things like:
muting or unmuting the audio stream
using system APIs to increase or decrease the volume or change the audio configuration
You can't add the stream to the speaker manually, but you can change the default audio output to the speaker or a headphone so that the WebRTC audio is redirected to the correct output. This can be done with the AVFoundation APIs; a sketch follows.
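For example, a minimal sketch using AVAudioSession (the function name is an assumption; this is the standard AVFoundation route, not something specific to libjingle):
import AVFoundation

// Redirect call audio to the built-in loudspeaker.
func routeAudioToSpeaker() {
    let session = AVAudioSession.sharedInstance()
    do {
        // .playAndRecord is the usual category for a two-way call.
        try session.setCategory(.playAndRecord,
                                mode: .voiceChat,
                                options: [.defaultToSpeaker, .allowBluetooth])
        // Largely redundant with .defaultToSpeaker, but explicit:
        try session.overrideOutputAudioPort(.speaker)
        try session.setActive(true)
    } catch {
        print("audio session error: \(error)")
    }
}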