Implement async socket recvmsg for RX and TX timestamping in Rust

Objective
This project is a test of using AsyncFd (from tokio) to wrap a raw socket file descriptor so that nix's sendmsg/recvmsg can be driven asynchronously.
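For orientation, here is the core pattern condensed from the listings below. This is only a sketch (the helper name is made up, and it uses a plain nix::unistd::read instead of recvmsg): wrap the RawFd in AsyncFd, await readiness, and run the non-blocking syscall inside try_io so that a WouldBlock result clears the readiness flag.
use std::os::unix::io::{AsRawFd, RawFd};
use tokio::io::unix::AsyncFd;

// Sketch of the AsyncFd pattern used throughout socket.rs below.
async fn recv_once(fd: &AsyncFd<RawFd>, buf: &mut [u8]) -> std::io::Result<usize> {
    loop {
        // Wait until the runtime reports the fd as readable.
        let mut guard = fd.readable().await?;
        match guard.try_io(|inner| {
            // The fd must be non-blocking (e.g. created with SOCK_NONBLOCK).
            nix::unistd::read(inner.as_raw_fd(), buf)
                .map_err(|e| std::io::Error::from_raw_os_error(e as i32))
        }) {
            Ok(result) => return result,
            // WouldBlock: readiness was consumed by try_io, poll again.
            Err(_would_block) => continue,
        }
    }
}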
Code example
socket.rs:
use nix::{
    cmsg_space,
    sys::{
        socket::{ControlMessageOwned, MsgFlags, SockaddrIn},
        time::TimeSpec,
    },
};
use tokio::io::unix::{AsyncFd, TryIoError};
use std::{
    io::{IoSlice, IoSliceMut},
    marker::PhantomData,
    os::unix::prelude::{AsRawFd, RawFd},
};

pub struct AsyncSocket<'a, T: 'a> {
    inner: AsyncFd<RawFd>,
    phantom: PhantomData<&'a T>,
}

impl<'a, T> AsyncSocket<'a, T> {
    pub fn new(fd: RawFd) -> tokio::io::Result<Self> {
        Ok(Self {
            inner: AsyncFd::new(fd)?,
            phantom: PhantomData,
        })
    }

    // Wait for write readiness, then send `buffer` to `socket_address` with sendmsg.
    pub async fn write_to(
        &'a self,
        buffer: &'a [IoSlice<'_>; 1],
        socket_address: &SockaddrIn,
    ) -> Result<usize, TryIoError> {
        let mut guard = self.inner.writable().await.unwrap();
        let flags = MsgFlags::empty();
        let cmsgs = &mut [];
        match guard.try_io(|inner| {
            match nix::sys::socket::sendmsg(
                inner.as_raw_fd(),
                buffer,
                cmsgs,
                flags,
                Some(socket_address),
            ) {
                Ok(sent_bytes) => Ok(sent_bytes),
                Err(errno) => Err(std::io::Error::from_raw_os_error(errno as i32)),
            }
        }) {
            Ok(res) => match res {
                Ok(sent_bytes) => Ok(sent_bytes),
                Err(e) => {
                    eprintln!("Error {}", e);
                    Ok(0)
                }
            },
            Err(e) => Err(e),
        }
    }

    // Wait for read readiness, then call recvmsg with the given flags and print
    // any timestamp control messages that arrive with the payload.
    pub async fn read(
        &'a self,
        buffer: &'a mut [IoSliceMut<'_>; 1],
        flags: MsgFlags,
    ) -> Result<usize, TryIoError> {
        buffer[0].fill(0);
        let mut guard = self.inner.readable().await.unwrap();
        match guard.try_io(|inner| {
            let sys_time =
                nix::time::clock_gettime(nix::time::ClockId::CLOCK_REALTIME).unwrap();
            println!("Real clock {:?}", sys_time);
            println!("FLAG = {:?}", flags);
            match nix::sys::socket::recvmsg::<()>(
                inner.as_raw_fd(),
                buffer,
                Some(&mut cmsg_space!(
                    nix::sys::socket::MsgFlags,
                    nix::sys::socket::TimestampingFlag,
                    nix::sys::socket::SockFlag
                )),
                flags,
            ) {
                Ok(result) => {
                    let mut ts = TimeSpec::new(0, 0);
                    let mut _thw = TimeSpec::new(0, 0);
                    let control_messages: Vec<ControlMessageOwned> = result.cmsgs().collect();
                    println!("Control message length = {}", control_messages.len());
                    for c in control_messages {
                        match c {
                            ControlMessageOwned::ScmTimestampsns(timestamps) => {
                                _thw = timestamps.hw_raw;
                                ts = timestamps.system;
                                println!("Timestamps {:?}", timestamps);
                            }
                            ControlMessageOwned::ScmRights(_) => println!("ScmRights"),
                            ControlMessageOwned::ScmCredentials(_) => println!("ScmCredentials"),
                            ControlMessageOwned::ScmTimestamp(_) => println!("ScmTimestamp"),
                            ControlMessageOwned::ScmTimestampns(_) => println!("ScmTimestampns"),
                            ControlMessageOwned::Ipv4PacketInfo(_) => println!("Ipv4PacketInfo"),
                            ControlMessageOwned::Ipv6PacketInfo(_) => println!("Ipv6PacketInfo"),
                            ControlMessageOwned::Ipv4OrigDstAddr(_) => println!("Ipv4OrigDstAddr"),
                            ControlMessageOwned::Ipv6OrigDstAddr(_) => println!("Ipv6OrigDstAddr"),
                            ControlMessageOwned::UdpGroSegments(_) => println!("UdpGroSegments"),
                            ControlMessageOwned::RxqOvfl(_) => println!("RxqOvfl"),
                            ControlMessageOwned::Ipv4RecvErr(a, b) => {
                                println!("Received ipv4 Err {:?} from {:?}", a, b);
                            }
                            ControlMessageOwned::Ipv6RecvErr(_, _) => println!("Ipv6RecvErr"),
                            _ => println!("Other"),
                        }
                    }
                    let soft_diff = diff_systime(ts, sys_time);
                    // let hw_diff = diff_systime(thw, sys_time);
                    if soft_diff != sys_time {
                        let delta = std::time::Duration::from(soft_diff).as_micros();
                        println!("Soft Delta is {}", delta);
                    }
                    // } else if hw_diff != sys_time {
                    //     let delta = std::time::Duration::from(hw_diff).as_micros();
                    //     println!("Hard Delta is {}", delta);
                    // }
                    Ok(result.bytes)
                }
                Err(errno) => {
                    match errno {
                        nix::errno::Errno::EAGAIN => println!("EAGAIN Error"),
                        _ => println!("Other error {:?}", errno),
                    }
                    Err(std::io::Error::from_raw_os_error(errno as i32))
                }
            }
        }) {
            Ok(res) => match res {
                Ok(read_bytes) => Ok(read_bytes),
                Err(_e) => {
                    println!("Error from socket {:?}", std::io::Error::last_os_error());
                    Ok(0)
                }
            },
            Err(e) => {
                println!("Guard error {:?}", std::io::Error::last_os_error());
                Err(e)
            }
        }
    }
}

impl<'a, T> AsRawFd for AsyncSocket<'a, T> {
    fn as_raw_fd(&self) -> RawFd {
        self.inner.as_raw_fd()
    }
}

// AsyncFd does not close the wrapped fd, so close it explicitly on drop.
impl<'a, T> Drop for AsyncSocket<'a, T> {
    fn drop(&mut self) {
        let fd = self.inner.as_raw_fd();
        unsafe { nix::libc::close(fd) };
    }
}

fn diff_systime(first: TimeSpec, second: TimeSpec) -> TimeSpec {
    if second > first {
        second - first
    } else {
        first - second
    }
}
error.rs:
#[derive(thiserror::Error, Debug)]
pub enum LibError {
    #[error(transparent)]
    AddrParseError(#[from] std::net::AddrParseError),
    #[error(transparent)]
    IO(#[from] std::io::Error),
    #[error(transparent)]
    OSError(#[from] nix::Error),
}
main.rs:
use std::{
    io::{IoSlice, IoSliceMut},
    str::FromStr,
    sync::{
        atomic::{AtomicU8, Ordering},
        Arc,
    },
    time::Duration,
};

mod error;
mod socket;

use nix::sys::socket::{
    bind, setsockopt,
    sockopt::{self},
    AddressFamily, MsgFlags, SockFlag, SockProtocol, SockType, SockaddrIn, TimestampingFlag,
};
use socket::AsyncSocket;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let local_sock_addr = SockaddrIn::from_str("0.0.0.0:6790").unwrap();
    let local_sock_addr1 = SockaddrIn::from_str("192.168.1.84:44581").unwrap();
    let send_sock_addr = SockaddrIn::from_str("192.168.1.123:6790").unwrap();

    let rsock = nix::sys::socket::socket(
        AddressFamily::Inet,
        SockType::Datagram,
        SockFlag::all(),
        SockProtocol::Udp,
    )?;
    let ssock = nix::sys::socket::socket(
        AddressFamily::Inet,
        SockType::Datagram,
        SockFlag::all(),
        SockProtocol::Udp,
    )?;

    // let sock_txtime = sock_txtime {
    //     clockid: nix::time::ClockId::CLOCK_MONOTONIC.as_raw(),
    //     flags: SOF_TXTIME_REPORT_ERRORS,
    // };

    setsockopt(rsock, sockopt::Timestamping, &TimestampingFlag::all())?;
    setsockopt(ssock, sockopt::Timestamping, &TimestampingFlag::all())?;
    // setsockopt(ssock, sockopt::ReuseAddr, &true)?;
    // setsockopt(rsock, sockopt::ReuseAddr, &true)?;
    // setsockopt(ssock, sockopt::TxTime, &sock_txtime)?;

    bind(ssock, &local_sock_addr1)?;
    bind(rsock, &local_sock_addr)?;

    let recv_socket: AsyncSocket<i32> = AsyncSocket::new(rsock)?;
    let send_socket: AsyncSocket<i32> = AsyncSocket::new(ssock)?;

    let atomic_i = Arc::new(AtomicU8::new(1));

    let mut read_buf = [0u8; 1024];
    let mut iov2 = [IoSliceMut::new(&mut read_buf)];
    // let mut rbuf1 = [0u8; 1024];
    let mut rbuf2 = [0u8; 1024];
    // let mut iov3 = [IoSliceMut::new(&mut rbuf1)];
    let mut iov4 = [IoSliceMut::new(&mut rbuf2)];

    loop {
        tokio::select! {
            read = recv_socket.read(&mut iov2, MsgFlags::empty()) => {
                match read {
                    Ok(v) => {
                        println!("Recv sock Received {} bytes in mes {:?}", v, iov2[0].iter().take(v).collect::<Vec<&u8>>());
                        let i = atomic_i.load(Ordering::Relaxed);
                        let sbuf: Vec<u8> = (1u8..=i).collect();
                        let iov1 = [IoSlice::new(sbuf.as_slice())];
                        tokio::time::sleep(Duration::from_millis(15)).await;
                        let _ = recv_socket.write_to(&iov1, &local_sock_addr1).await;
                    },
                    Err(e) => println!("Recv Err {:?}", e),
                }
            },
            _tick = tokio::time::sleep(Duration::from_millis(500)) => {
                // println!("Tick");
                let i = atomic_i.load(Ordering::Relaxed);
                if i == 3 {
                    continue;
                    // In case you want the sending to last forever
                    // atomic_i
                    //     .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |n| Some(n - n))
                    //     .unwrap();
                    // break;
                }
                let sbuf: Vec<u8> = (1u8..=i).collect();
                let iov1 = [IoSlice::new(sbuf.as_slice())];
                let _ = send_socket.write_to(&iov1, &send_sock_addr).await;
                // Calling read here results in a deadlock
                println!("Message {} sent", i);
                atomic_i.fetch_add(1, Ordering::Relaxed);
            },
            read2 = send_socket.read(&mut iov4, MsgFlags::empty()) => {
                match read2 {
                    Ok(v) => {
                        println!("Send sock Received {} bytes in mes {:?}", v, iov4[0].iter().take(v).collect::<Vec<&u8>>());
                        // This second read call is done to retrieve any messages present in the Error queue (timestamps are there)
                        // match send_socket.read(&mut iov3, MsgFlags::MSG_ERRQUEUE).await {
                        //     Ok(v) => println!("Send sock Received from Error queue {} bytes in mes {:?}", v, iov3[0].iter().take(v).collect::<Vec<&u8>>()),
                        //     Err(e) => println!("Send Err {:?}", e),
                        // }
                    },
                    Err(e) => println!("Send Err {:?}", e),
                }
            },
            // Adding this entry results in very inconsistent behavior for receiving Tx timestamps
            // read1 = send_socket.read(&mut iov3, MsgFlags::MSG_ERRQUEUE) => {
            //     match read1 {
            //         Ok(v) => println!("Send sock Received from Error queue {} bytes in mes {:?}", v, iov3[0].iter().take(v).collect::<Vec<&u8>>()),
            //         Err(e) => println!("Send Err {:?}", e),
            //     }
            // },
        }
        println!("\n");
    }
    // Ok(())
}
Cargo.toml:
[package]
name = "socket-timestamp-test"
version = "0.1.0"
edition = "2021"
[dependencies]
bytes = "1.3.0"
futures = "0.3.25"
log = "0.4.17"
mio = { version = "0.8.5", features = ["os-ext"] }
nix = { version = "0.26.1", features = ["socket"] }
num_enum = "0.5.7"
thiserror = "1.0.37"
tokio = { version = "1.2", features = [
    "sync",
    "net",
    "macros",
    "rt-multi-thread",
    "time",
] }
Motivation
The motivation is to be able to access configuration that is not exposed by common user-space networking libraries (such as tokio, std, socket2, smoltcp, etc.), in particular retrieving timestamp messages for RX and TX packets.
These values can be retrieved with a call to libc's recvmsg on a correctly configured socket (one set up with the SOF_TIMESTAMPING flags).
TX timestamps can be retrieved from the socket error queue by reading the socket's control messages.
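As a point of reference, the mechanism described above can be exercised with plain blocking calls before any async runtime is involved. The sketch below uses the same nix 0.26 API as the listings (the function name, peer address, and payload are placeholders): it enables SOF timestamping on a UDP socket, sends one datagram, and then drains the error queue with MSG_ERRQUEUE to pick up the ScmTimestampsns control message.
use std::io::{IoSlice, IoSliceMut};
use std::str::FromStr;

use nix::sys::socket::{
    recvmsg, sendmsg, setsockopt, socket, sockopt, AddressFamily, ControlMessageOwned, MsgFlags,
    SockFlag, SockProtocol, SockType, SockaddrIn, TimestampingFlag,
};

fn tx_timestamp_demo() -> nix::Result<()> {
    let fd = socket(AddressFamily::Inet, SockType::Datagram, SockFlag::empty(), SockProtocol::Udp)?;
    setsockopt(fd, sockopt::Timestamping, &TimestampingFlag::all())?;

    // Placeholder peer address and payload.
    let dst = SockaddrIn::from_str("192.168.1.123:6790").unwrap();
    let payload = [IoSlice::new(b"ping")];
    sendmsg(fd, &payload, &[], MsgFlags::empty(), Some(&dst))?;

    // The TX timestamp shows up on the error queue, not in the receive buffer.
    // An empty error queue returns EAGAIN, so a real program would retry.
    let mut buf = [0u8; 1024];
    let mut iov = [IoSliceMut::new(&mut buf)];
    let mut cmsg = nix::cmsg_space!(
        nix::sys::socket::MsgFlags,
        nix::sys::socket::TimestampingFlag,
        nix::sys::socket::SockFlag
    );
    let msg = recvmsg::<()>(fd, &mut iov, Some(&mut cmsg), MsgFlags::MSG_ERRQUEUE)?;
    for c in msg.cmsgs() {
        if let ControlMessageOwned::ScmTimestampsns(ts) = c {
            println!("TX software timestamp: {:?}", ts.system);
        }
    }
    Ok(())
}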
Current problems
TX timestamps are placed on the socket error queue (and read back with MSG_ERRQUEUE), but the socket is only polled when a message arrives in the receive buffer. This results in the following behavior:
Socket 1 sends a message
The timestamping data is queued on the error queue
... no polling is triggered, so the timestamp message is not retrieved
Socket 1 receives the response
If recvmsg is called with MSG_ERRQUEUE:
The socket is polled until all timestamps from the sent messages are received
If recvmsg is called without MSG_ERRQUEUE:
The socket is polled and the payload of the response is received, but no data from the error queue is read (which is the expected behavior)
If two calls to recvmsg are placed in the select! block, the behavior becomes inconsistent: sometimes the error-queue messages arrive and sometimes they don't. This means a second call to recvmsg is required to receive the remaining TX timestamps.
Intended overall behavior
If the socket is configured to generate TX timestamps, and the error queue has received such information, the async runtime should automatically poll for those results.
When data arrives in the normal receive buffer, the async runtime should also poll for these results.
Possible solutions
One way to fix this would be to poll the socket with recvmsg(... MSG_ERRQUEUE) after sending the packet, as sketched below. But this is cumbersome, and due to scheduling the recvmsg call can't be made immediately after the send. Ideally, polling would happen both when there is a message in the receive buffer and when the receive buffer is empty but there is a queued entry in the error queue.
Another solution would be to place a recvmsg with MSG_ERRQUEUE right after the recvmsg without MSG_ERRQUEUE. The problem here is that if the socket has not received anything, the error queue is never polled either.
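For the first workaround, a sketch of what "poll the error queue after the send" could look like on top of the AsyncSocket type from the listing above. The helper name, the retry count, and the 1 ms back-off are arbitrary choices for illustration, not a tested solution:
use std::io::{IoSlice, IoSliceMut};
use std::time::Duration;

use nix::sys::socket::{MsgFlags, SockaddrIn};

use crate::socket::AsyncSocket;

// Send a datagram, then explicitly drain the error queue so the TX timestamp
// control message is picked up even though nothing lands in the recv buffer.
async fn send_and_collect_tx_timestamp(
    sock: &AsyncSocket<'_, i32>,
    payload: &[IoSlice<'_>; 1],
    dst: &SockaddrIn,
) {
    let _ = sock.write_to(payload, dst).await;

    let mut buf = [0u8; 1024];
    let mut iov = [IoSliceMut::new(&mut buf)];
    for _ in 0..5 {
        // read() already prints any ScmTimestampsns control message it finds.
        match sock.read(&mut iov, MsgFlags::MSG_ERRQUEUE).await {
            Ok(n) if n > 0 => break,
            // Timestamp not queued yet (EAGAIN is mapped to Ok(0)); retry shortly.
            _ => tokio::time::sleep(Duration::from_millis(1)).await,
        }
    }
}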
Request for comments
I would like help figuring out how to retrieve TX timestamps without having to manually call recvmsg(... MSG_ERRQUEUE) to get them.
I'm open to suggestions on different approaches to the problem, i.e., without tokio's AsyncFd.

Related

Ionic infinite loop on changing observable value

I want to use a presence channel in my Ionic app to check at any moment whether a user is connected or not.
Inside my service I have:
import { BehaviorSubject, Observable } from 'rxjs';
import { PusherServicesService } from 'src/services/pusher-services/pusher-services.service';

export class AuthService {
  connectedUser = new BehaviorSubject<any>([]);

  constructor(
    private pusherService: PusherServicesService
  ) {}

  connectedUserObservable(): Observable<any> {
    return this.connectedUser.asObservable();
  }

  connected() {
    this.pusherService.connected().here(data => {
      console.log('here', data);
      this.connectedUser.next(data);
    }).joining(data => {
      console.log('joining', data);
      this.connectedUserObservable().subscribe(res => {
        let tmp = res;
        const i = tmp.findIndex(item => item.id == data.id);
        if (i == -1) {
          tmp.push(data);
          this.connectedUser.next(tmp);
        }
      });
    }).leaving(data => {
      console.log('leaving', data);
      this.connectedUserObservable().subscribe(res => {
        var tmp = res;
        const i = tmp.findIndex(item => item.id == data.id);
        console.log('tmp', tmp);
        console.log('index', i);
        if (i > -1) {
          tmp.splice(i, 1);
          console.log('tmp sliced', tmp);
          this.connectedUser.next(tmp);
          return;
        }
      });
    });
  }
}
The joining works fine; I can get all connected users across my application. But when a user leaves the channel (leaving), I get an infinite loop of my console.log output:
tmp (2) [{…}, {…}]
index 1
tmp sliced [{…}]
and in the end it returns this error:
tmp (2) [empty × 2]
index 1
tmp sliced [empty]
tmp (2) [empty × 2]
index 1
tmp sliced [empty]
core.js:6479 ERROR RangeError: Maximum call stack size exceeded
at SafeSubscriber.__tryOrUnsub (VM96219 vendor.js:228930)
at SafeSubscriber.next (VM96219 vendor.js:228861)
at Subscriber._next (VM96219 vendor.js:228811)
at Subscriber.next (VM96219 vendor.js:228788)
at BehaviorSubject.next (VM96219 vendor.js:228572)
at BehaviorSubject.next (VM96219 vendor.js:228041)
at SafeSubscriber._next (VM96220 main.js:2555)
at SafeSubscriber.__tryOrUnsub (VM96219 vendor.js:228922)
at SafeSubscriber.next (VM96219 vendor.js:228861)
at Subscriber._next (VM96219 vendor.js:228811)
After each subscription you should use the take(1) operator (so you unsubscribe after each change in your data) to avoid the memory leak and the infinite loop.
Your new observable:
import { take } from 'rxjs/operators';
.....
connectedUserObservable(): Observable<any> {
    return this.connectedUser.asObservable().pipe(take(1));
}

Using a socket to communicate between a Node app and a C# service

Hi!
I have to communicate between a Node.js application [client side] and a C# application [server side].
Each side sends pings to the other over TCP.
I managed to create both the client and the server side, but I have two problems:
After the first ping from the client to the server, the client throws an error:
This socket has been ended by the other party
What am I doing wrong?
The server reads the incoming pings into a large buffer.
When I decode it to a string, I get a string with a length of 4096.
How do I read the exact message length?
This is the server side code: [c#, .NET 6]
using System.Net;
using System.Net.Sockets;
using System.Text;

Console.WriteLine("Start...");
CancellationTokenSource cts = new();
TcpListener listener = new(IPAddress.Any, 11111);
try
{
    listener.Start();
    Console.WriteLine("Listining...");
    var clientCounter = 0;
    var ct = cts.Token;
    while (!ct.IsCancellationRequested)
    {
        using TcpClient client = await listener.AcceptTcpClientAsync(ct)
            .ConfigureAwait(false);
        using NetworkStream stream = client.GetStream();
        _ = PrintUploadedData(stream, ct);
        _ = PushData(stream, ct);
        clientCounter++;
        Console.WriteLine("New client ({0}) connected", clientCounter);
    }
}
finally
{
    cts.Cancel();
    listener.Stop();
}

async Task PrintUploadedData(NetworkStream stream, CancellationToken ct)
{
    var buf = new byte[4096];
    while (!ct.IsCancellationRequested)
    {
        var timeout = CancellationTokenSource.CreateLinkedTokenSource(
            ct,
            new CancellationTokenSource(TimeSpan.FromMinutes(3)).Token);
        try
        {
            var amountRead = await stream.ReadAsync(buf, timeout.Token);
            if (timeout.IsCancellationRequested)
            {
                Console.Error.WriteLine("No Message.");
                break;
            }
            if (amountRead == 0) break; // end of stream.
            var message = Encoding.UTF8.GetString(buf);
            Console.WriteLine(message);
        }
        catch (OperationCanceledException)
        {
            Console.Error.WriteLine("Time out");
            break;
        }
    }
}

async Task PushData(NetworkStream stream, CancellationToken ct)
{
    while (!ct.IsCancellationRequested)
    {
        var messageToSend = DateTime.Now.TimeOfDay.ToString();
        var messageBytes = Encoding.UTF8.GetBytes(messageToSend);
        await stream.WriteAsync(messageBytes, ct).ConfigureAwait(false);
        await Task.Delay(TimeSpan.FromSeconds(15), ct);
    }
}
And the client side code [node.js]:
import { Socket } from 'net';

var client = new Socket();
client.on('connect', () => console.log("CONNECTED"));
client.on('data', data => console.log("data", data.toString()));
client.on('error', err => console.error(err));
client.connect(11111, "127.0.0.1");

printMessages();

async function printMessages() {
    for (let i = 0; i < 10; i++) {
        client.write('Ping ' + i);
        await sleep(4000);
    }
}

function sleep(ms) {
    return new Promise(resolve => setTimeout(resolve, ms));
}

Subscribing to BLE notifications with flutter_reactive_ble throws an error?

I'm pretty new to Dart and know nothing about Swift. I'm trying to use the flutter_reactive_ble package to manage the BT side of an app I'm building. So far things have gone smoothly, up to the point where attempting to enable notifications fails and crashes the app.
The problem occurs when trying to call the subscribeToCharacteristic method like this:
import 'dart:async';
import 'dart:typed_data';

import 'package:flutter_reactive_ble/flutter_reactive_ble.dart';

class MeshProxyRx extends ReactiveState<Uint8List> {
  MeshProxyRx(this._ble);

  final FlutterReactiveBle _ble;
  final _stateStreamController = StreamController<Uint8List>.broadcast();
  StreamSubscription _meshProxyDataOut;
  Uint8List rxDataBuffer;

  @override
  Stream<Uint8List> get state => _stateStreamController.stream;

  startNotification(_proxyDevice) {
    print('Notification Start');
    final characteristic = QualifiedCharacteristic(
        serviceId: serviceUuid,
        characteristicId: characteristicUuid,
        deviceId: _proxyDevice);
    _meshProxyDataOut =
        _ble.subscribeToCharacteristic(characteristic).listen((data) {
      rxDataBuffer.addAll(data);
      _stateStreamController.add(rxDataBuffer);
      // code to handle incoming data
      // ProxyNodeNotify(nodeAddress: [0x18, 0x28]);
      print(data);
    }, onError: (dynamic error) {
      // code to handle errors
      print('Start Notification returns Error: $error');
    });
  }

  Future<void> closeStream() async {
    await _stateStreamController.close();
  }

  Future<void> closeSubscription() async {
    _meshProxyDataOut?.cancel();
  }
}
The error that is thrown is:
flutter: REACTIVE_BLE: Start connecting to device with arguments (deviceId: 5523499D-8846-A794-F350-1B5E258859F3, servicesWithCharacteristicsToDiscover: null, timeout: null)
flutter: REACTIVE_BLE: Received ConnectionStateUpdate(deviceId: 5523499D-8846-A794-F350-1B5E258859F3, connectionState: DeviceConnectionState.connected, failure: null)
flutter: Notification Start
flutter: REACTIVE_BLE: Start subscribing to notifications for QualifiedCharacteristic(characteristicId: 2ade, serviceId: 1828, deviceId: 5523499D-8846-A794-F350-1B5E258859F3)
Assertion failed: file flutter_reactive_ble/PluginController.swift, line 103
Assertion failed: file flutter_reactive_ble/PluginController.swift, line 103
* thread #1, queue = 'com.apple.main-thread', stop reason = Assertion failed
frame #0: 0x00000001a9cb8e08 libswiftCore.dylib`_swift_runtime_on_report
libswiftCore.dylib`_swift_runtime_on_report:
-> 0x1a9cb8e08 <+0>: ret
libswiftCore.dylib`_swift_reportToDebugger:
0x1a9cb8e0c <+0>: b 0x1a9cb8e08 ; _swift_runtime_on_report
libswiftCore.dylib`_swift_shouldReportFatalErrorsToDebugger:
0x1a9cb8e10 <+0>: adrp x8, 346593
0x1a9cb8e14 <+4>: ldrb w0, [x8, #0x7c8]
Target 0: (Runner) stopped.
Lost connection to device.
For reference, the assert that fails is the guard let sink = context.characteristicValueUpdateSink line in this Swift code block:
onCharacteristicValueUpdate: papply(weak: self) {
    context, central, characteristic, value, error in
    guard let sink = context.characteristicValueUpdateSink
    else { assert(false); return }
    let message = CharacteristicValueInfo.with {
        $0.characteristic = CharacteristicAddress.with {
            $0.characteristicUuid = Uuid.with { $0.data = characteristic.id.data }
            $0.serviceUuid = Uuid.with { $0.data = characteristic.serviceID.data }
            $0.deviceID = characteristic.peripheralID.uuidString
        }
        if let value = value {
            $0.value = value
        }
        if let error = error {
            $0.failure = GenericFailure.with {
                $0.code = Int32(CharacteristicValueUpdateFailure.unknown.rawValue)
                $0.message = "\(error)"
            }
        }
    }
    sink.add(.success(message))
}
The assert is doing what it is supposed to do, but I cannot figure out what I'm doing wrong in the way I set up and make the call to subscribeToCharacteristic. I suspect it may be the way the Stream is set up and not getting passed down properly to the Swift code, since the assert seems to be guarding against a null sink? But I really don't have enough experience.
Help is greatly appreciated!!

Alternative to using 'await' with the lazy_static! macro in Rust?

I want to use Async MongoDB in a project.
I don't want to pass the client around, because it would need to go through multiple tasks and threads. So I kept a static client using lazy_static. However, I can't use await in the initialization block.
What can I do to work around this?
Suggestions for doing it without lazy_static are also welcome.
use std::env;

use futures::stream::StreamExt;
use mongodb::{
    bson::{doc, Bson},
    options::ClientOptions,
    Client,
};

lazy_static! {
    static ref MONGO: Option<Client> = {
        if let Ok(token) = env::var("MONGO_AUTH") {
            if let Ok(client_options) = ClientOptions::parse(&token).await
                                                                    ^^^^^
            {
                if let Ok(client) = Client::with_options(client_options) {
                    return Some(client);
                }
            }
        }
        return None;
    };
}
I went with this approach, based on someone's suggestion on the Rust forums.
static MONGO: OnceCell<Client> = OnceCell::new();
static MONGO_INITIALIZED: OnceCell<tokio::sync::Mutex<bool>> = OnceCell::new();

pub async fn get_mongo() -> Option<&'static Client> {
    // this is racy, but that's OK: it's just a fast case
    let client_option = MONGO.get();
    if let Some(_) = client_option {
        return client_option;
    }
    // it hasn't been initialized yet, so let's grab the lock & try to
    // initialize it
    let initializing_mutex = MONGO_INITIALIZED.get_or_init(|| tokio::sync::Mutex::new(false));
    // this will wait if another task is currently initializing the client
    let mut initialized = initializing_mutex.lock().await;
    // if initialized is true, then someone else initialized it while we waited,
    // and we can just skip this part.
    if !*initialized {
        // no one else has initialized it yet, so
        if let Ok(token) = env::var("MONGO_AUTH") {
            if let Ok(client_options) = ClientOptions::parse(&token).await {
                if let Ok(client) = Client::with_options(client_options) {
                    if let Ok(_) = MONGO.set(client) {
                        *initialized = true;
                    }
                }
            }
        }
    }
    drop(initialized);
    MONGO.get()
}
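For comparison, recent tokio 1.x releases ship an async-aware cell, tokio::sync::OnceCell (behind the "sync" feature), which folds the racy fast path and the manual mutex above into a single call. A sketch along those lines; the env-var handling mirrors the question, and panicking on a bad URI is a simplification for illustration:
use mongodb::{options::ClientOptions, Client};
use tokio::sync::OnceCell;

static MONGO: OnceCell<Client> = OnceCell::const_new();

pub async fn get_mongo() -> &'static Client {
    MONGO
        .get_or_init(|| async {
            // Runs at most once; concurrent callers wait for the winner.
            let token = std::env::var("MONGO_AUTH").expect("MONGO_AUTH not set");
            let options = ClientOptions::parse(&token).await.expect("invalid MongoDB URI");
            Client::with_options(options).expect("failed to create MongoDB client")
        })
        .await
}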
However I can't use await in the initialization block.
You can skirt this with futures::executor::block_on
use once_cell::sync::Lazy;
// ...

static PGCLIENT: Lazy<Client> = Lazy::new(|| {
    let client: Client = futures::executor::block_on(async {
        let (client, connection) = tokio_postgres::connect(
            "postgres:///?user=ecarroll&port=5432&host=/run/postgresql",
            NoTls,
        )
        .await
        .unwrap();
        tokio::spawn(async move {
            if let Err(e) = connection.await {
                eprintln!("connection error: {}", e);
            }
        });
        client
    });
    client
});
What we have is a non-async closure blocking in a single thread until the resolution of the future.
Create a new runtime from tokio::runtime::Runtime and use block_on to block the current thread until completion.
// database.rs
use tokio::runtime::Runtime;
use mongodb::Client;

pub fn connect_sync() -> Client {
    Runtime::new().unwrap().block_on(async {
        Client::with_uri_str("mongodb://localhost:27017").await.unwrap()
    })
}

// main.rs
mod database;

lazy_static! {
    static ref CLIENT: mongodb::Client = database::connect_sync();
}

#[actix_web::main]
async fn main() {
    let collection = &CLIENT.database("db_name").collection("coll_name");
    // ...
}
Use the async_once crate.
use async_once::AsyncOnce;
use lazy_static::lazy_static;
use mongodb::Client;

lazy_static! {
    static ref CLIENT: AsyncOnce<Client> = AsyncOnce::new(async {
        Client::with_uri_str(std::env::var("MONGO_URL").expect("MONGO_URL not set"))
            .await
            .unwrap()
    });
}
then
CLIENT.get().await;

Async POST fails on WP7 and F#

When I do let! read = from.AsyncRead buf in F#, it blocks and doesn't return until the TCP socket is dead. Why? And how do I fix it?
Here is the code:
module StreamUtil

open System.IO

/// copy from 'from' stream to 'toStream'
let (|>>) (from : Stream) (toStream : Stream) =
    let buf = Array.zeroCreate<byte> 1024
    let rec doBlock () =
        async {
            let! read = from.AsyncRead buf
            if read <= 0 then
                toStream.Flush()
                return ()
            else
                do! toStream.AsyncWrite(buf, 0, read)
                return! doBlock () }
    doBlock ()
It's being called from this code:
use fs = new FileStream(targPath, FileMode.CreateNew, FileAccess.ReadWrite)
do! req.InputStream |>> fs
and requested over HTTP with this code from Windows Phone 7.1 emulator:
public void Send()
{
    var b = new UriBuilder(_imageService.BaseUrl) { Path = "/images" };
    var req = WebRequest.CreateHttp(b.Uri);
    req.ContentType = "image/jpeg";
    req.Method = "POST";
    var imgLen = SelectedImage.ImageStream.Length;
    req.Headers[HttpRequestHeader.ContentLength] = imgLen.ToString(CultureInfo.InvariantCulture);
    req.Accept = "application/json";
    req.BeginGetRequestStream(RequestReady, new ReqState(req, imgLen));
}

void RequestReady(IAsyncResult ar)
{
    var state = (ReqState)ar.AsyncState;
    var req = state.Request;
    var reqStream = req.EndGetRequestStream(ar);
    SmartDispatcher.BeginInvoke(() =>
    {
        using (var sw = new StreamWriter(reqStream))
        using (var br = new BinaryReader(SelectedVoucher.ImageStream))
        {
            var readBytes = br.ReadBytes(state.ImgLen);
            // tried both 2
            sw.Write(readBytes);
            //sw.Write(Convert.ToBase64String(readBytes));
            sw.Flush();
            sw.Close();
        }
        req.BeginGetResponse(ResponseReady, req);
    });
}

// WHY IS IT YOU ARE NOT CALLED???
void ResponseReady(IAsyncResult ar)
{
    try
    {
        var request = (HttpWebRequest)ar.AsyncState;
        var response = request.EndGetResponse(ar);
        SmartDispatcher.BeginInvoke(() =>
        {
            var rdr = new StreamReader(response.GetResponseStream());
            var msg = rdr.ReadToEnd();
            var imageLocation = response.Headers["Location"];
            Debug.WriteLine(msg);
            Debug.WriteLine(imageLocation);
        });
    }
    catch (WebException ex)
    {
        Debug.WriteLine(ex.ToString());
    }
    catch (Exception ex)
    {
        Debug.WriteLine(ex.ToString());
    }
}
Unsuccessfully. The ResponseReady callback is never reached.
Meanwhile, this code works excellent:
open System
open System.Net.Http // WebAPI nuget

let sync aw = Async.RunSynchronously aw

let postC<'a> (c : HttpClient) (r : Uri) (cont : HttpContent) =
    let response = sync <| Async.AwaitTask( c.PostAsync(r, cont) )
    let struc:'a = sync <| deserialize<'a> response
    response, struc

let withContent<'a> (fVerb : (HttpClient -> Uri -> HttpContent -> _ * 'a)) =
    let c = new HttpClient()
    fVerb c

[<Test>]
let ``POST /images 201 + Location header`` () =
    let post = withContent<MyImage> postC
    let bytes = IO.File.ReadAllBytes("sample.jpg")
    let hash = SHA1.Create().ComputeHash(bytes) |> Convert.ToBase64String
    let pic = new ByteArrayContent(bytes)
    pic.Headers.Add("Content-Type", "image/jpeg")
    pic.Headers.Add("X-SHA1-Hash", hash)
    let resp, ri = (resource "/images", pic) ||> post
    resp.StatusCode =? Code.Created
    ri.sha1 =? hash
    mustHaveHeaders resp
I couldn't get Fiddler2 working with WP7.
EDIT: Welcome to a yak. I've moved onto greener pastures myself ;)
You should put the bytes into the buffer before sending, and use a BufferedStream for input/output.