I am trying to create a websocket server that receives input from the user and, at the same time, reads from Kafka and continuously sends data to the user. I am having trouble getting both to run concurrently.
import asyncio
import json

import websockets
from kafka import KafkaConsumer

async def sender(websocket):
    consumer = KafkaConsumer(...)  # read from kafka
    for message in consumer:
        msg = json.loads(message.value.decode())
        await websocket.send(msg)

async def handle_client(websocket):
    asyncio.create_task(sender(websocket))
    while True:
        print("Waiting for client input")
        try:
            msg = await websocket.recv()
            # do something with msg
        except websockets.ConnectionClosed:
            break

async def main():
    async with websockets.serve(handle_client, "localhost", 8765):
        await asyncio.Future()  # run forever

if __name__ == "__main__":
    asyncio.run(main())
They seem to block each other. I think the issue is that sender() blocks the event loop because it never awaits or sleeps inside the Kafka loop?
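That is indeed the problem: the kafka-python KafkaConsumer is a blocking iterator, so for message in consumer: never yields control back to the event loop, which starves websocket.recv(). A minimal sketch of one way around this, assuming the kafka-python client from the snippet above (an async consumer such as aiokafka would be the other option), is to run the blocking poll in a worker thread via run_in_executor:

import asyncio

from kafka import KafkaConsumer  # blocking client; aiokafka is the async alternative

async def sender(websocket):
    loop = asyncio.get_running_loop()
    consumer = KafkaConsumer(...)  # same (elided) consumer configuration as above
    while True:
        # poll() blocks, so run it in a worker thread; the event loop stays
        # free to service websocket.recv() in handle_client().
        batches = await loop.run_in_executor(None, consumer.poll, 1000)
        for records in batches.values():
            for record in records:
                await websocket.send(record.value.decode())

handle_client() and main() can stay as they are; only sender() changes.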
I wrote a socket server and client package with asyncio; it is a single module with two classes, Server and Client. The intention is for the code to check whether the specified port is already in use on the same device and, if so, to use the Client class instead of the Server class. I am doing this to enable local testing between client and server as part of a larger goal.
I wrote my unit test with pytest and I can get it to pass if I run the server in its own process first, then run the client in another process.
However, I want to test both server and client together in the same process using pytest, and the test never completes this way.
Am I running into an issue with asyncio here? Or is it an issue with pytest? Or is it something I am doing wrong in my code?
WebSockets.websocket
import asyncio

class Server:
    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.server = None
        self.connections = {}

    async def start(self):
        self.server = server = await asyncio.start_server(self.handle_client, self.host, self.port)
        addr = server.sockets[0].getsockname()
        print(f'Serving on {addr}')
        async with server:
            await server.serve_forever()

    async def handle_client(self, reader, writer):
        addr = writer.get_extra_info('peername')
        print(f'New connection from {addr}')
        self.connections[addr] = writer
        try:
            while not reader.at_eof():
                data = await reader.read(100)
                message = data.decode()
                if message:
                    print(f'Received {message!r} from {addr}')
                    await self.send(message, addr)
        finally:
            del self.connections[addr]
            writer.close()

    async def send(self, message, addr):
        writer = self.connections.get(addr)
        if not writer:
            return
        writer.write(message.encode())
        await writer.drain()

    async def recv(self, addr):
        reader, _ = await asyncio.open_connection(addr[0], addr[1])
        data = await reader.read(100)
        return data.decode()

    async def stop(self):
        # Server.close() is not a coroutine; close, then wait for it to finish
        self.server.close()
        await self.server.wait_closed()

class Client:
    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.reader = None
        self.writer = None

    async def connect(self):
        self.reader, self.writer = await asyncio.open_connection(self.host, self.port)

    async def send(self, message):
        self.writer.write(message.encode())
        await self.writer.drain()  # make sure the data actually leaves the buffer

    async def recv(self):
        data = await self.reader.read(100)
        return data.decode()

    async def close(self):
        self.writer.close()
Test
import socket
import asyncio
from WebSockets.websocket import Server, Client
import pytest

def check_port(address, port):
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind((address, port))
        s.close()
        return "server"
    except OSError:
        return "client"

async def serve():
    server = Server('localhost', 8910)
    await server.start()

async def client_connect():
    client = Client('localhost', 8910)
    await client.connect()
    await client.send("Hello world!")
    message = await client.recv()
    if message:
        print(f"Received {message!r} from server")
        assert message == "Hello world!"
    await client.close()

@pytest.mark.asyncio
async def test_sockets():
    if check_port("localhost", 8910) == "server":
        await serve()
        # for reference on how to use in non pytest module
        # loop = asyncio.new_event_loop()
        # loop.run_until_complete(serve())
        # loop.close()
    else:
        await client_connect()
        # for reference on how to use in non pytest module
        # loop = asyncio.new_event_loop()
        # loop.run_until_complete(client_connect())
        # loop.close()
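As an aside, in a single process check_port() will always report "server" (the port is still free when the test starts), and serve() never returns because Server.start() awaits serve_forever(), so the client branch is never reached. A minimal sketch of one way to drive both ends in one pytest-asyncio test, assuming the Server and Client classes above, is to run the server as a background task and cancel it once the client is done:

import asyncio
import contextlib

import pytest

from WebSockets.websocket import Server, Client

@pytest.mark.asyncio
async def test_server_and_client_in_one_process():
    server = Server('localhost', 8910)
    server_task = asyncio.create_task(server.start())  # serve_forever() runs in the background
    await asyncio.sleep(0.1)                           # crude wait for the server to bind

    client = Client('localhost', 8910)
    await client.connect()
    await client.send("Hello world!")
    message = await client.recv()
    assert message == "Hello world!"
    await client.close()

    server_task.cancel()                               # stop serve_forever()
    with contextlib.suppress(asyncio.CancelledError):
        await server_task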
I'm implementing a producer-consumer program in a server using sockets and asyncio. The problem is that the async function sock_recv() does not seem to work properly when used with a socket wrapped in an SSL connection. The following is the working (non-SSL) code.
Server side
import asyncio
import random
import socket
import ssl

SERVER_ADDRESS = (HOST, PORT) = "127.0.0.1", 8881

async def producer(queue, client_connection, event_loop):
    while True:
        print("Waiting for sock_recv")
        await event_loop.sock_recv(client_connection, 4096)
        r = random.randint(1, 101)
        print("Produced: %d" % r)
        await queue.put(r)
        await asyncio.sleep(0)

async def consumer(queue):
    while True:
        print("Waiting for queue.get()")
        r = await queue.get()
        await asyncio.sleep(2)
        print("Consumed: %d" % r)

async def main():
    listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listen_socket.bind(SERVER_ADDRESS)
    listen_socket.listen(5)
    ssl_context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
    ssl_context.load_cert_chain(certfile="certificate.pem", keyfile="key.pem")
    client_connection, client_address = listen_socket.accept()
    # client_connection = ssl_context.wrap_socket(
    #     client_connection, server_side=True
    # )
    client_connection.setblocking(False)
    queue = asyncio.Queue()
    t1 = asyncio.create_task(producer(queue, client_connection, asyncio.get_event_loop()))
    t2 = asyncio.create_task(consumer(queue))
    await asyncio.wait([t1, t2])

event_loop = asyncio.get_event_loop()
asyncio.run(main())
Client side
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("127.0.0.1", 8881))
s.sendall(b"Hello")
Output
Waiting for sock_recv
Waiting for queue.get()
Produced: 49
Waiting for sock_recv
Consumed: 49
Waiting for queue.get()
Here's the problem: when I uncomment the following part
# client_connection = ssl_context.wrap_socket(
#     client_connection, server_side=True
# )
it blocks on the sock_recv() call.
With the uncommented code, I get the following output:
Output
Waiting for sock_recv
Waiting for queue.get()
Client Code
import socket
import ssl
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock = ssl.wrap_socket(s)
sock.connect(("127.0.0.1", 8881))
sock.sendall(b"Hello")
Finally, when I shut down the server with Ctrl-C, I get the following output:
^CTask exception was never retrieved
future: <Task finished coro=<producer() done, defined at asyncio_test.py:8> exception=SSLWantReadError(2, 'The operation did not complete (read) (_ssl.c:2488)')>
Traceback (most recent call last):
File "asyncio_test.py", line 11, in producer
await event_loop.sock_recv(client_connection, 4096)
File "/home/coverfox/.pyenv/versions/3.7.3/lib/python3.7/asyncio/selector_events.py", line 352, in sock_recv
return await fut
File "/home/coverfox/.pyenv/versions/3.7.3/lib/python3.7/asyncio/selector_events.py", line 366, in _sock_recv
data = sock.recv(n)
File "/home/coverfox/.pyenv/versions/3.7.3/lib/python3.7/ssl.py", line 1037, in recv
return self.read(buflen)
File "/home/coverfox/.pyenv/versions/3.7.3/lib/python3.7/ssl.py", line 913, in read
return self._sslobj.read(len)
ssl.SSLWantReadError: The operation did not complete (read) (_ssl.c:2488)
Traceback (most recent call last):
File "asyncio_test.py", line 42, in <module>
asyncio.run(main())
File "/home/coverfox/.pyenv/versions/3.7.3/lib/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/home/coverfox/.pyenv/versions/3.7.3/lib/python3.7/asyncio/base_events.py", line 571, in run_until_complete
self.run_forever()
File "/home/coverfox/.pyenv/versions/3.7.3/lib/python3.7/asyncio/base_events.py", line 539, in run_forever
self._run_once()
File "/home/coverfox/.pyenv/versions/3.7.3/lib/python3.7/asyncio/base_events.py", line 1739, in _run_once
event_list = self._selector.select(timeout)
File "/home/coverfox/.pyenv/versions/3.7.3/lib/python3.7/selectors.py", line 468, in select
fd_event_list = self._selector.poll(timeout, max_ev)
KeyboardInterrupt
Edit:
I just found out that it works if I pass do_handshake_on_connect=False to wrap_socket() in the client code, but then the handshake never happens, so SSL won't actually work.
So, it turns out that the async function sock_recv() does not support SSLSocket, since SSL reads and writes go through a user-space buffer, as described in this SO answer.
The way to work around this issue is to use asyncio's transports or streams. Here is the working version of the code from the question above.
import asyncio
import random
import socket
import ssl

SERVER_ADDRESS = (HOST, PORT) = "127.0.0.1", 8881

async def producer(reader, writer, queue):
    while True:
        print("Waiting for sock_recv")
        await reader.read(16)
        r = random.randint(1, 101)
        print("Produced: %d" % r)
        await queue.put(r)
        await asyncio.sleep(0)

async def consumer(queue):
    while True:
        print("Waiting for queue.get()")
        r = await queue.get()
        await asyncio.sleep(2)
        print("Consumed: %d" % r)

async def set_up_producer_consumer(reader, writer):
    queue = asyncio.Queue()
    t1 = asyncio.create_task(producer(reader, writer, queue))
    t2 = asyncio.create_task(consumer(queue))
    await asyncio.wait([t1, t2])

async def main():
    ssl_context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
    ssl_context.load_cert_chain(certfile="certificate.pem", keyfile="key.pem")
    server = await asyncio.start_server(set_up_producer_consumer, HOST, PORT,
                                        family=socket.AF_INET, ssl=ssl_context,
                                        reuse_address=True)
    await server.wait_closed()

event_loop = asyncio.get_event_loop()
asyncio.run(main())
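For completeness, the client can then use asyncio streams with an SSLContext as well. This is a minimal sketch; the permissive context below is an assumption for testing against the self-signed certificate.pem, not something to use in production:

import asyncio
import ssl

async def client():
    # For a self-signed test certificate; a real deployment should verify the peer.
    ssl_context = ssl.create_default_context()
    ssl_context.check_hostname = False
    ssl_context.verify_mode = ssl.CERT_NONE

    reader, writer = await asyncio.open_connection("127.0.0.1", 8881, ssl=ssl_context)
    writer.write(b"Hello")
    await writer.drain()

asyncio.run(client())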
I have two services: one sends stream data and the other receives it, using akka-grpc for communication. When source data is provided, service one is called to process it and send it to service two via the gRPC client. It's possible that multiple instances of service one run at the same time when multiple pieces of source data are provided at once. In a long-running test of my application, I see the error below in service one:
ERROR i.a.g.application.actors.DbActor - GraphStage [akka.grpc.internal.AkkaNettyGrpcClientGraphStage$$anon$1#59d40805] terminated abruptly, caused by for example materializer or act
akka.stream.AbruptStageTerminationException: GraphStage [akka.grpc.internal.AkkaNettyGrpcClientGraphStage$$anon$1#59d40805] terminated abruptly, caused by for example materializer or actor system termination.
I never shut down the actor systems; I only kill actors after they finish their job. I also use proto3 and HTTP/2 for request binding. Here is a piece of my code from service one:
////////////////////server http binding /////////
val service: HttpRequest => Future[HttpResponse] =
  ServiceOneServiceHandler(new ServiceOneServiceImpl(system))

val bound = Http().bindAndHandleAsync(
  service,
  interface = config.getString("akka.grpc.server.interface"),
  port = config.getString("akka.grpc.server.default-http-port").toInt,
  connectionContext = HttpConnectionContext(http2 = Always))

bound.foreach { binding =>
  logger.info(s"gRPC server bound to: ${binding.localAddress}")
}

////////////////////client /////////
def send2Server[A](data: ListBuffer[A]): Future[ResponseDTO] = {
  val reply = {
    val thisClient = interface.initialize()
    interface.call(client = thisClient, req = data.asInstanceOf[ListBuffer[StoreRequest]].toList)
  }
  reply
}

///////////////// grpc communication //////////
def send2GrpcServer[A](data: ListBuffer[A]): Unit = {
  val reply = send2Server(data)
  Await.ready(reply, Duration.Inf) onComplete {
    case util.Success(response: ResponseDTO) =>
      logger.info(s"got reply message: ${response.description}")
      //////check response content and stop application if desired result not found in response
    case util.Failure(exp) =>
      //////stop application
      throw exp.getCause
  }
}
The error occurs exactly after waiting for the response from service two:
Await.ready(reply, Duration.Inf)
I can't pin down the cause of the error.
UPDATE
I found that some streams get lost: service one sends a stream and waits indefinitely for the response, while service two never receives anything to reply to, but I still don't know why the stream is lost.
I also updated the akka-grpc plugin, but it made no difference:
addSbtPlugin("com.lightbend.akka.grpc" % "sbt-akka-grpc" % "0.6.1")
addSbtPlugin("com.lightbend.sbt" % "sbt-javaagent" % "0.1.4")
I am losing messages in my Tornado chat and I do not know how to detect when a message wasn't sent so that I can send it again.
Is there any way to detect when the connection is lost, and to resend the message once the connection is restored?
This is my code:
def get(self):
    try:
        json.dumps(MessageMixin.cache)
    except KeyError:
        raise tornado.web.HTTPError(404)

class MessageMixin(object):
    waiters = {}
    cache = {}
    cache_size = 200

    def wait_for_messages(self, cursor=None):
        t = self.section_slug
        waiters = self.waiters.setdefault(t, [])
        result_future = Future()
        waiters.append(result_future)
        return result_future

    def cancel_wait(self, future):
        t = self.section_slug
        waiters = self.waiters.setdefault(t, [])
        waiters.remove(future)
        # Set an empty result to unblock any coroutines waiting.
        future.set_result([])

    def new_messages(self, message):
        t = self.section_slug
        #cache = self.cache.setdefault(t, [])
        #print t
        #print self.waiters.setdefault(t, [])
        waiters = self.waiters.setdefault(t, [])
        for future in waiters:
            try:
                if message is not None:
                    future.set_result(message)
            except Exception:
                logging.error("Error in waiter callback", exc_info=True)
        waiters = []
        #self.cache.extend(message)
        #if len(self.cache) > self.cache_size:
        #    self.cache = self.cache[-self.cache_size:]

class MessageNewHandler(MainHandler, MessageMixin):
    def post(self, section_slug):
        self.section_slug = section_slug
        post = self.get_argument("html")
        idThread = self.get_argument("idThread")
        isOpPost = self.get_argument("isOpPost")
        arg_not = self.get_argument("arg")
        type_not = self.get_argument("type")
        redirect_to = self.get_argument("next", None)
        message = {"posts": [post], "idThread": idThread, "isOpPost": isOpPost,
                   "type": type_not, "arg_not": arg_not}
        if redirect_to:
            self.redirect(redirect_to)
        else:
            self.write(post)
        self.new_messages(message)

class MessageUpdatesHandler(MainHandler, MessageMixin):
    @gen.coroutine
    def post(self, section_slug):
        self.section_slug = section_slug
        try:
            self.future = self.wait_for_messages(cursor=self.get_argument("cursor", None))
            data = yield self.future
            if self.request.connection.stream.closed():
                return
            self.write(data)
        except Exception:
            raise tornado.web.HTTPError(404)

    def on_connection_close(self):
        self.cancel_wait(self.future)

class Application(tornado.web.Application):
    def __init__(self):
        handlers = [
            (r"/api/1\.0/stream/(\w+)", MessageUpdatesHandler),
            (r"/api/1\.0/streamp/(\w+)", MessageNewHandler)
        ]
        tornado.web.Application.__init__(self, handlers)

def main():
    tornado.options.parse_command_line()
    app = Application()
    port = int(os.environ.get("PORT", 5000))
    app.listen(port)
    tornado.ioloop.IOLoop.instance().start()

if __name__ == "__main__":
    main()
In the original chatdemo, this is what the cursor parameter to wait_for_messages is for: the browser tells you the last message it got, so you can send it every message since then. You need to buffer messages and potentially re-send them in wait_for_messages. The code you've quoted here will only send messages to those clients that are connected at the time the message comes in (and remember that in long-polling, sending a message takes the client out of the "waiting" state for the duration of the network round-trip, so even when things are working normally, clients will constantly enter and leave the waiting state).
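A minimal sketch of that cursor-based buffering, loosely adapted from the Tornado chatdemo and fitted to the MessageMixin above (the "id" field, the uuid usage, and delivering messages as lists are assumptions for illustration, not the chatdemo's exact code):

import uuid
from tornado.concurrent import Future

class MessageMixin(object):
    waiters = {}
    cache = {}
    cache_size = 200

    def wait_for_messages(self, cursor=None):
        t = self.section_slug
        cache = self.cache.setdefault(t, [])
        waiters = self.waiters.setdefault(t, [])
        result_future = Future()
        if cursor:
            # Count how many cached messages the client has not seen yet and
            # hand them back immediately instead of parking the request.
            new_count = 0
            for msg in reversed(cache):
                if msg["id"] == cursor:
                    break
                new_count += 1
            if new_count:
                result_future.set_result(cache[-new_count:])
                return result_future
        waiters.append(result_future)
        return result_future

    def new_messages(self, message):
        t = self.section_slug
        cache = self.cache.setdefault(t, [])
        waiters = self.waiters.setdefault(t, [])
        message["id"] = str(uuid.uuid4())   # the client echoes this back as its cursor
        for future in waiters:
            future.set_result([message])
        self.waiters[t] = []
        # Keep a bounded history so reconnecting clients can catch up.
        cache.append(message)
        if len(cache) > self.cache_size:
            del cache[:-self.cache_size]

The updates handler would then write the result as, for example, self.write(dict(messages=data)) (Tornado refuses to write a bare list), and the client resends the id of the last message it received as the cursor argument on its next poll.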
I am working with an asyncio.Protocol server where the purpose is for the client to call the server, but wait until the server has responded and data is returned before stopping the client loop.
Based on the asyncio doc Echo Client and Server here: https://docs.python.org/3/library/asyncio-protocol.html#protocol-example-tcp-echo-server-and-client , results of transport.write(...) are returned immediately when called.
From experience, calling loop.run_until_complete(coroutine) fails with RuntimeError: Event loop is running.
Running asyncio.sleep(n) in the data_received() method of the server doesn't have any effect either.
yield from asyncio.sleep(n) and yield from asyncio.async(asyncio.sleep(n)) in data_received() both hang the server.
My question is, how do I get my client to wait for the server to write a response before giving back control?
My suggestion is to never use the transport/protocol pair directly.
asyncio has a Streams API for high-level programming.
Client code can look like:
@asyncio.coroutine
def communicate():
    reader, writer = yield from asyncio.open_connection(HOST, PORT)
    writer.write(b'data')
    yield from writer.drain()
    answer = yield from reader.read()
    # process answer, maybe send new data back to server and wait for answer again
    writer.close()
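The server side can use the same Streams API via asyncio.start_server. Here is a minimal sketch in the same Python 3.4-era yield from style as the snippet above; handle_echo, HOST and PORT are illustrative names:

import asyncio

HOST, PORT = '127.0.0.1', 8888

@asyncio.coroutine
def handle_echo(reader, writer):
    data = yield from reader.read(100)   # wait for the client's payload
    yield from asyncio.sleep(2)          # simulate slow work before replying
    writer.write(data)                   # the client is parked on reader.read() until this arrives
    yield from writer.drain()
    writer.close()                       # sends EOF, so the client's read() returns

loop = asyncio.get_event_loop()
server = loop.run_until_complete(asyncio.start_server(handle_echo, HOST, PORT))
try:
    loop.run_forever()
finally:
    server.close()
    loop.close()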
You don't have to change the client code.
echo-client.py
#!/usr/bin/env python3.4
import asyncio

class EchoClient(asyncio.Protocol):
    message = 'Client Echo'

    def connection_made(self, transport):
        transport.write(self.message.encode())
        print('data sent: {}'.format(self.message))

    def data_received(self, data):
        print('data received: {}'.format(data.decode()))

    def connection_lost(self, exc):
        print('server closed the connection')
        asyncio.get_event_loop().stop()

loop = asyncio.get_event_loop()
coro = loop.create_connection(EchoClient, '127.0.0.1', 8888)
loop.run_until_complete(coro)
loop.run_forever()
loop.close()
The trick is to place your code (including the self.transport methods) into a coroutine and use wait_for(), with a yield from in front of the statements that need their values returned, or that take a while to complete:
echo-server.py
#!/usr/bin/env python3.4
import asyncio

class EchoServer(asyncio.Protocol):
    def connection_made(self, transport):
        peername = transport.get_extra_info('peername')
        print('connection from {}'.format(peername))
        self.transport = transport

    def data_received(self, data):
        print('data received: {}'.format(data.decode()))
        fut = asyncio.async(self.sleeper())
        result = asyncio.wait_for(fut, 60)

    @asyncio.coroutine
    def sleeper(self):
        yield from asyncio.sleep(2)
        self.transport.write("Hello World".encode())
        self.transport.close()

loop = asyncio.get_event_loop()
coro = loop.create_server(EchoServer, '127.0.0.1', 8888)
server = loop.run_until_complete(coro)
print('serving on {}'.format(server.sockets[0].getsockname()))

try:
    loop.run_forever()
except KeyboardInterrupt:
    print("exit")
finally:
    server.close()
    loop.close()
Run echo-server.py and then echo-client.py; the client will wait 2 seconds, as determined by asyncio.sleep, then stop.