Background
I wanted to write a unit test for connect on the code excerpts below given the following assumptions:
When AsyncClient is instantiated, self.io.connected and
self.conn_established are False
At the first connect call,
we assert that self.io.connect is called (ideally,
self.io.connect should be mocked here)
When connect event is
emitted self.conn_established is set to True (since we mock
self.io.connect, then we'll need to mock the triggering of the
connect event)
class AsyncClient:
    """Wraps a socketio.AsyncClient and tracks when the 'connect' event fired."""

    def __init__(self):
        self.conn_established = False
        self.io = socketio.AsyncClient()

        # The '@' was garbled to '#' in the original paste: without the event
        # decorator the handler is never registered with the socketio client.
        @self.io.event
        def connect():
            # BUG FIX: the original assigned `self.connection_established`,
            # a name nothing reads, so the wait loop below never terminated.
            self.conn_established = True

    async def connect(self):
        """Connect to the server and wait until the 'connect' event arrives.

        NOTE(review): `endpoint` must be defined at module level -- confirm.
        """
        while not self.io.connected:
            await self.io.connect(endpoint)
        # Poll until the registered event handler flips the flag.
        while not self.conn_established:
            await asyncio.sleep(1)
What I had tried
I was able to write a mock for io.connect, but I'm stuck with triggering the socketio connect event:
@pytest.fixture
def async_client():
    """Provide a fresh AsyncClient instance for each test."""
    yield AsyncClient()


class AsyncMock(mock.MagicMock):
    """A MagicMock whose call is awaitable.

    NOTE: Python 3.8+ ships unittest.mock.AsyncMock, which replaces this
    hand-rolled class; keep this only for older interpreters.
    """

    async def __call__(self, *args, **kwargs):
        # Delegate to MagicMock so call recording/side_effect still work.
        return super(AsyncMock, self).__call__(*args, **kwargs)
@pytest.mark.asyncio
async def test_connect(async_client):
    """connect() must call io.connect and wait for the 'connect' event."""

    def mock_successful_conn(*args, **kwargs):
        async_client.io.connected = True
        # Handlers are stored on the client indexed by namespace and then by
        # event name, so the handler can also be invoked directly:
        #   async_client.io.handlers["/"]["connect"]()
        async_client.io.trigger_event("connect")

    # Mock io.connect so no real network connection is attempted.
    async_client.io.connect = AsyncMock(
        spec=async_client.io.connect, side_effect=mock_successful_conn
    )
    await async_client.connect()
Questions
How do I write the unit tests for the above?
Is there a way to trigger socketio events for testing purposes?
Thanks! Your help would be greatly appreciated.
OK — thanks to @Miguel's suggestion, I was able to mock the triggering of the event by calling the event handler function directly.
We can get them by accessing the client's handlers which are first indexed by namespace, and then indexed by the event handler's name.
So in my case, I can get the connect event handler by doing this:
# Event handlers are stored on the client keyed by namespace ("/") and then
# by event name, so the 'connect' handler can be fetched and called directly.
connect_handler = tunnel_client.io.handlers["/"]["connect"]
connect_handler()  # NOTE(review): an async handler would need to be awaited
Related
I developed a FastAPI app with WebSockets and I'm trying to test it. I need async tests in order to check data in the database during tests. My setup looks like this: I have a fixture that generates a database session:
@pytest_asyncio.fixture(scope="session")
async def async_engine() -> AsyncEngine:
    """Session-scoped engine bound to the test database."""
    engine = create_async_engine(settings.TEST_DATABASE_URL)
    yield engine
    # BUG FIX: dispose the pool so connections are not leaked at teardown.
    await engine.dispose()


@pytest_asyncio.fixture(scope="function")
async def session(async_engine):
    """Function-scoped AsyncSession, closed after each test."""
    AsyncSessionLocal = sessionmaker(
        bind=async_engine,
        autoflush=False,
        expire_on_commit=False,
        future=True,
        class_=AsyncSession,
    )
    session = AsyncSessionLocal()
    try:
        yield session
    finally:
        # BUG FIX: the original never closed the session, leaving a
        # checked-out connection behind after every test.
        await session.close()
Then I have a fixture to create a client:
@pytest_asyncio.fixture(scope="function")
async def client(session):
    """HTTPX client against the ASGI app with the test DB session injected."""
    app.dependency_overrides[get_db_session] = lambda: session
    try:
        async with AsyncClient(
            transport=ASGIWebSocketTransport(app), base_url="http://test"
        ) as client:
            yield client
    finally:
        # BUG FIX: the override leaked into every later test; remove it.
        app.dependency_overrides.pop(get_db_session, None)
The transport=ASGIWebSocketTransport(app) part is from httpx-ws library, httpx itself does not support websockets.
The tests look like this:
@pytest.mark.asyncio
async def test_http(session, client):
    r = await client.get('/test/')
    # NOTE(review): add an assertion (e.g. on r.status_code); a test without
    # one passes even when the endpoint breaks.


@pytest.mark.asyncio
async def test_ws(session, client):
    """Hangs on DB queries inside /ws/ -- the reported problem."""
    async with aconnect_ws("/ws/", client) as ws:
        await ws.send_json({})
test_http works as expected. However, test_ws does not: all sql queries in /ws/ endpoint hang forever. When engine is created with echo=True I see my sql statement in logs like sqlalchemy.engine.Engine SELECT test.id FROM test and then nothing. I've tried looking into pg_stat_activity, pg_locks and pg_blocking_pids(pid) with no success.
At this point I'm stuck: I've read through httpx-ws code and couldn't find any potential problems. What else can I do to figure out why database connection hangs in my case?
I receive the error RuntimeError: Event loop is closed each time I try to make more than one async function call inside my test. I have already tried all the suggestions on Stack Overflow for rewriting the event_loop fixture, but nothing works. I wonder what I'm missing.
Run test command: python -m pytest tests/ --asyncio-mode=auto
requirements.txt
pytest==7.1.2
pytest-asyncio==0.18.3
pytest-html==3.1.1
pytest-metadata==2.0.1
test.py
# Reproduction of the failure: the second request dies with
# "RuntimeError: Event loop is closed" because the function-scoped
# event_loop fixture closes the loop the test client is still bound to.
async def test_user(test_client_fast_api):
    assert 200 == 200
    request_first = test_client_fast_api.post(  # works fine
        "/first_route",
    )
    request_second = test_client_fast_api.post(  # receives RuntimeError: Event loop is closed
        "/second_route",
    )
conftest.py
@pytest.fixture()
def event_loop():
    """Create a dedicated event loop per test.

    BUG FIX: the original grabbed the already-running/current loop and then
    closed it at teardown, so the next async test found its loop closed --
    exactly the reported "RuntimeError: Event loop is closed".
    """
    loop = asyncio.new_event_loop()
    yield loop
    loop.close()
It took me all afternoon to solve this problem.
I also try to succeed from other people's code, here is my code.
Add a file conftest.py to the directory where the test script is placed.
And write the following code.
import pytest
from main import app
from httpx import AsyncClient
@pytest.fixture(scope="session")
def anyio_backend():
    """Run anyio-marked tests on the asyncio backend only."""
    return "asyncio"


@pytest.fixture(scope="session")
async def client():
    """One AsyncClient against the ASGI app, shared for the whole session."""
    async with AsyncClient(app=app, base_url="http://test") as client:
        print("Client is ready")
        yield client
And then write a test script test_xxx.py.
import pytest
from httpx import AsyncClient
@pytest.mark.anyio
async def test_run_not_exists_schedule(client: AsyncClient):
    """Running a schedule id that does not exist must not return 200."""
    response = await client.get("/schedule/list")
    assert response.status_code == 200
    schedules = response.json()["data"]["schedules"]
    schedules_exists = [i["id"] for i in schedules]
    # BUG FIX: max() raises ValueError on an empty list; default=0 keeps the
    # test working when no schedules exist yet.
    not_exists_id = max(schedules_exists, default=0) + 1
    request_body = {"id": not_exists_id}
    response = await client.put("/schedule/run_cycle", data=request_body)
    assert response.status_code != 200
@pytest.mark.anyio
async def test_run_adfasdfw(client: AsyncClient):
    """Duplicate of test_run_not_exists_schedule to prove two async tests
    can share the session-scoped client."""
    response = await client.get("/schedule/list")
    assert response.status_code == 200
    schedules = response.json()["data"]["schedules"]
    schedules_exists = [i["id"] for i in schedules]
    # BUG FIX: guard against an empty schedule list (ValueError from max()).
    not_exists_id = max(schedules_exists, default=0) + 1
    request_body = {"id": not_exists_id}
    response = await client.put("/schedule/run_cycle", data=request_body)
    assert response.status_code != 200
This is the real test code from my own project; you can adapt it to your own. Finally, run python -m pytest in the project's terminal. If all goes well, it should pass. This may involve libraries that need to be installed:
pytest
httpx
Wow — I had an afternoon similar to your experience, @Bai Jinge.
This is the event loop fixture and TestClient pattern that worked for me:
from asyncio import get_event_loop
from unittest import TestCase
from async_asgi_testclient import TestClient
@pytest.fixture(scope="module")
def event_loop():
    """Module-scoped loop so the TestClient and the tests share one loop."""
    # NOTE(review): get_event_loop() is deprecated outside a running loop on
    # Python 3.10+; prefer asyncio.new_event_loop() plus loop.close().
    loop = get_event_loop()
    yield loop


@pytest.mark.asyncio
async def test_example_test_case():
    # BUG FIX: the stray `self` parameter made pytest look for a fixture
    # named "self"; this is a plain function, not a TestCase method.
    async with TestClient(app) as async_client:
        response = await async_client.get(
            "/api/v1/example",
            query_string={"example": "param"},
        )
        assert response.status_code == HTTP_200_OK
Ref to relevant GitHub issue: https://github.com/tiangolo/fastapi/issues/2006#issuecomment-689611040
Please note — I could NOT figure out how to use class-based tests. Neither unittest.TestCase nor asynctest.case.TestCase would work for me. The pytest-asyncio docs (here) state that:
Test classes subclassing the standard unittest library are not supported, users are recommended to use unittest.IsolatedAsyncioTestCase or an async framework such as asynctest.
I've been struggling to find a solution for my problem, I hope I've come to the right place.
I have a django rest framework API which connect to a postgresql db and I run bots on my own API in order to do stuff. Here is my code :
def get_or_create_eventloop():
    """Return the current thread's event loop, creating one if none exists.

    asyncio.get_event_loop() raises RuntimeError in a thread that has no
    loop installed (and on newer Pythons when nothing is running), so fall
    back to creating a fresh loop and installing it for this thread.
    """
    try:
        return asyncio.get_event_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        # Return the loop we just installed; no need for a second lookup.
        return loop
My DB class which use asyncpg to connect / create a pool :
class DB:
    """Holds an asyncpg connection pool created once on the given loop."""

    def __init__(self, loop):
        # Create the pool eagerly so request handlers can reuse it without
        # reconnecting on every call.
        self.pool = loop.run_until_complete(self.connect_to_db())

    # BUG FIX: the original was a plain `def connect_to_db():` (no async,
    # no self) that used `await` -- a syntax error. It must be async.
    async def connect_to_db(self):
        return await asyncpg.create_pool(host="host",
                                         database="database",
                                         user="username",
                                         password="pwd",
                                         port=5432)
My API class :
class Api(APIView):
    """DRF view that reuses one event loop and one asyncpg pool."""

    # Create the loop once at class-definition time, since DRF serves
    # requests outside the main thread's running loop.
    loop = get_or_create_eventloop()
    nest_asyncio.apply()  # avoid the "loop already running" problem
    # Initialise the DB pool once so each request does not reconnect.
    db_object = DB(loop)

    def post(self, request):
        ...  # should be able to call do_something() from here

    async def do_something(self):
        ...
I have my bots running and sending post/get request to my django api via aiohttp.
The problem I'm facing is :
How to implement my post function in my API so he can handle multiple requests knowing that it's a new thread each time therefore a new event loop is created AND the creation of the pool in asyncpg is LINKED to the current event loop, i.e can't create new event loop, I need to keep working on the one created at the beginning so I can access my db later (via pool.acquire etc)
This is what I tried so far without success :
def post(self,request):
    # Fails with "Non-thread-safe operation invoked on an event loop other
    # than the current one": DRF serves each request on its own thread, so
    # self.loop belongs to a different thread than this caller.
    self.loop.run_until_complete(self.do_something())
This create :
RuntimeError: Non-thread-safe operation invoked on an event loop other than the current one
which I understand, we are trying to call the event loop from another thread possibly
I also tried to use asyng_to_sync from DJANGO :
# NOTE(review): the '#' below should be the '@' decorator symbol (lost in
# the paste). async_to_sync spins up a brand-new event loop per call, so the
# asyncpg pool created on the startup loop is unreachable from inside it.
#async_to_sync
async def post(..):
    resp = await self.do_something()
The problem here is when doing async_to_sync it CREATES a new event loop for the thread, therefore I won't be able to access my DB POOL
edit : cf https://github.com/MagicStack/asyncpg/issues/293 for that (I would love to implement something like that but can't find a way)
Here is a quick example of one of my bot (basic stuff) :
import asyncio
from aiohttp import ClientSession
async def send_req(url, session):
    """POST to *url* with the shared *session* and return the body as text."""
    async with session.post(url=url) as response:
        return await response.text()
async def run(r):
    """Fire *r* concurrent POSTs at the local API and print the bodies."""
    url = "http://localhost:8080/"
    tasks = []
    async with ClientSession() as session:
        for _ in range(r):
            # BUG FIX: the original read `asyncio.asyncio.create_task`.
            task = asyncio.create_task(send_req(url, session))
            tasks.append(task)
        responses = await asyncio.gather(*tasks)
        print(responses)


if __name__ == '__main__':
    # BUG FIX: the original ran asyncio.run(main()), but no main() is
    # defined anywhere -- the entry coroutine is run(r).
    asyncio.run(run(10))
Thank you in advance
After days of looking for an answer, I found the solution for my problem. I just used the package psycopg3 instead of asyncpg (now I can put #async_to_sync to my post function and it works)
I'm a python starter, and I'm trying to write some data analysis programs. the program is like below:
import asyncio
import time
class Test:
    def __init__(self, task):
        self.task = task
        # Blocking call standing in for real setup work; because it runs in
        # __init__ it cannot be awaited, so plain asyncio gains nothing here.
        time.sleep(5)  # here's some other jobs...
        # The f-string '=' debugging specifier requires Python 3.8+.
        print(f'{self.task = }')
async def main():
    # Collects the constructed Test objects; construction itself blocks.
    result = []
    tasks = ['task1', 'task2', 'task3', 'task4', 'task5', 'task6', 'task7', 'task8', 'task9']
    print(f"started at {time.strftime('%X')}")
    # I have a program structure like this, can I use async?
    # how to start init tasks at almost the same time?
    for task in tasks:
        # Runs strictly sequentially: Test.__init__ sleeps and nothing here
        # is awaited, so the loop gets no chance to interleave work.
        result.append(Test(task))
    print(f"finished at {time.strftime('%X')}")

asyncio.run(main())
I've tried some other way like multiprocessing, it works, code like below:
...
def main():
    result = []
    tasks = ['task1', 'task2', 'task3', 'task4', 'task5', 'task6', 'task7', 'task8', 'task9']
    print(f"started at {time.strftime('%X')}")
    # I have a program structure like this, can I use async?
    # how to start init tasks at the same time?
    # Pool.map runs `operation` in worker processes, so the blocking
    # __init__ work proceeds in parallel (multiprocessing, not asyncio).
    p = Pool()
    result = p.map(operation, [(task,) for task in tasks])
    print(f"finished at {time.strftime('%X')}")
...
but I still want to learn some 'modern way' to do this. I've found a module named 'ray', it's new.
But could async do this? I'm still wondering...
If someone can give me some advice, thanks a lot.
Your example code won't necessarily benefit from async IO, because __init__ is not "awaitable". You might be able to benefit from async if your code were structured differently and has an appropriate bottleneck. For example, if we had:
class Task:
def __init__(self):
<some not io bound stuff>
<some io bound task>
We could re-structure this to:
class Task:
def __init__(self):
<some not io bound stuff>
async def prime(self):
await <some io bound task>
Then in your main loop you can initialise the tasks as you're doing, then run the slow prime step in your event loop.
My advice here though would be to resist doing this unless you know you definitely have a problem. Coroutines can be quite fiddly, so you should only do this if you need to do it!
I am trying to use aiohttp in one of my projects and struggling to figure out how to create a persistent aiohttp.ClientSession object. I have gone through the official aiohttp documentation but did not find it help in this context.
I have looked through other online forums and noticed that a lot has changed since aiohttp was created. In some examples on GitHub, the aiohttp author creates a ClientSession outside coroutine functions (i.e. class Session: def __init__(self): self.session = aiohttp.ClientSession()). I also found that one should not create a ClientSession outside a coroutine.
I have tried the following:
class Session:
    """Lazily creates a single shared aiohttp.ClientSession for all requests."""

    def __init__(self):
        # Must NOT create the ClientSession here: aiohttp requires it to be
        # created inside a running event loop.
        self._session = None

    async def create_session(self):
        """Create the shared session; called from within the event loop."""
        self._session = aiohttp.ClientSession()

    # BUG FIX: the original read `async fetch(...)` -- the `def` keyword was
    # missing, which is a syntax error.
    async def fetch(self, url):
        """GET *url*, creating the shared session on first use."""
        if self._session is None:
            await self.create_session()
        async with self._session.get(url) as resp:
            return await resp.text()

    async def close(self):
        """Close the shared session (silences UnclosedSession warnings)."""
        if self._session is not None:
            await self._session.close()
            self._session = None
I am getting a lot of warning about UnclosedSession and connector. I also frequently get SSLError. I also noticed that 2 out of three calls gets hung and I have to CTRL+C to kill it.
With requests I can simply initialize the session object in __init__, but it's not as simple as this with aiohttp.
I do not see any issues if I use the following (which is what I see as example all over the place) but unfortunately here I end up creating ClientSession with every request.
# BUG FIX: `async with` requires a coroutine function; the original was a
# plain `def`, which is a syntax error.
async def fetch(url):
    """One-shot GET: opens (and closes) a new ClientSession per call."""
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            return await resp.text()
I can wrap aiohttp.ClientSession() in another function and use that as context-manager, but then too I would end up creating a new session object every time I call the wrapper function. I am trying to figure how to save a aiohttp.ClientSession in class namespace and reuse it.
Any help would be greatly appreciated.
Here is working example:
from aiohttp import ClientSession, TCPConnector
import asyncio
class CS:
    """Thin wrapper owning a single ClientSession for its whole lifetime."""

    _cs: ClientSession

    def __init__(self):
        # FIX: TCPConnector(verify_ssl=False) is deprecated in aiohttp 3.x;
        # ssl=False is the supported spelling with identical semantics.
        self._cs = ClientSession(connector=TCPConnector(ssl=False))

    async def get(self, url):
        """GET *url* via the shared session and return the body as text."""
        async with self._cs.get(url) as resp:
            return await resp.text()

    async def close(self):
        """Must be called before shutdown to release the connector."""
        await self._cs.close()
async def func():
    cs = CS()
    print(await cs.get('https://google.com'))
    await cs.close()  # you must close session


# Modernisation: asyncio.run replaces the deprecated
# get_event_loop()/run_until_complete pattern (Python 3.7+).
asyncio.run(func())
You can do it.
I implemented a way to share the session when writing Django programs (using ASGI). Use the PID to mark the session of each process, which makes it convenient for Django to reuse the session across processes.
After actual testing, I can directly call the shared session.
Django 3.2
uvicorn
aiohttp.py
import os
import asyncio
import aiohttp
import logging
# Cache of one ClientSession per worker process, keyed by PID.
session_list = {}
logger = logging.getLogger(__name__)


class Req:
    # NOTE(review): the '#' below should be the '@' decorator symbol (lost
    # in the paste); also, a property with side effects (creates a session,
    # mutates the global cache) is surprising -- consider a plain method.
    #property
    def set_session(self):
        try:
            loop = asyncio.get_running_loop()
        except:  # NOTE(review): bare except also swallows KeyboardInterrupt
            loop = asyncio.get_event_loop()
            asyncio.set_event_loop(loop)
        # NOTE(review): ClientSession(loop=...) is deprecated in aiohttp 3.x.
        session = aiohttp.ClientSession(loop=loop)
        session_list.update({os.getpid(): session})
        return session

    def __init__(self):
        # Reuse this process's cached session if one exists. NOTE(review):
        # with the decorator garbled, `self.set_session` here is a bound
        # method object, not a ClientSession -- confirm '@property' intent.
        if session_list.get(os.getpid()):
            self.session = session_list.get(os.getpid())
        else:
            self.session = self.set_session

    async def test(self):
        if session_list:
            session = session_list.get(os.getpid())
            if session and session.closed:
                # Drop the closed session and build a fresh one.
                session_list.pop(os.getpid())
                session = self.set_session
        else:
            session = self.set_session
        # NOTE(review): rebuilding the session when the loop IS running looks
        # inverted -- confirm the intended condition. `session.loop` is also
        # deprecated in aiohttp 3.x.
        if not session or session.loop.is_running():
            session = self.set_session
            logger.warning("session abnormal")
        # NOTE(review): "httpbing.org" is likely a typo for httpbin.org
        # (runtime string; cannot change it in a documentation-only pass).
        result = await session.get("http://httpbing.org/get")
        print(result.status)


req = Req()
views.py
from django.http import HttpResponse
from django.shortcuts import render # noqa
from django.views.generic import View
from django.utils.decorators import classonlymethod
import asyncio
class TTT(View):
    """Async Django class-based view (Django 3.2 lacks native async CBVs)."""

    # The '@' was garbled to '#' in the paste; classonlymethod is required.
    @classonlymethod
    def as_view(cls, **initkwargs):
        view = super().as_view(**initkwargs)
        # HACK: mark the view as a coroutine so Django 3.2's ASGI handler
        # awaits it; Django 4.0+ supports async handlers natively.
        view._is_coroutine = asyncio.coroutines._is_coroutine
        return view

    async def get(self, request):
        await req.test()
        return HttpResponse("ok")