The responses library provides mocks for the requests library. In my case, the tests typically look like this:
import responses

@responses.activate
def test_foo():
    # Add mocks for service A
    responses.add(responses.POST, 'http://service-A/foo', json={'bar': 'baz'}, status=200)
    responses.add(responses.POST, 'http://service-A/abc', json={'de': 'fg'}, status=200)

@responses.activate
def test_another_foo():
    # Add mocks for service A
    responses.add(responses.POST, 'http://service-A/foo', json={'bar': 'baz'}, status=200)
    responses.add(responses.POST, 'http://service-A/abc', json={'de': 'fg'}, status=200)
How can I avoid this code duplication?
I would love to have a mock_service_a fixture or something similar.
Just as you suggest, creating a fixture solves these issues.
import pytest
import responses
import requests

@pytest.fixture(scope="module", autouse=True)
def mocked_responses():
    with responses.RequestsMock() as rsps:
        rsps.add(
            responses.POST, "http://service-a/foo", json={"bar": "baz"}, status=200
        )
        rsps.add(
            responses.POST, "http://service-a/abc", json={"de": "fg"}, status=200
        )
        yield rsps

def test_foo():
    resp = requests.post("http://service-a/foo", json={"bar": "baz"})
    assert resp.status_code == 200

def test_another_foo():
    resp = requests.post("http://service-a/abc", json={"de": "fg"})
    assert resp.status_code == 200
Running it returns:
==================================== test session starts =====================================
platform darwin -- Python 3.9.1, pytest-6.2.2, py-1.10.0, pluggy-0.13.1
rootdir: **
collected 2 items
tests/test_grab.py .. [100%]
===================================== 2 passed in 0.21s ======================================
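Note that RequestsMock checks on exit that every registered response was actually fired. With the module-scoped fixture above, the two tests together satisfy that check; if you prefer the default function scope (fresh mocks per test), you would need to relax it. A minimal sketch, assuming the responses library's assert_all_requests_are_fired flag:
import pytest
import responses

@pytest.fixture(autouse=True)  # default function scope: fresh mocks for each test
def mocked_responses():
    # assert_all_requests_are_fired=False: don't fail tests that leave
    # some of the registered mocks unused
    with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:
        rsps.add(responses.POST, "http://service-a/foo", json={"bar": "baz"}, status=200)
        rsps.add(responses.POST, "http://service-a/abc", json={"de": "fg"}, status=200)
        yield rsps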
I'm trying to implement a simple async test suite. If my understanding of async is correct, the tests below should take only about 2 seconds to run. However, they take 6 seconds. What am I missing to make these tests run async ("at the same time")?
import logging
import pytest
import asyncio

MSG_FORMAT = "%(asctime)s.%(msecs)03d %(module)s->%(funcName)-15s |%(levelname)s| %(message)s"
MSG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
LOG_LEVEL = logging.INFO

# Create logger
logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)

# Create Stream Handler
log_stream = logging.StreamHandler()
log_format = logging.Formatter(fmt=MSG_FORMAT, datefmt=MSG_DATE_FORMAT)
log_stream.setFormatter(log_format)
log_stream.setLevel(LOG_LEVEL)
logger.addHandler(log_stream)

class TestMyStuff:
    @staticmethod
    async def foo(seconds):
        await asyncio.sleep(seconds)
        return 1

    @pytest.mark.asyncio
    async def test_1(self, event_loop):
        logger.info("start")
        assert await event_loop.create_task(self.foo(2)) == 1
        logger.info("end")

    @pytest.mark.asyncio
    async def test_2(self, event_loop):
        logger.info("start")
        assert await event_loop.create_task(self.foo(2)) == 1
        logger.info("end")

    @pytest.mark.asyncio
    async def test_3(self, event_loop):
        logger.info("start")
        # assert await event_loop.run_in_executor(None, self.foo) == 1
        assert await event_loop.create_task(self.foo(2)) == 1
        logger.info("end")
pytest extras:
plugins: asyncio-0.18.3, aiohttp-1.0.4
pytest-asyncio runs asynchronous tests serially. That plugin's goal is to make testing asynchronous code more convenient.
pytest-asyncio-cooperative, on the other hand, has the goal of running asyncio tests concurrently via cooperative multitasking (i.e. all async coroutines sharing the same event loop and yielding to each other).
To try out pytest-asyncio-cooperative do the following:
Install the plugin
pip install pytest-asyncio-cooperative
Replace the @pytest.mark.asyncio marks with @pytest.mark.asyncio_cooperative
Remove all references to event_loop. pytest-asyncio-cooperative uses a single implicit event loop for asyncio interactions.
Run pytest with pytest-asyncio disabled, since it is not compatible with pytest-asyncio-cooperative:
pytest -p no:asyncio test_mytestfile.py
Here is the original code snippet with these modifications:
import logging
import pytest
import asyncio

MSG_FORMAT = "%(asctime)s.%(msecs)03d %(module)s->%(funcName)-15s |%(levelname)s| %(message)s"
MSG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
LOG_LEVEL = logging.INFO

# Create logger
logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)

# Create Stream Handler
log_stream = logging.StreamHandler()
log_format = logging.Formatter(fmt=MSG_FORMAT, datefmt=MSG_DATE_FORMAT)
log_stream.setFormatter(log_format)
log_stream.setLevel(LOG_LEVEL)
logger.addHandler(log_stream)

class TestMyStuff:
    @staticmethod
    async def foo(seconds):
        await asyncio.sleep(seconds)
        return 1

    @pytest.mark.asyncio_cooperative
    async def test_1(self):
        logger.info("start")
        assert await self.foo(2) == 1
        logger.info("end")

    @pytest.mark.asyncio_cooperative
    async def test_2(self):
        logger.info("start")
        assert await self.foo(2) == 1
        logger.info("end")

    @pytest.mark.asyncio_cooperative
    async def test_3(self):
        logger.info("start")
        # assert await event_loop.run_in_executor(None, self.foo) == 1
        assert await self.foo(2) == 1
        logger.info("end")
And here are the test results:
plugins: hypothesis-6.39.4, asyncio-cooperative-0.28.0, anyio-3.4.0, typeguard-2.12.1, Faker-8.1.0
collected 3 items
test_mytestfile.py ... [100%]
================================ 3 passed in 2.18s =================================
Please check out the README of pytest-asyncio-cooperative. Now that tests run concurrently, you need to be wary of shared resources (e.g. mocking).
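As a hypothetical illustration of that caveat (the test names and the shared list below are made up for the sketch), two cooperative tests that mutate the same module-level object can interleave at every await; guarding the shared state with an asyncio.Lock avoids the race:
import asyncio
import pytest

shared_results = []            # hypothetical shared resource
results_lock = asyncio.Lock()  # guards shared_results across cooperative tests

@pytest.mark.asyncio_cooperative
async def test_writer_a():
    async with results_lock:
        shared_results.append("a")
        await asyncio.sleep(1)  # without the lock, another test could run here
        assert shared_results[-1] == "a"

@pytest.mark.asyncio_cooperative
async def test_writer_b():
    async with results_lock:
        shared_results.append("b")
        await asyncio.sleep(1)
        assert shared_results[-1] == "b"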
There are 2 files; the code in the first one is:
import pytest

class TestXdist2():
    @pytest.mark.dependency(name="aa")
    def test_t1(self):
        print("\ntest_1")
        assert True
The code in the second file is:
import pytest
import sys, os

sys.path.append(os.getcwd())
from testcases.test_xdist_2 import TestXdist2

class TestXdist1():
    def setup_class(self):
        self.x = TestXdist2()

    @pytest.mark.dependency(depends=["aa"], scope="module")
    def test_t2(self):
        print("\ntest_t2")
        assert 1 == 1

if __name__ == "__main__":
    pytest.main(["-s", "-v", f"{os.path.abspath('testcases')}/test_xdist_1.py"])
When I run the second file, I expected test case "test_t1" to run first and "test_t2" second, but in the result below "test_t2" is skipped, and I don't know why:
PS D:\gitProjects\selenium_pytest_demo> & D:/Python38/python.exe d:/gitProjects/selenium_pytest_demo/testcases/test_xdist_1.py
Test session starts (platform: win32, Python 3.8.7, pytest 6.2.2, pytest-sugar 0.9.4)
cachedir: .pytest_cache
metadata: {'Python': '3.8.7rc1', 'Platform': 'Windows-10-10.0.18362-SP0', 'Packages': {'pytest': '6.2.2', 'py': '1.10.0', 'pluggy': '0.13.1'}, 'Plugins': {'allure-pytest': '2.8.35', 'dependency': '0.5.1', 'forked': '1.3.0', 'html': '3.1.1', 'metadata': '1.11.0', 'rerunfailures': '9.1.1', 'sugar': '0.9.4', 'xdist': '2.2.1'}, 'JAVA_HOME': 'D:\\Java\\jdk-15.0.1'}
rootdir: D:\gitProjects\selenium_pytest_demo, configfile: pytest.ini
plugins: allure-pytest-2.8.35, dependency-0.5.1, forked-1.3.0, html-3.1.1, metadata-1.11.0, rerunfailures-9.1.1, sugar-0.9.4, xdist-2.2.1
collecting ...
testcases\test_xdist_1.py::TestXdist1.test_t2 s 50% █████
test_1
testcases\test_xdist_2.py::TestXdist2.test_t1 ✓ 100% ██████████
Results (0.04s):
1 passed
1 skipped
This is the expected behavior: pytest-dependency does not order test cases, it only skips test cases if the test case they depend on was skipped or failed. There exists a PR that would change that, but it is not merged.
Until then, you can use pytest-order. If you just want the ordering, you can use relative markers, as shown below. If you also want tests to be skipped when the test they depend on failed, you can use pytest-dependency as before, but add the pytest-order option --order-dependencies to order the tests additionally.
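For example, a sketch using pytest-order's relative markers (the node id below is based on the file layout from the question and may need adjusting):
import pytest

class TestXdist1():
    # run test_t2 only after test_t1 from the other module has run
    @pytest.mark.order(after="test_xdist_2.py::TestXdist2::test_t1")
    def test_t2(self):
        print("\ntest_t2")
        assert 1 == 1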
Disclaimer:
I'm the author of pytest-order (which is a fork of pytest-ordering).
I have the following file with tests:
import pytest
from httpx import AsyncClient
import sys

import config
from main import app

@pytest.mark.asyncio
async def test_register():
    async with AsyncClient(app=app, base_url="http://test") as ac:
        response = await ac.post("/register")
        assert response.status_code == 200
I want to run it like:
if __name__ == '__main__':
    pytest.run()  # Or something alike
How can I do it? I need the name '__main__' to be assigned to this module, because the main module (imported above) has a constraint like if __name__ == '__main__', so without it the tests will not actually run.
pytest.main() would run pytest in the current working directory, and this would include your file depending on the filename (e.g. if the filename starts with test_). To run pytest on the current file only, you can run:
if __name__ == "__main__":
    pytest.main([__file__])
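pytest.main() accepts the same arguments as the pytest command line, so you can pass extra flags along with the file path:
import pytest

if __name__ == "__main__":
    # equivalent to running: pytest -v -s <this file>
    pytest.main([__file__, "-v", "-s"])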
I am new to Python, playing around with stuff, trying to make Python services communicate via Kafka using Faust. So I have a small PoC project.
Faust app definition:
# app.py
import faust as f
from models import ReadRequest, ReadResponse

app = f.App("faust-app", broker="kafka://localhost:9092", store="rocksdb://")
topics = {"read-request": app.topic("read-request", value_type=ReadRequest)}

def get_app() -> f.types.AppT:
    return app

def get_topic(name: str) -> f.types.TopicT:
    return topics[name]
My DB reader agent:
# reader.py
import pandas as pd
from pymongo import MongoClient

from app import get_app, get_topic

client = MongoClient()
app = get_app()
req_topic = get_topic("read-request")

@app.agent(req_topic)
async def read_request(requests):
    async for request in requests:
        db = client.test
        coll = db[request["collection"]]
        result = coll.find(request["query"])
        df = pd.DataFrame(result)
        response = {
            "id": request["id"],
            "data": list(df.loc[:, df.columns != "_id"].to_dict(orient="records")),
        }
        print(response)  # debug <1>
        yield response
Model definitions:
# models.py
import faust as f

class ReadRequest(f.Record):
    id: int
    collection: str
    query: dict
Test agent.ask()
# test.py
import asyncio

from reader import read_request
from models import ReadRequest

async def run():
    result = await read_request.ask(ReadRequest(id=1, collection="test", query={}))
    print(result)  # debug <2>

if __name__ == "__main__":
    asyncio.run(run())
So I have ZooKeeper, a Kafka server, MongoDB, and the faust worker reader running. Everything uses out-of-the-box configs.
When I run python3 test.py, I see the debug <1> print output as expected, but debug <2> never fires and execution hangs there.
The Faust docs say that agent.ask() waits for the reply from the agent, so I assume that I am doing everything right.
Does anyone have any clues?
import falcon
import json
from tasks import add
from waitress import serve

class tasksresource:
    def on_get(self, req, resp):
        """Handles GET requests"""
        self.result = add.delay(1, 2)
        self.context = {'ID': self.result.id, 'final result': self.result.ready()}
        resp.body = json.dumps(self.context)

api = falcon.API()
api.add_route('/result', tasksresource())
# api.add_route('/result/task', taskresult())

if __name__ == '__main__':
    serve(api, host='127.1.0.1', port=5555)
How do I get the task id from the JSON payload (POST data) and add a route for it?
Here is a small example. Structure of files:
/project
    __init__.py
    app.py      # routes, falcon etc.
    tasks.py    # celery
    example.py  # script demonstrating how it works
app.py:
import json

import falcon
from tasks import add
from celery.result import AsyncResult

class StartTask(object):
    def on_get(self, req, resp):
        # start task
        task = add.delay(4, 4)
        resp.status = falcon.HTTP_200
        # return task_id to client
        result = {'task_id': task.id}
        resp.body = json.dumps(result)

class TaskStatus(object):
    def on_get(self, req, resp, task_id):
        # get result of task by task_id and generate content for the client
        task_result = AsyncResult(task_id)
        result = {'status': task_result.status, 'result': task_result.result}
        resp.status = falcon.HTTP_200
        resp.body = json.dumps(result)

app = falcon.API()
# registration of routes
app.add_route('/start_task', StartTask())
app.add_route('/task_status/{task_id}', TaskStatus())
tasks.py:
from time import sleep

import celery

app = celery.Celery('tasks', broker='redis://localhost:6379/0', backend='redis://localhost:6379/0')

@app.task
def add(x, y):
    """
    :param int x:
    :param int y:
    :return: int
    """
    # sleep just for demonstration
    sleep(5)
    return x + y
Now we need to start the Celery application. Go to the project folder and run:
celery -A tasks worker --loglevel=info
After this we need to start the Falcon application. Go to the project folder and run:
gunicorn app:app
Ok. Everything is ready.
example.py is a small client-side script which can help to understand how it works:
from time import sleep

import requests

# start new task
task_info = requests.get('http://127.0.0.1:8000/start_task')
task_info = task_info.json()

while True:
    # check status of task by task_id while task is working
    result = requests.get('http://127.0.0.1:8000/task_status/' + task_info['task_id'])
    task_status = result.json()
    print(task_status)
    if task_status['status'] == 'SUCCESS' and task_status['result']:
        print('Task with id = %s is finished' % task_info['task_id'])
        print('Result: %s' % task_status['result'])
        break
    # sleep and check status one more time
    sleep(1)
Just call python ./example.py and you should see something like this:
{'status': 'PENDING', 'result': None}
{'status': 'PENDING', 'result': None}
{'status': 'PENDING', 'result': None}
{'status': 'PENDING', 'result': None}
{'status': 'PENDING', 'result': None}
{'status': 'SUCCESS', 'result': 8}
Task with id = 76542904-6c22-4536-99d9-87efd66d9fe7 is finished
Result: 8
Hope this helps you.
The above example by Danila Ganchar is great and very helpful. I'm using celery version 4.3.0 with Python 3, and one of the errors I received from using the example above is on this line:
task_result = AsyncResult(task_id)
The error I would receive is:
AttributeError: 'DisabledBackend' object has no attribute '_get_task_meta_for'
This may be a recent change, but result.AsyncResult (or just AsyncResult in this example, because he imported it from celery.result) doesn't know which backend you are using. There are 2 solutions to this problem:
1) You can take the AsyncResult of the actual task itself, add.AsyncResult(task_id), because the add task already has the backend defined through the @app.task decorator. The downside in this example is that you want to be able to get the result for any task by just passing its task_id via the Falcon endpoint, so this is limited.
2) The preferred method is to just pass the app parameter to AsyncResult:
task_result = AsyncResult(task_id, app=app)
Hope this helps!
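Applied to the TaskStatus resource from the earlier answer, option 2 could look like this (a sketch; celery_app is simply the app object defined in tasks.py, imported under another name):
import json

import falcon
from celery.result import AsyncResult

from tasks import app as celery_app

class TaskStatus(object):
    def on_get(self, req, resp, task_id):
        # passing the Celery app tells AsyncResult which result backend to query
        task_result = AsyncResult(task_id, app=celery_app)
        result = {'status': task_result.status, 'result': task_result.result}
        resp.status = falcon.HTTP_200
        resp.body = json.dumps(result)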