Python testing with parameterization from function return - pytest

I've been trying to solve a Python testing problem using pytest but have not been able to find an example configuration that works, though some are close. Here is my case study:
@pytest.fixture
def vil_check():
    # code
    return [(v1, v2, v3), (...), (...)]

@pytest.mark.parameterize("v1,v2,v3", vil_check):
def test_one(v1, v2, v3):
    assert v1 < 2
    assert v2 > 5
    ...
I'm trying to follow this example:
@pytest.mark.parametrize("test_input,expected", [("3+5", 8), ("2+4", 6), ("6*9", 42)])
def test_eval(test_input, expected):
    assert eval(test_input) == expected
But using a fixture to supply the list: [("3+5", 8), ("2+4", 6), ("6*9", 42)].
However, this configuration doesn't work:
@pytest.mark.parametrize("v1, v2, v3", vil_check)
def test_max(v1, v2, v3):
    assert abs(v1) <= 5
The error is that pytest doesn't see vil_check's return value as iterable.
There seems to be a way to use pytest_generate_tests to accomplish this but I'm drawing a blank on how to write it.

As per the OP's comment, because vil_check need not be a fixture, here's what you can do: remove the fixture decorator from vil_check and call the function in mark.parametrize below:
def vil_check():
    # code
    yield from [(v1, v2, v3), (...), (...)]

@pytest.mark.parametrize("v1,v2,v3", vil_check())
def test_one(v1, v2, v3):
    assert v1 < 2
    assert v2 > 5
    # code
A few points:
you have spelled parametrize wrong (parameterize); this may give you an error if you have set --strict-markers.
the decorator line should not end with a :
for performance, I used yield from instead of return in vil_check; this is more efficient in case the list is huge.
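For completeness (a sketch, not from the original answer): the pytest_generate_tests route mentioned in the question would look roughly like this in conftest.py, with vil_check as a plain helper and placeholder tuple values:
def vil_check():
    # plain helper, not a fixture; returns the parameter tuples
    return [(1, 6, 0), (0, 7, 1)]  # placeholder values

def pytest_generate_tests(metafunc):
    # parametrize any test that declares v1, v2 and v3 as arguments
    if {"v1", "v2", "v3"} <= set(metafunc.fixturenames):
        metafunc.parametrize("v1,v2,v3", vil_check())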

Is it possible to skip pytest tests depending on the parameter?

I have code that includes a test class, tests, fixtures, and parametrization, like this:
import pytest

@pytest.fixture
def num():
    return 1

@pytest.mark.parametrize('n', [1, 2])
class TestNum:
    def test_num(self, num, n):
        if n == 2:
            pytest.skip()
        assert num == n
But I want something like this:
import pytest

@pytest.fixture
def num():
    return 1

@pytest.mark.parametrize('n', [1, 2])
class TestNum:
    @pytest.mark.skipif(n == 2, reason='no reason to test that')
    def test_num(self, num, n):
        assert num == n
Question: is it possible to skip a test depending on the class parameter value from "@pytest.mark.parametrize('n', [1, 2])", before the fixtures run?
Why "if [condition]: pytest.skip()" does not satisfy me:
I work on a web app project using the Playwright framework, and my code looks like this:
import pytest
from playwright.sync_api import Page

@pytest.fixture
def new_page(page: Page):
    page.goto(URL)
    return page

@pytest.mark.parametrize('n', [1, 2])
class TestA:
    def test_a(self, n, new_page):
        if n == 2:
            pytest.skip()
        assert True
There are fixtures that create a web page (page) when a test starts, and I have a few such fixtures in my tests, which create several pages.
So the main issue is to skip a test, based on the parameter in mark.parametrize, before the fixtures run, to save time.
After long searching and reviewing the theory, I have found no answer to my specific question.
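For reference (not from the original thread): pytest itself can attach marks to individual parameter sets via pytest.param, and a skip mark applied this way takes effect at collection time, so no fixtures run for the skipped case. A minimal sketch, dropping the num fixture for brevity:
import pytest

@pytest.mark.parametrize('n', [
    1,
    pytest.param(2, marks=pytest.mark.skip(reason='no reason to test that')),
])
class TestNum:
    def test_num(self, n):
        assert n == 1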

Pytest: run a final test on a shared fixture

I want to collect information from all my tests, to ensure that I've covered everything, but none of the posts I've come across seem to do this specifically.
If I use e.g. atexit, pytest_sessionfinish or other means mentioned when searching for "pytest function after all tests", I seem to lose the ability to use the fixture, and they seem like they're just teardown functions rather than actual tests.
I want to be able to assert that 1 and 2 are in my fixture list, after running all tests.
import pytest

@pytest.fixture(scope="module")
def fxtr_test_list():
    return []

def test_something_1(fxtr_test_list):
    fxtr_test_list.append(1)

def test_something_2(fxtr_test_list):
    fxtr_test_list.append(2)

@pytest.fixture(scope="session")
def global_check(request, fxtr_test_list):
    assert len(fxtr_test_list) == 0  # initial check, should pass
    def final_check():  # finalizers are called with no arguments
        assert len(fxtr_test_list) == 0  # final check, should fail
    request.addfinalizer(final_check)
    return request
You can use fixtures only in tests or other fixtures, so using a fixture in some hook is not possible.
If you don't need a dedicated test, you could just use the fixture itself for testing by making it an autouse fixture:
import pytest

@pytest.fixture(scope="session")
def fxtr_test_list():
    return []

...

@pytest.fixture(scope="session", autouse=True)
def global_check(request, fxtr_test_list):
    assert len(fxtr_test_list) == 0  # initial check, should pass
    yield
    assert len(fxtr_test_list) == 0  # final check, should fail
Note that I changed the scope of the first fixture to "session", otherwise it cannot be used by a session-scoped fixture. Also, I have simplified the second fixture to use the standard setup / yield / teardown pattern.
This gives you something like:
$ python -m pytest -v test_after_all.py
=================================================
...
collected 2 items

test_after_all.py::test_something_1 PASSED
test_after_all.py::test_something_2 PASSED
test_after_all.py::test_something_2 ERROR

======================================================= ERRORS ========================================================
________________________________________ ERROR at teardown of test_something_2 ________________________________________
request = <SubRequest 'global_check' for <Function test_something_1>>, fxtr_test_list = [1, 2]

    @pytest.fixture(scope="session", autouse=True)
    def global_check(request, fxtr_test_list):
        assert len(fxtr_test_list) == 0  # initial check, should pass
        yield
>       assert len(fxtr_test_list) == 0  # final check, should fail
E       assert 2 == 0
E         +2
E         -0
...
============================================= 2 passed, 1 error in 0.23s ==============================================
If you really need a dedicated test as the last test, you could use an ordering plugin like pytest-order and mark the test as the last one:
@pytest.mark.order(-1)
def test_all_tests(global_check):
    ...
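A fuller sketch of that approach, assuming pytest-order is installed (pip install pytest-order) and the session-scoped fxtr_test_list fixture from above:
import pytest

@pytest.mark.order(-1)  # pytest-order: negative indices count from the end, so this runs last
def test_all_tests(fxtr_test_list):
    # by this point every earlier test has appended its value
    assert 1 in fxtr_test_list
    assert 2 in fxtr_test_list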

pytest - @parametrize - can I @parametrize a test using data GIVEN by a fixture?

Using Python 3.8 and pytest 5.3.2. In conftest.py I have a fixture that reads data from a JSON configuration file, in order to reuse that data in several tests.
configuration.json
{"username":"user", "endpoints":["https://www.something.com/url1", "https://www.something.com/url2"]}
conftest.py
import json
import pytest

def pytest_addoption(parser):
    parser.addoption("--configuration", action="store", default="configuration.json")

@pytest.fixture(scope="session")
def configuration(request):
    configuration = request.config.getoption("--configuration")
    with open(configuration, 'r') as f:
        configuration = json.load(f)
    return configuration
and this works fine:
class TestService():
    def test_something(self, configuration):
        assert configuration['username'] == 'user'
In this case the test reads data from the configuration fixture, which reads it from the configuration.json file.
The problem is when I want to use this data with @parametrize:
I want to convert this approach:
@mark.parametrize('endpoint', [
    "https://www.something.com/url1", "https://www.something.com/url2"
])
def test_endpoints_parametrizzato(endpoint):
    print(endpoint)
    assert requests.get(endpoint).status_code == 200
into this approach:
@mark.parametrize('endpoint', configuration['endpoints'])
def test_endpoints_parametrizzato(endpoint):
    print(endpoint)
    assert requests.get(endpoint).status_code == 200
But this will not work, because @parametrize will not "see" the fixture I want to use to parametrize the test. I have read a lot of articles, but I cannot work out whether I'm doing something wrong or whether it is simply impossible to parametrize with data read from a fixture. Can someone help explain? I have read:
https://github.com/pytest-dev/pytest/issues/349
https://docs.pytest.org/en/latest/proposals/parametrize_with_fixtures.html
Pass a parameter to a fixture function
As discussed here, you can't.
You can use a custom test generator instead.
Instead of using a fixture to supply the parametrize argument, you can write a plain function and call it:
import json
import requests
from pytest import mark

def get_configuration(key):
    with open("configuration.json", 'r') as f:
        configuration = json.load(f)
    return configuration.get(key)

@mark.parametrize('endpoint', get_configuration('endpoints'))
def test_endpoints_parametrizzato(endpoint):
    print(endpoint)
    assert requests.get(endpoint).status_code == 200
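As an added sketch beyond the answer above: if you still want to honor the --configuration command-line option rather than hardcoding the file name, the pytest_generate_tests hook can read it at collection time, since the parsed options are available on metafunc.config. In conftest.py:
import json

def pytest_generate_tests(metafunc):
    if 'endpoint' in metafunc.fixturenames:
        # the option registered in pytest_addoption is available here
        path = metafunc.config.getoption('--configuration')
        with open(path) as f:
            configuration = json.load(f)
        metafunc.parametrize('endpoint', configuration['endpoints'])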

Using fixtures at collect time in pytest

I use testinfra with the Ansible transport. It provides a host fixture which has ansible, so I can do host.ansible.get_variables().
Now I need to parametrize a test based on a value from this inventory.
Inventory:
foo:
  hosts:
    foo1:
      somedata:
        - data1
        - data2
I want to write a test which tests each of the 'data' entries from somedata for each host in the inventory. The 'each host' part is handled by testinfra, but I'm struggling with the parametrization of the test:
@pytest.fixture
def somedata(host):
    return host.ansible.get_variables()["somedata"]

@pytest.fixture(params=somedata)
def data(request):
    return request.param

def test_data(host, data):
    assert 'data' in data
I've tried both ways:
@pytest.fixture(params=somedata) -> TypeError: 'function' object is not iterable
@pytest.fixture(params=somedata()) -> Fixture "somedata" called directly. Fixtures are not meant to be called directly...
How can I do this? I understand that I can't change the number of tests at test time, but I'm pretty sure I have the same inventory at collection time, so, theoretically, it should be doable...
After reading a lot of source code I have come to the conclusion that it's impossible to call fixtures at collection time. There are no fixtures at collection time, and any parametrization must happen before any tests run. Moreover, it's impossible to change the number of tests at test time (so no fixture could change that).
Answering my own question on using an Ansible inventory to parametrize a test function: it's possible, but it requires manually reading the inventory, hosts, etc. There is a special hook for that: pytest_generate_tests (it's a function, not a fixture).
My current code to get any test parametrized by the host_interface fixture is:
def cartesian(hosts, ar):
    for host in hosts:
        for interface in ar.get_variables(host).get("interfaces", []):
            yield (host, interface)

def pytest_generate_tests(metafunc):
    if 'host_interface' in metafunc.fixturenames:
        inventory_file = metafunc.config.getoption('ansible_inventory')
        ansible_config = testinfra.utils.ansible_runner.get_ansible_config()
        inventory = testinfra.utils.ansible_runner.get_ansible_inventory(ansible_config, inventory_file)
        ar = testinfra.utils.ansible_runner.AnsibleRunner(inventory_file)
        hosts = ar.get_hosts(metafunc.config.option.hosts)
        metafunc.parametrize("host_interface", cartesian(hosts, ar))
You should use a helper function instead of a fixture to parametrize another fixture. Fixtures cannot be used as decorator parameters in pytest.
def somedata(host):
    return host.ansible.get_variables()["somedata"]

@pytest.fixture(params=somedata())
def data(request):
    return request.param

def test_data(host, data):
    assert 'data' in data
This assumes that host is not a fixture.
If host is a fixture, there is a hacky way to get around the problem: write the parameters to a temp file or an environment variable and read them back with a helper function.
import os

@pytest.fixture(autouse=True)
def somedata(host):
    os.environ["host_param"] = host.ansible.get_variables()["somedata"]

def get_params():
    return os.environ["host_param"]  # do some clean-up to return a list instead of a string

@pytest.fixture(params=get_params())
def data(request):
    return request.param

def test_data(host, data):
    assert 'data' in data

Give Pytest fixtures different scopes for different tests

In my test suite, I have certain data-generation fixtures which are used with many parameterized tests. Some of these tests want the fixture to run only once per session, while others need it to run for every function. For example, I may have a fixture similar to:
@pytest.fixture
def get_random_person():
    return random.choice(list_of_people)
and 2 parameterized tests, one which wants to use the same person for each test condition and one which wants a new person each time. Is there any way for this fixture to have scope="session" for one test and scope="function" for another?
James' answer is okay, but it doesn't help if you yield from your fixture code. This is a better way to do it:
# Built In
from contextlib import contextmanager
# 3rd Party
import pytest

@pytest.fixture(scope='session')
def fixture_session_fruit():
    """Showing how fixtures can still be passed to the different scopes.

    If it is `session` scoped then it can be used by all the different scopes;
    otherwise, it must be the same scope or higher than the one it is used on.
    If this was `module` scoped then this fixture could NOT be used on `fixture_session_scope`.
    """
    return "apple"

@contextmanager
def _context_for_fixture(val_to_yield_after_setup):
    # Rather long and complicated fixture implementation here
    print('SETUP: Running before the test')
    yield val_to_yield_after_setup  # Let the test code run
    print('TEARDOWN: Running after the test')

@pytest.fixture(scope='function')
def fixture_function_scope(fixture_session_fruit):
    with _context_for_fixture(fixture_session_fruit) as result:
        yield result

@pytest.fixture(scope='class')
def fixture_class_scope(fixture_session_fruit):
    with _context_for_fixture(fixture_session_fruit) as result:
        yield result

@pytest.fixture(scope='module')
def fixture_module_scope(fixture_session_fruit):
    with _context_for_fixture(fixture_session_fruit) as result:
        yield result

@pytest.fixture(scope='session')
def fixture_session_scope(fixture_session_fruit):
    with _context_for_fixture(fixture_session_fruit) as result:
        # NOTE: if `_context_for_fixture` just did `yield` without any value,
        # there should still be a `yield` here to keep the fixture
        # inside the context till it is done. Just remove the ` result` part.
        yield result
This way you can still handle contextual fixtures.
Github issue for reference: https://github.com/pytest-dev/pytest/issues/3425
One way to do this is to separate out the implementation and then have two differently-scoped fixtures return it. So something like:
def _random_person():
    return random.choice(list_of_people)

@pytest.fixture(scope='function')
def get_random_person_function_scope():
    return _random_person()

@pytest.fixture(scope='session')
def get_random_person_session_scope():
    return _random_person()
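Tests then request whichever name matches the lifetime they want; a sketch with hypothetical test names, assuming the list_of_people from the question:
# a new person is drawn for every test function
def test_new_person_each_time(get_random_person_function_scope):
    assert get_random_person_function_scope in list_of_people

# the same person is reused for every test in the session
def test_same_person_whole_session(get_random_person_session_scope):
    assert get_random_person_session_scope in list_of_people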
I've been doing this:
def _some_fixture(a_dependency_fixture):
    def __some_fixture(x):
        return x
    yield __some_fixture

some_temp_fixture = pytest.fixture(_some_fixture, scope="function")
some_module_fixture = pytest.fixture(_some_fixture, scope="module")
some_session_fixture = pytest.fixture(_some_fixture, scope="session")
Less verbose than using a context manager.
Actually, there is a workaround for this using the request object.
You could do something like:
@pytest.fixture(scope='class')
def get_random_person(request):
    request.scope = getattr(request.cls, 'scope', request.scope)
    return random.choice(list_of_people)
Then back at the test class:
@pytest.mark.usefixtures('get_random_person')
class TestSomething:
    scope = 'function'

    def a_random_test(self):
        ...

    def another_test(self):
        ...
However, this only works properly for choosing between 'function' and 'class' scope, and particularly if the fixture starts as class-scoped (and then changes to 'function' or is left as is).
If I try the other way around (from 'function' to 'class'), funny stuff happens and I still can't figure out why.