How to get an indication that Pytest parameter has been changed - pytest

I have a class with multiple tests in it, the class has two parameterizations.
If a particular parameter has changed, I need to revert the machine on which the test ran to its previous state.
Note: I can't use "machine" as one of the test parameters, lets assume it is an external configuration and this class knows how to run on requested "machine" (internal infrastructure limitation)
@pytest.mark.parametrize('iteration', range(1, ITERATIONS + 1), scope='class')
@pytest.mark.parametrize('browser', BROWSERS, ids=[browser.name for browser in BROWSERS],
indirect=True)
class ClassTest:
@pytest.fixture(autouse=True)
def revert_machine(self):
if browser changed: <------- how to catch if browser has been changed?
do revert machine <-------
@pytest.mark.dependency(name='test_a')
def test_a(self):
assert True
@pytest.mark.dependency(name='test_ams_attack_onset', depends=['test_a'])
def test_a(self):
assert True
How do I catch if browser has been changed?
I've tried the classic last/current comparison, but a global variable is required for storing LAST_BROWSER; I am looking for a more elegant, pytest-native solution.
collection order:
<Package acceptance>
<Module test_foo.py>
<Class ClassTest>
<Function test_open_browser[chrome-1]>
<Function test_close_browser[chrome-1]>
<Function test_open_browser[ff-1]>
<Function test_close_browser[ff-1]>

I think you are looking for fixture parametrization
import pytest
BROWSERS = ["chrome", "ff"]
ITERATIONS = 2
@pytest.fixture(params=BROWSERS, autouse=True)
def browser(request):
browser_ = request.param
yield browser_
# revert_machine()
@pytest.mark.parametrize('iteration', range(1, ITERATIONS + 1), scope='class')
class Test:
@pytest.mark.dependency(name='test_a')
def test_a(self, iteration):
assert True
@pytest.mark.dependency(name='test_ams_attack_onset', depends=['test_a'])
def test_a(self, iteration):
assert True

Related

Pytest run a final test on a shared fixture

I want to collect information from all my tests, to ensure that I've covered everything, but none of the posts I've come across seem to do this specifically.
If I use e.g. atexit, sessionfinish or other means mentioned when searching for "pytest function after all tests", I seem to lose the ability to use the fixture, and they seem like they're just teardown functions, rather than actual tests.
I want to be able to assert that 1 and 2 are in my fixture list, after running all tests.
import pytest
@pytest.fixture(scope="module")
def fxtr_test_list():
return []
def test_something_1(fxtr_test_list):
fxtr_test_list.append(1)
def test_something_2(fxtr_test_list):
fxtr_test_list.append(2)
@pytest.fixture(scope="session")
def global_check(request, fxtr_test_list):
assert len(fxtr_test_list) == 0 # initial check, should pass
def final_check(request):
assert len(fxtr_test_list) == 0 # final check, should fail
request.addfinalizer(final_check)
return request
You can use fixtures only in tests or other fixtures, so using a fixture in some hook is not possible.
If you don't need a dedicated test, you could just use the fixture itself for testing by making it an autouse-fixture:
import pytest
@pytest.fixture(scope="session")
def fxtr_test_list():
return []
...
@pytest.fixture(scope="session", autouse=True)
def global_check(request, fxtr_test_list):
assert len(fxtr_test_list) == 0 # initial check, should pass
yield
assert len(fxtr_test_list) == 0 # final check, should fail
Note that I changed the scope of the first fixture to "session", otherwise it cannot be used with a session-scoped fixture. Also, I have simplified the second fixture to use the standard setup / yield / teardown pattern.
This gives you something like:
$ python -m pytest -v test_after_all.py
=================================================
...
collected 2 items
test_after_all.py::test_something_1 PASSED
test_after_all.py::test_something_2 PASSED
test_after_all.py::test_something_2 ERROR
======================================================= ERRORS ========================================================
________________________________________ ERROR at teardown of test_something_2 ________________________________________
request = <SubRequest 'global_check' for <Function test_something_1>>, fxtr_test_list = [1, 2]
@pytest.fixture(scope="session", autouse=True)
def global_check(request, fxtr_test_list):
assert len(fxtr_test_list) == 0 # initial check, should pass
yield
> assert len(fxtr_test_list) == 0 # final check, should fail
E assert 2 == 0
E +2
E -0
...
============================================= 2 passed, 1 error in 0.23s ==============================================
If you really need a dedicated test as the last test, you could use an ordering plugin like pytest-order and mark the test as the last:
@pytest.mark.order(-1)
def test_all_tests(global_check):
...

pytest : how to parametrize a test with every fixtures satisfying a certain condition

I have a large set of scenarios defined as pytest fixtures. I would like to run my test suite with all these scenarios. The following example gives a quite satisfying solution :
import pytest
# ----------------------------------------
# My scenarios
@pytest.fixture()
def scn_1():
return 1
@pytest.fixture()
def scn_2():
return 2
@pytest.fixture()
def scn_3():
return 3
# -------------------------------------------------
# A fixture collecting all the scenarios
@pytest.fixture(params=['scn_1', 'scn_2', 'scn_3'])
def scn_result(request):
scn_name = request.param
return request.getfixturevalue(scn_name)
# ----------------------------------------------
# my test suite
def test_a(scn_result):
assert scn_result in [1,2,3]
def test_b(scn_result):
assert scn_result in [1,2,3,4]
The problem is that I have to list all the fixture names manually. Is there a way to parametrize the fixture scn_result with all the fixtures whose names start with "scn_"? Or any solution allowing the tests to be parametrized automatically with all these fixtures?

pytest - #parametrize - can i #parametrize a test using data GIVEN by a fixture?

using python 3.8 and pytest 5.3.2
in conftest.py i have a fixture that read data from a json configuration file,
in order to reuse data read from the json configuration file in a several test.
configuration.json
{"username":"user", "endpoints":["https://www.something.com/url1", "https://www.something.com/url2"]}
conftest.py
def pytest_addoption(parser):
parser.addoption("--configuration", action="store", default="configuration.json")
@pytest.fixture(scope="session")
def configuration(request):
configuration= None
configuration= request.config.getoption("--configuration")
with open(configuration, 'r') as f:
configuration= json.load(f)
return configuration
and this works fine :
class TestService():
def test_something(configuration):
assert configuration['username'] == 'user'
in this case the test read data from configuration fixture that read data from configuration.json file.
the problem is when i want to use this data with #parametrize :
i want to convert this approach:
@mark.parametrize('endpoint', [
"https://www.something.com/url1", "https://www.something.com/url2"
])
def test_endpoints_parametrizzato(endpoint):
print(endpoint)
assert requests.get(endpoint).status_code == 200
with this approach
@mark.parametrize('endpoint', configuration['endpoints'])
def test_endpoints_parametrizzato(endpoint):
print(endpoint)
assert requests.get(endpoint).status_code == 200
but this will not work because #parametrize will not "see" the fixture i want to use to parametrize it. i read a lot of articles but i'm not able to understand if i'm doing something wrong or i cannot parametrize reading data from a fixture. can someone help me explaining again? i read
https://github.com/pytest-dev/pytest/issues/349
https://docs.pytest.org/en/latest/proposals/parametrize_with_fixtures.html
Pass a parameter to a fixture function
as discussed here you can't.
you can use a custom test generator
Instead of using a fixture to pass as parametrize argument you can write a function and call it.
def get_configuration(key):
with open("configuration.json", 'r') as f:
configuration = json.load(f)
return configuration.get(key)
@mark.parametrize('endpoint', get_configuration('endpoints'))
def test_endpoints_parametrizzato(endpoint):
print(endpoint)
assert requests.get(endpoint).status_code == 200

Using fixtures at collect time in pytest

I use testinfra with ansible transport. It provides host fixture which has ansible, so I can do host.ansible.get_variables().
Now I need to create a parametrization of test based on value from this inventory.
Inventory:
foo:
hosts:
foo1:
somedata:
- data1
- data2
I want to write a test which tests each of 'data' from somedata for each host in inventory. 'Each host' part is handled by testnfra, but I'm struggling with parametrization of the test:
@pytest.fixture
def somedata(host):
return host.ansible.get_variables()["somedata"]
@pytest.fixture(params=somedata)
def data(request):
return request.param
def test_data(host, data):
assert 'data' in data
I've tried both ways:
@pytest.fixture(params=somedata) -> TypeError: 'function' object is not iterable
@pytest.fixture(params=somedata()) -> Fixture "somedata" called directly. Fixtures are not meant to be called directly...
How can I do this? I understand that I can't change the number of tests at test time, but I pretty sure I have the same inventory at collection time, so, theoretically, it can be doable...
After reading a lot of source code I have came to conclusion, that it's impossible to call fixtures at collection time. There are no fixtures at collection time, and any parametrization should happen before any tests are called. Moreover, it's impossible to change number of tests at test time (so no fixture could change that).
Answering my own question on using Ansible inventory to parametrize a test function: It's possible, but it requires manually reading inventory, hosts, etc. There is a special hook for that: pytest_generate_tests (it's a function, not a fixture).
My current code to get any test parametrized by host_interface fixture is:
def cartesian(hosts, ar):
for host in hosts:
for interface in ar.get_variables(host).get("interfaces",[]):
yield (host, interface)
def pytest_generate_tests(metafunc):
if 'host_interface' in metafunc.fixturenames:
inventory_file = metafunc.config.getoption('ansible_inventory')
ansible_config = testinfra.utils.ansible_runner.get_ansible_config()
inventory = testinfra.utils.ansible_runner.get_ansible_inventory(ansible_config, inventory_file)
ar = testinfra.utils.ansible_runner.AnsibleRunner(inventory_file)
hosts = ar.get_hosts(metafunc.config.option.hosts)
metafunc.parametrize("host_interface", cartesian(hosts, ar))
You should use helper function instead of fixture to parametrize another fixture. Fixtures can not be used as decorator parameters in pytest.
def somedata(host):
return host.ansible.get_variables()["somedata"]
@pytest.fixture(params=somedata())
def data(request):
return request.param
def test_data(host, data):
assert 'data' in data
This assumes that the host is not a fixture.
If the host is a fixture, there is hacky way to get around the problem. You should write the parameters to a tmp file or in a environment variable and read it with a helper function.
import os
@pytest.fixture(autouse=True)
def somedata(host):
os.environ["host_param"] = host.ansible.get_variables()["somedata"]
def get_params():
return os.environ["host_param"] # do some clean up to return a list instead of a string
@pytest.fixture(params=get_params())
def data(request):
return request.param
def test_data(host, data):
assert 'data' in data

Pytest yield fixture usage

I have a use case where I may use fixture multiple times inside a test in a "context manager" way. See example code below:
in conftest.py
class SomeYield(object):
def __enter__(self):
log.info("SomeYield.__enter__")
def __exit__(self, exc_type, exc_val, exc_tb):
log.info("SomeYield.__exit__")
def generate_name():
name = "{current_time}-{uuid}".format(
current_time=datetime.now().strftime("%Y-%m-%d-%H-%M-%S"),
uuid=str(uuid.uuid4())[:4]
)
return name
@pytest.yield_fixture
def some_yield():
name = generate_name()
log.info("Start: {}".format(name))
yield SomeYield()
log.info("End: {}".format(name))
in test_some_yield.py
def test_some_yield(some_yield):
with some_yield:
pass
with some_yield:
pass
Console output:
INFO:conftest:Start: 2017-12-06-01-50-32-5213
INFO:conftest:SomeYield.__enter__
INFO:conftest:SomeYield.__exit__
INFO:conftest:SomeYield.__enter__
INFO:conftest:SomeYield.__exit__
INFO:conftest:End: 2017-12-06-01-50-32-5213
Questions:
If I have some setup code in SomeYield.__enter__ and cleanup code in
SomeYield.__exit__, is this the right way to do it using a fixture for
multiple calls in my test?
Why didn't I see three occurrences of
__enter__ and __exit__? Is this expected?