Is it possible to skip pytest tests depending on a parameter? - pytest

I have code that includes a test class, tests, fixtures, and parametrization, like this:
import pytest

@pytest.fixture
def num():
    return 1

@pytest.mark.parametrize('n', [1, 2])
class TestNum:
    def test_num(self, num, n):
        if n == 2:
            pytest.skip()
        assert num == n
But I want something like this:
import pytest

@pytest.fixture
def num():
    return 1

@pytest.mark.parametrize('n', [1, 2])
class TestNum:
    @pytest.mark.skipif(n == 2, reason='no reason to test that')  # does not work: n is undefined here
    def test_num(self, num, n):
        assert num == n
Question: is it possible to skip a test depending on the class parameter value from "@pytest.mark.parametrize('n', [1, 2])", before the fixtures run?
Why "if [condition]: pytest.skip()" does not satisfy me:
I work on the web app project, using Playwright framework and my code is like this:
import pytest
from playwright.sync_api import Page

@pytest.fixture
def new_page(page: Page):
    page.goto(URL)
    return page

@pytest.mark.parametrize('n', [1, 2])
class TestA:
    def test_a(self, n, new_page):
        if n == 2:
            pytest.skip()
        assert True
There are fixtures that create a web page (page) when the test starts, and I have several such fixtures in my tests, each creating a page.
So the main issue is to skip a test, depending on the parameter in mark.parametrize, before the fixtures run, to save time.
After a long search and a review of the theory, I have found no answer to my specific question.
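One approach that does skip before any fixtures are set up is to attach a skip mark to the parameter value itself via pytest.param (a minimal sketch reusing the names from the question; pytest.param with marks is standard pytest API):

import pytest

@pytest.fixture
def num():
    return 1

# The skip mark travels with the parameter, so the test is skipped
# at setup time, before any of its fixtures are executed.
@pytest.mark.parametrize('n', [
    1,
    pytest.param(2, marks=pytest.mark.skip(reason='no reason to test that')),
])
class TestNum:
    def test_num(self, num, n):
        assert num == n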

Related

How to get an indication that Pytest parameter has been changed

I have a class with multiple tests in it, and the class has two parametrizations.
If a particular parameter has changed, I need to revert the machine on which the test ran to its previous state.
Note: I can't use "machine" as one of the test parameters; let's assume it is an external configuration and this class knows how to run on the requested "machine" (internal infrastructure limitation).
@pytest.mark.parametrize('iteration', range(1, ITERATIONS + 1), scope='class')
@pytest.mark.parametrize('browser', BROWSERS, ids=[browser.name for browser in BROWSERS],
                         indirect=True)
class ClassTest:
    @pytest.fixture(autouse=True)
    def revert_machine(self):
        if browser changed:     # <------- how to catch that the browser has been changed?
            do revert machine   # <-------

    @pytest.mark.dependency(name='test_a')
    def test_a(self):
        assert True

    @pytest.mark.dependency(name='test_ams_attack_onset', depends=['test_a'])
    def test_b(self):
        assert True
How do I catch that the browser has been changed?
I've tried the classic last/current comparison, but that requires a global variable for storing LAST_BROWSER; I am looking for a more elegant, pytest-native solution.
collection order:
<Package acceptance>
<Module test_foo.py>
<Class ClassTest>
<Function test_open_browser[chrome-1]>
<Function test_close_browser[chrome-1]>
<Function test_open_browser[ff-1]>
<Function test_close_browser[ff-1]>
I think you are looking for fixture parametrization:
import pytest

BROWSERS = ["chrome", "ff"]
ITERATIONS = 2

@pytest.fixture(params=BROWSERS, autouse=True)
def browser(request):
    browser_ = request.param
    yield browser_
    # revert_machine()

@pytest.mark.parametrize('iteration', range(1, ITERATIONS + 1), scope='class')
class Test:
    @pytest.mark.dependency(name='test_a')
    def test_a(self, iteration):
        assert True

    @pytest.mark.dependency(name='test_ams_attack_onset', depends=['test_a'])
    def test_b(self, iteration):
        assert True
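One refinement worth noting (a sketch, assuming the revert only needs to happen when the browser actually changes): with the default function scope above, the code after yield runs after every test. A parametrized fixture with a wider scope is finalized only when its parameter changes, because pytest groups tests by higher-scoped parameters:

import pytest

BROWSERS = ["chrome", "ff"]

@pytest.fixture(params=BROWSERS, scope="class", autouse=True)
def browser(request):
    yield request.param
    # Runs only when pytest moves on to the next browser param
    # (or finishes the class), not after every single test.
    # revert_machine()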

Python testing with parameterization from function return

I've been trying to solve a Python test using pytest but have not been able to find an example configuration that works - though some are close. Here is my case study:
@pytest.fixture
def vil_check():
    # code
    return [(v1, v2, v3), (...), (...)]

@pytest.mark.parameterize("v1,v2,v3", vil_check):
def test_one(v1, v2, v3):
    assert v1 < 2
    assert v2 > 5
    ....
I'm trying to follow this example:
#pytest.mark.parametrize("test_input,expected", [("3+5", 8), ("2+4", 6), ("6*9", 42)])
def test_eval(test_input, expected):
assert eval(test_input) == expected
But using a fixture to supply the list: [("3+5", 8), ("2+4", 6), ("6*9", 42)].
However, this configuration doesn't work:
#pytest.mark.parametrize("v1, v2, v3", vil_check)
def test_max(v1, v2, v3):
assert abs(v1) <= 5
The error is that pytest doesn't see vil_check's return value as iterable.
There seems to be a way to use pytest_generate_tests to accomplish this but I'm drawing a blank on how to write it.
As per OP's comment, because vil_check need not be a fixture, here's what you can do - remove the fixture decorator from vil_check and call it in mark.parametrize below:
def vil_check():
    # code
    yield from [(v1, v2, v3), (...), (...)]

@pytest.mark.parametrize("v1,v2,v3", vil_check())
def test_one(v1, v2, v3):
    assert v1 < 2
    assert v2 > 5
    # code
A few points:
you have spelled parametrize wrong; this may give you an error if you have set --strict-markers.
the decorator line should not end with a colon (:).
for performance, I used yield from instead of return in vil_check; this is more efficient in case the list is huge.
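For completeness, the pytest_generate_tests route the question mentions could look like this (a sketch; it assumes vil_check is a plain function available in conftest.py, and the sample values are made up):

# conftest.py
def vil_check():
    # code that produces the triples
    yield from [(1, 6, 0), (0, 7, 1)]  # made-up sample values

def pytest_generate_tests(metafunc):
    # Parametrize any test that declares v1, v2 and v3 as arguments.
    if {"v1", "v2", "v3"} <= set(metafunc.fixturenames):
        metafunc.parametrize("v1,v2,v3", list(vil_check()))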

Pytest run a final test on a shared fixture

I want to collect information from all my tests, to ensure that I've covered everything, but none of the posts I've come across seem to do this specifically.
If I use e.g. atexit, sessionfinish or other means mentioned when searching for "pytest function after all tests", I seem to lose the ability to use the fixture, and they seem like they're just teardown functions, rather than actual tests.
I want to be able to assert that 1 and 2 are in my fixture list, after running all tests.
import pytest

@pytest.fixture(scope="module")
def fxtr_test_list():
    return []

def test_something_1(fxtr_test_list):
    fxtr_test_list.append(1)

def test_something_2(fxtr_test_list):
    fxtr_test_list.append(2)

@pytest.fixture(scope="session")
def global_check(request, fxtr_test_list):
    assert len(fxtr_test_list) == 0  # initial check, should pass

    def final_check(request):
        assert len(fxtr_test_list) == 0  # final check, should fail

    request.addfinalizer(final_check)
    return request
You can use fixtures only in tests or other fixtures, so using a fixture in some hook is not possible.
If you don't need a dedicated test, you could just use the fixture itself for testing by making it an autouse fixture:
import pytest

@pytest.fixture(scope="session")
def fxtr_test_list():
    return []

...

@pytest.fixture(scope="session", autouse=True)
def global_check(request, fxtr_test_list):
    assert len(fxtr_test_list) == 0  # initial check, should pass
    yield
    assert len(fxtr_test_list) == 0  # final check, should fail
Note that I changed the scope of the first fixture to "session", otherwise it cannot be used from a session-scoped fixture. Also, I have simplified the second fixture to use the standard setup / yield / teardown pattern.
This gives you something like:
$ python -m pytest -v test_after_all.py
=================================================
...
collected 2 items

test_after_all.py::test_something_1 PASSED
test_after_all.py::test_something_2 PASSED
test_after_all.py::test_something_2 ERROR

======================================================= ERRORS ========================================================
________________________________________ ERROR at teardown of test_something_2 ________________________________________
request = <SubRequest 'global_check' for <Function test_something_1>>, fxtr_test_list = [1, 2]

    @pytest.fixture(scope="session", autouse=True)
    def global_check(request, fxtr_test_list):
        assert len(fxtr_test_list) == 0  # initial check, should pass
        yield
>       assert len(fxtr_test_list) == 0  # final check, should fail
E       assert 2 == 0
E         +2
E         -0
...
============================================= 2 passed, 1 error in 0.23s ==============================================
If you really need a dedicated test as the last test, you could use an ordering plugin like pytest-order and mark the test to run last:

@pytest.mark.order(-1)
def test_all_tests(global_check):
    ...
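A minimal sketch of what that final test could look like, using the assertion from the question (the order marker comes from pytest-order; the body is an assumption):

import pytest

@pytest.mark.order(-1)  # pytest-order: run this test last
def test_all_tests(fxtr_test_list):
    # The check the question asked for: 1 and 2 were appended by the earlier tests.
    assert 1 in fxtr_test_list
    assert 2 in fxtr_test_list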

pytest: how to parametrize a test with every fixture satisfying a certain condition

I have a large set of scenarios defined as pytest fixtures. I would like to run my test suite with all these scenarios. The following example gives a fairly satisfying solution:
import pytest

# ----------------------------------------
# My scenarios

@pytest.fixture()
def scn_1():
    return 1

@pytest.fixture()
def scn_2():
    return 2

@pytest.fixture()
def scn_3():
    return 3

# -------------------------------------------------
# A fixture collecting all the scenarios

@pytest.fixture(params=['scn_1', 'scn_2', 'scn_3'])
def scn_result(request):
    scn_name = request.param
    return request.getfixturevalue(scn_name)

# ----------------------------------------------
# My test suite

def test_a(scn_result):
    assert scn_result in [1, 2, 3]

def test_b(scn_result):
    assert scn_result in [1, 2, 3, 4]
The problem is that I have to list all the fixture names manually. Is there a way to parametrize the fixture scn_result with all the fixtures whose names start with "scn_"? Or any solution that automatically parametrizes the tests with all these fixtures?
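One way to avoid the manual list (a sketch, not from the original post: it scans the module's own namespace at import time, so it assumes all the scn_ fixtures are defined in the same module, above the collecting fixture):

import pytest

@pytest.fixture()
def scn_1():
    return 1

@pytest.fixture()
def scn_2():
    return 2

# Snapshot every module-level name starting with "scn_"; this runs at
# import time, after the fixtures above are defined and before scn_result is.
_scn_names = sorted(name for name in list(globals())
                    if name.startswith("scn_") and name != "scn_result")

@pytest.fixture(params=_scn_names)
def scn_result(request):
    return request.getfixturevalue(request.param)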

Give Pytest fixtures different scopes for different tests

In my test suite, I have certain data-generation fixtures which are used with many parameterized tests. Some of these tests would want these fixtures to run only once per session, while others need them to run every function. For example, I may have a fixture similar to:
@pytest.fixture
def get_random_person():
    return random.choice(list_of_people)
and 2 parameterized tests, one which wants to use the same person for each test condition and one which wants a new person each time. Is there any way for this fixture to have scope="session" for one test and scope="function" for another?
James' answer is okay, but it doesn't help if you yield from your fixture code. This is a better way to do it:
# Built In
from contextlib import contextmanager
# 3rd Party
import pytest

@pytest.fixture(scope='session')
def fixture_session_fruit():
    """Showing how fixtures can still be passed to the different scopes.

    If it is `session` scoped then it can be used by all the different scopes;
    otherwise, it must be the same scope or higher than the one it is used on.
    If this was `module` scoped then this fixture could NOT be used on `fixture_session_scope`.
    """
    return "apple"

@contextmanager
def _context_for_fixture(val_to_yield_after_setup):
    # Rather long and complicated fixture implementation here
    print('SETUP: Running before the test')
    yield val_to_yield_after_setup  # Let the test code run
    print('TEARDOWN: Running after the test')

@pytest.fixture(scope='function')
def fixture_function_scope(fixture_session_fruit):
    with _context_for_fixture(fixture_session_fruit) as result:
        yield result

@pytest.fixture(scope='class')
def fixture_class_scope(fixture_session_fruit):
    with _context_for_fixture(fixture_session_fruit) as result:
        yield result

@pytest.fixture(scope='module')
def fixture_module_scope(fixture_session_fruit):
    with _context_for_fixture(fixture_session_fruit) as result:
        yield result

@pytest.fixture(scope='session')
def fixture_session_scope(fixture_session_fruit):
    with _context_for_fixture(fixture_session_fruit) as result:
        # NOTE if the `_context_for_fixture` just did `yield` without any value,
        # there should still be a `yield` here to keep the fixture
        # inside the context till it is done. Just remove the ` result` part.
        yield result
This way you can still handle contextual fixtures.
Github issue for reference: https://github.com/pytest-dev/pytest/issues/3425
One way to do this is to separate out the implementation and then have two differently-scoped fixtures return it. So something like:
def _random_person():
    return random.choice(list_of_people)

@pytest.fixture(scope='function')
def get_random_person_function_scope():
    return _random_person()

@pytest.fixture(scope='session')
def get_random_person_session_scope():
    return _random_person()
I've been doing this:
def _some_fixture(a_dependency_fixture):
    def __some_fixture(x):
        return x
    yield __some_fixture

some_temp_fixture = pytest.fixture(_some_fixture, scope="function")
some_module_fixture = pytest.fixture(_some_fixture, scope="module")
some_session_fixture = pytest.fixture(_some_fixture, scope="session")
Less verbose than using a context manager.
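Usage is then the same as for any other fixture (a sketch; a_dependency_fixture is a hypothetical name from the snippet above and must exist somewhere in scope):

import pytest

@pytest.fixture
def a_dependency_fixture():
    return None  # hypothetical placeholder dependency

def test_uses_temp(some_temp_fixture):
    # The yielded inner function simply echoes its argument.
    assert some_temp_fixture(41) == 41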
Actually there is a workaround for this using the request object.
You could do something like:
@pytest.fixture(scope='class')
def get_random_person(request):
    request.scope = getattr(request.cls, 'scope', request.scope)
    return random.choice(list_of_people)
Then back at the test class:
@pytest.mark.usefixtures('get_random_person')
class TestSomething:
    scope = 'function'

    def a_random_test(self):
        ...

    def another_test(self):
        ...
However, this only works properly for choosing between 'function' and 'class' scope, and particularly if the fixture starts as class-scoped (and then changes to 'function' or is left as is).
If I try the other way around (from 'function' to 'class'), funny stuff happens and I still can't figure out why.
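For what it's worth, newer pytest (5.2+) also supports dynamic scope, where scope is a callable that decides the scope at collection time (a sketch; the --reuse-person option is a made-up example):

import random
import pytest

list_of_people = ["alice", "bob"]  # stand-in for the question's data

def determine_scope(fixture_name, config):
    # Hypothetical command-line switch; unregistered options fall back to the default.
    if config.getoption("--reuse-person", default=None):
        return "session"
    return "function"

@pytest.fixture(scope=determine_scope)
def get_random_person():
    return random.choice(list_of_people)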