Skip test if dependency failed for particular parameter - pytest

Two tests:
First, check if the file exists.
Second, when the file exists, check if it has any content.
There are 3 files in total:
file_0 exists and has content.
file_1 does not exist.
file_2 exists and is empty.
I want to skip the second test for file_1 (I see no reason to check the content when the file does not exist). How can I do that?
Current code:
import os
from pathlib import Path

import pytest

file_0 = Path('C:\\file_0.txt')
file_1 = Path('C:\\file_1.txt')
file_2 = Path('C:\\file_2.txt')


@pytest.mark.parametrize('file_path', [file_0, file_1, file_2])
@pytest.mark.dependency(name='test_check_file_exists')
def test_file_exists(file_path):
    assert file_path.is_file(), "File does not exist."


@pytest.mark.parametrize('file_path', [file_0, file_1, file_2])
@pytest.mark.dependency(depends=['test_check_file_exists'])
def test_file_has_any_data(file_path):
    assert os.path.getsize(file_path) > 0, "File is empty."

The problem is that parametrized tests are not one, but several tests. To use dependency marks on parametrized tests, you have to make the dependency between specific parametrized tests, in your case from test_file_has_any_data[file_0] to test_file_exists[file_0] and so on.
This can be done by adding a dependency to each parameter:
@pytest.mark.parametrize("file_path", [
    pytest.param(file_0, marks=pytest.mark.dependency(name="f0")),
    pytest.param(file_1, marks=pytest.mark.dependency(name="f1")),
    pytest.param(file_2, marks=pytest.mark.dependency(name="f2"))
])
def test_file_exists(file_path):
    assert file_path.is_file(), "File does not exist."


@pytest.mark.parametrize("file_path", [
    pytest.param(file_0, marks=pytest.mark.dependency(depends=["f0"])),
    pytest.param(file_1, marks=pytest.mark.dependency(depends=["f1"])),
    pytest.param(file_2, marks=pytest.mark.dependency(depends=["f2"]))
])
def test_file_has_any_data(file_path):
    assert os.path.getsize(file_path) > 0, "File is empty."
pytest.param wraps a single parameter and allows adding marks specifically to that parameter, as shown above.
This is also covered in the pytest-dependency documentation.
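As a side note (not from the original answer), if the list of files grows, the per-parameter marks can be generated by a small helper instead of being written out by hand; this is just a sketch of the same pattern, where params_with_deps is a made-up helper name:

import pytest

# Hypothetical helper: build pytest.param entries with per-file dependency marks.
def params_with_deps(files, depends=False):
    params = []
    for i, f in enumerate(files):
        mark = (pytest.mark.dependency(depends=["f%d" % i]) if depends
                else pytest.mark.dependency(name="f%d" % i))
        params.append(pytest.param(f, marks=mark))
    return params

@pytest.mark.parametrize("file_path", params_with_deps([file_0, file_1, file_2]))
def test_file_exists(file_path):
    assert file_path.is_file(), "File does not exist."

@pytest.mark.parametrize("file_path", params_with_deps([file_0, file_1, file_2], depends=True))
def test_file_has_any_data(file_path):
    assert os.path.getsize(file_path) > 0, "File is empty."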


R1703: The if statement can be replaced with 'return bool(test)'

I wrote a function to check if a file exists, but pylint throws the message "R1703: The if statement can be replaced with 'return bool(test)'". What does the message mean? In addition, how do I write a pytest script to test my code below?
def is_file_valid(file):
    """
    check if input file exists
    :param file: file
    :return: True/False
    """
    if os.path.exists(file):
        return True
    else:
        return False
I've tried if ... == 1: but it doesn't seem to work.
def is_file_valid(file):
    """
    check if input file exists
    :param file: file
    :return: True/False
    """
    if os.path.exists(file) == 1:
        return True
    else:
        return False
For the pytest script, I wrote:
file_test = 'test.txt'  # actually this file does not exist.


def test_is_file_valid(file_test):
    # test is_file_valid()
    assert os.path.exists(file_test) is False, "No this file"
    os.remove(file_test)
pytest only shows the following message:
ERROR test/unit/test_io_utils.py::test_is_file_valid
Do you guys have any ideas how to figure it out?
The suggestion means that your function could be rewritten to return a boolean result without the need for an if statement. In your case os.path.exists already returns a boolean so it's as simple as returning its result directly.
def is_file_valid(file):
    """
    check if input file exists
    :param file: file
    :return: True/False
    """
    return os.path.exists(file)
However, whether the function in this state actually makes sense is, in my opinion, questionable, because I don't see any "added value" compared to using os.path.exists(file) directly.
As for how to test it... create (or not) a file in Pytest's temporary folder:
def test_is_file_valid_true(tmp_path):
    file = tmp_path / 'file.txt'
    file.touch()
    assert is_file_valid(file)


def test_is_file_valid_false(tmp_path):
    file = tmp_path / 'file.txt'
    assert not is_file_valid(file)
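As a small variation (not from the original answer), both cases could also be folded into one parametrized test; a sketch:

import pytest

@pytest.mark.parametrize("create, expected", [(True, True), (False, False)])
def test_is_file_valid(tmp_path, create, expected):
    file = tmp_path / 'file.txt'
    if create:
        file.touch()  # only the "exists" case actually creates the file
    assert is_file_valid(file) is expected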

Yocto - git revision in the image name

By default Yocto adds a build timestamp to the output image file name, but I would like to replace it with the revision of my integration Git repository (which references all my layers and configuration files). To achieve this, I put the following code into my image recipe:
def get_image_version(d):
    import subprocess
    import os.path
    try:
        parentRepo = os.path.dirname(d.getVar("COREBASE", True))
        return subprocess.check_output(["git", "describe", "--tags", "--long", "--dirty"], cwd=parentRepo, stderr=subprocess.DEVNULL).strip().decode('UTF-8')
    except:
        return d.getVar("MACHINE", True) + "-" + d.getVar("DATETIME", True)

IMAGE_VERSION = "${@get_image_version(d)}"
IMAGE_NAME = "${IMAGE_BASENAME}-${IMAGE_VERSION}"
IMAGE_NAME[vardepsexclude] = "IMAGE_VERSION"
This code works properly until I change the Git revision (e.g. by adding a new commit). Then I receive the following error:
ERROR: When reparsing /home/ubuntu/yocto/poky/../mylayer/recipes-custom/images/core-image-minimal.bb.do_image_tar, the basehash value changed from 63e1e69797d2813a4c36297517478a28 to 9788d4bf2950a23d0f758e4508b0a894. The metadata is not deterministic and this needs to be fixed.
I understand this happens because the image recipe has already been parsed with the older Git revision, but why don't constant changes of the build timestamp cause the same error? How can I fix my code to overcome this problem?
The timestamp does not have this effect since it is added to vardepsexclude:
https://docs.yoctoproject.org/bitbake/bitbake-user-manual/bitbake-user-manual-metadata.html#variable-flags
[vardepsexclude]: Specifies a space-separated list of variables that should be excluded from a variable’s dependencies for the purposes of calculating its signature.
You may need to add this in a couple of places, e.g.:
https://git.yoctoproject.org/poky/tree/meta/classes/image-artifact-names.bbclass#n7
IMAGE_VERSION_SUFFIX ?= "-${DATETIME}"
IMAGE_VERSION_SUFFIX[vardepsexclude] += "DATETIME SOURCE_DATE_EPOCH"
IMAGE_NAME ?= "${IMAGE_BASENAME}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
After some research it turned out the problem was in this line:
IMAGE_VERSION = "${@get_image_version(d)}"
because the function get_image_version() was called during parsing. I took inspiration from the source file in aehs29's post and moved the code to an anonymous Python function, which is called after parsing.
I also had to add the vardepsexclude attribute to the IMAGE_NAME variable. I tried adding the vardepvalue flag to the IMAGE_VERSION variable as well, and in this particular case it did the same job as vardepsexclude. The mentioned BitBake class uses both flags, but I think in my case using only one of them is enough.
The final code is below:
IMAGE_VERSION ?= "${MACHINE}-${DATETIME}"
IMAGE_NAME = "${IMAGE_BASENAME}-${IMAGE_VERSION}"
IMAGE_NAME[vardepsexclude] += "IMAGE_VERSION"

python () {
    import subprocess
    import os.path
    try:
        parentRepo = os.path.dirname(d.getVar("COREBASE", True))
        version = subprocess.check_output(["git", "describe", "--tags", "--long", "--dirty"], cwd=parentRepo, stderr=subprocess.DEVNULL).strip().decode('UTF-8')
        d.setVar("IMAGE_VERSION", version)
    except:
        bb.warn("Could not get Git revision, image will have default name.")
}
EDIT:
After some research I realized it's better to define a global variable in the layer.conf file of the layer containing the recipes that reference the variable. The variable is set by a Python function and is immediately expanded to prevent the non-deterministic metadata error:
layer.conf:
require conf/image-version.py.inc
IMAGE_VERSION := "${@get_image_version(d)}"
image-version.py.inc:
def get_image_version(d):
    import subprocess
    import os.path
    try:
        parentRepo = os.path.dirname(d.getVar("COREBASE", True))
        return subprocess.check_output(["git", "describe", "--tags", "--long", "--dirty"], cwd=parentRepo, stderr=subprocess.DEVNULL).strip().decode('UTF-8')
    except:
        bb.warn("Could not determine image version. Default naming schema will be used.")
        return d.getVar("MACHINE", True) + "-" + d.getVar("DATETIME", True)
I think this is a cleaner approach which fits the BitBake build system better.
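(Not part of the original answer.) To sanity-check what version string the recipe will pick up, the same command can be run standalone; the paths below are examples inferred from the error message above (COREBASE pointing at poky, its parent directory being the integration repository):

import os.path
import subprocess

corebase = '/home/ubuntu/yocto/poky'      # example COREBASE value
parent_repo = os.path.dirname(corebase)   # the integration repository
version = subprocess.check_output(
    ["git", "describe", "--tags", "--long", "--dirty"],
    cwd=parent_repo, stderr=subprocess.DEVNULL).strip().decode('UTF-8')
print(version)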

subprocess.run managing optional stdin and stdout

In Python >= 3.5 we can give optional stdout, stdin, and stderr to subprocess.run(). Per the docs:
Valid values are PIPE, DEVNULL, an existing file descriptor (a positive integer), an existing file object, and None. PIPE indicates that a new pipe to the child should be created.
I want to support passing through (at least) None or existing file objects whilst managing resources pythonically.
How should I manage the optional file resources in something like:
import subprocess

def wraps_subprocess(args=['ls', '-l'], stdin=None, stdout=None):
    # ... do important stuff
    subprocess.run(args=args, stdin=stdin, stdout=stdout)
A custom contextmanager (idea taken from this answer) seems to work:
import contextlib

@contextlib.contextmanager
def awesome_open(path_or_file_or_none, mode='rb'):
    if isinstance(path_or_file_or_none, str):
        file_ = needs_closed = open(path_or_file_or_none, mode)
    else:
        file_ = path_or_file_or_none
        needs_closed = None
    try:
        yield file_
    finally:
        if needs_closed:
            needs_closed.close()
which would be used like:
import subprocess

def wraps_subprocess(args=['ls', '-l'], stdin=None, stdout=None):
    # ... do important stuff
    with awesome_open(stdin, mode='rb') as fin, awesome_open(stdout, mode='wb') as fout:
        subprocess.run(args=args, stdin=fin, stdout=fout)
But I think there is probably a better way.
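(Not part of the original question, just an illustrative sketch.) One commonly suggested alternative, assuming Python 3.7+, is contextlib.nullcontext: open real paths ourselves so they get closed, and wrap anything else (an already-open file object, None, subprocess.PIPE, ...) in a no-op context manager so it is passed through untouched. _maybe_open is a made-up helper name:

import contextlib
import subprocess

def _maybe_open(path_or_file_or_none, mode):
    # Open paths ourselves so they are closed afterwards; everything else is
    # wrapped in a context manager that simply yields it back unchanged.
    if isinstance(path_or_file_or_none, str):
        return open(path_or_file_or_none, mode)
    return contextlib.nullcontext(path_or_file_or_none)

def wraps_subprocess(args=['ls', '-l'], stdin=None, stdout=None):
    # ... do important stuff
    with _maybe_open(stdin, 'rb') as fin, _maybe_open(stdout, 'wb') as fout:
        subprocess.run(args=args, stdin=fin, stdout=fout)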

Pytest - skip (xfail) mixed with parametrize

Is there a way to use the @incremental plugin as described at Pytest: how to skip the rest of tests in the class if one has failed? mixed with @pytest.mark.parametrize, like below:
@pytest.mark.incremental
class TestClass:
    @pytest.mark.parametrize("input", data)
    def test_preprocess_check(self, input):
        ...  # prerequisite for test

    @pytest.mark.parametrize("input", data)
    def test_process_check(self, input):
        ...  # test only if test_preprocess_check succeeded
The problem I encountered is that, at the first failure of test_preprocess_check with a given input of my data set, the following test_preprocess_check and test_process_check runs are all labeled "xfail".
The behaviour I expect would be that, for each new "input" of my parametrized data set, the tests act in an incremental fashion.
ex: data = [0, 1, 2]
If only test_preprocess_check(0) failed, I get the following report:
1 failed, 5 xfailed
but I expect the report:
1 failed, 1 xfailed, 4 passed
Thanks
After some experiments I found a way to generalize @incremental to work with the parametrize annotation: simply rewrite the _previousfailed attribute to make it unique for each input. The _genid attribute was exactly what was needed.
I added a @pytest.mark.incrementalparam marker to achieve this.
The code becomes:
def pytest_runtest_setup(item):
    previousfailed_attr = getattr(item, "_genid", None)
    if previousfailed_attr is not None:
        previousfailed = getattr(item.parent, previousfailed_attr, None)
        if previousfailed is not None:
            pytest.xfail("previous test failed (%s)" % previousfailed.name)

    previousfailed = getattr(item.parent, "_previousfailed", None)
    if previousfailed is not None:
        pytest.xfail("previous test failed (%s)" % previousfailed.name)


def pytest_runtest_makereport(item, call):
    if "incrementalparam" in item.keywords:
        if call.excinfo is not None:
            previousfailed_attr = item._genid
            setattr(item.parent, previousfailed_attr, item)

    if "incremental" in item.keywords:
        if call.excinfo is not None:
            parent = item.parent
            parent._previousfailed = item
It's interesting to mention that this can't be used without parametrize, because the parametrize annotation automatically creates the _genid variable.
Hope this helps others too.
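For completeness, a sketch (not shown in the original answer) of how the marker might be applied, assuming the two hooks above live in conftest.py:

import pytest

data = [0, 1, 2]

@pytest.mark.incrementalparam
class TestClass:
    @pytest.mark.parametrize("input", data)
    def test_preprocess_check(self, input):
        assert input != 1  # pretend the prerequisite fails for input 1

    @pytest.mark.parametrize("input", data)
    def test_process_check(self, input):
        assert True  # should only run when test_preprocess_check passed for this input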

Showing test count in buildbot

I am not particularly happy about the stats that Buildbot provides. I understand that it is for building and not testing - that's why it has a concept of Steps, but no concept of Test. Still, there are many cases when you need test statistics from build results, for example when comparing skipped and failed tests on different platforms to estimate the impact of a change.
So, what is needed to make Buildbot display a test count in the results?
What is the simplest way, so that a person who doesn't know anything about Buildbot can do this in 15 minutes?
Depending on how you want to process the test results and how they are presented, Buildbot does provide a Test step, buildbot.steps.shell.Test.
An example of how I use it for my build environment:
import os
import xml.etree.ElementTree as etree

from buildbot.steps import shell
from buildbot.status import results  # SUCCESS / WARNINGS / FAILURE constants


class CustomStepResult(shell.Test):
    description = 'Analyzing results'
    descriptionDone = 'Results analyzed'

    def __init__(self, log_file=None, *args, **kwargs):
        self._log_file = log_file
        shell.Test.__init__(self, *args, **kwargs)
        self.addFactoryArguments(log_file=log_file)

    def start(self):
        if not os.path.exists(self._log_file):
            self.finished(results.FAILURE)
            self.step_status.setText('TestResult XML file not found !')
        else:
            tree = etree.parse(self._log_file)
            root = tree.getroot()
            passing = len(root.findall('./testsuite/testcase/success'))
            skipped = len(root.findall('./testsuite/testcase/skip'))
            fails = len(root.findall('./testsuite/error')) + len(root.findall('./testsuite/testcase/error')) + len(root.findall('./testsuite/testcase/failure'))
            self.setTestResults(total=fails + passing + skipped, failed=fails, passed=passing)
            # the final status for WARNINGS is green but the step itself will be orange
            self.finished(results.SUCCESS if fails == 0 else results.WARNINGS)
            self.step_status.setText(self.describe(True))
And in the configuration factory I create a step as below:
factory.addStep(CustomStepResult(log_file = log_file))
Basically I override the default Test shell step and pass it a custom XML file which contains my test results. I then look for the pass/fail/skip result nodes and display the counts accordingly in the waterfall.
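(An illustrative sketch, not from the original answer.) Assuming the test runner writes its report to a file such as results.xml, the factory wiring in master.cfg might look roughly like this, using the same old-style Buildbot API as the step above:

from buildbot.process.factory import BuildFactory
from buildbot.steps.shell import ShellCommand

log_file = 'results.xml'  # hypothetical report path written by the test runner

factory = BuildFactory()
# Run the test suite and have it emit an XML report (the command is just an example).
factory.addStep(ShellCommand(command=['python', 'run_tests.py', '--xml', log_file],
                             description='Running tests'))
# Parse the report and publish pass/fail/skip counts on the waterfall.
factory.addStep(CustomStepResult(log_file=log_file))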