R1703: The if statement can be replaced with 'return bool(test)' - pytest

I wrote a function to check whether a file exists, but pylint raises the message "R1703: The if statement can be replaced with 'return bool(test)'". What does this message mean? In addition, how do I write a pytest script to test my code below?
import os

def is_file_valid(file):
    """
    Check if the input file exists.
    :param file: file path
    :return: True/False
    """
    if os.path.exists(file):
        return True
    else:
        return False
I've tried if ... == 1:, but it doesn't seem to work.
def is_file_valid(file):
    """
    Check if the input file exists.
    :param file: file path
    :return: True/False
    """
    if os.path.exists(file) == 1:
        return True
    else:
        return False
For the pytest script, I wrote...
file_test = 'test.txt'  # actually this file does not exist

def test_is_file_valid(file_test):
    # test is_file_valid()
    assert os.path.exists(file_test) is False, "No such file"
    os.remove(file_test)
pytest only shows the following message:
ERROR test/unit/test_io_utils.py::test_is_file_valid
Does anyone have an idea how to figure this out?

The suggestion means that your function could be rewritten to return a boolean result without the need for an if statement. In your case, os.path.exists already returns a boolean, so it's as simple as returning its result directly.
def is_file_valid(file):
    """
    Check if the input file exists.
    :param file: file path
    :return: True/False
    """
    return os.path.exists(file)
However, whether the function in this state actually makes sense is, in my opinion, questionable, because I don't see any "added value" compared to calling os.path.exists(file) directly.
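For conditions that are not already booleans, the 'return bool(test)' shape from the pylint message would wrap the expression explicitly. A minimal sketch of that general form (file_has_content is just an illustrative name, not part of your code):

import os

def file_has_content(path):
    # os.path.getsize returns an int; bool() turns the truthy/falsy size
    # into an explicit True/False, which is the 'return bool(test)' form
    # that R1703 suggests when the condition is not already a boolean.
    return bool(os.path.getsize(path))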
As for how to test it... create (or not) a file in Pytest's temporary folder:
def test_is_file_valid_true(tmp_path):
    file = tmp_path / 'file.txt'
    file.touch()
    assert is_file_valid(file)

def test_is_file_valid_false(tmp_path):
    file = tmp_path / 'file.txt'
    assert not is_file_valid(file)
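As for the ERROR in your original attempt: pytest treats test function parameters as fixture names, so test_is_file_valid(file_test) fails during setup because no fixture named file_test exists (the module-level variable of the same name is never used). A sketch of the same idea with an explicit fixture, assuming is_file_valid is importable and nonexistent_file is a freely chosen fixture name:

import pytest

@pytest.fixture
def nonexistent_file(tmp_path):
    # A path inside pytest's temporary folder that is deliberately never created.
    return tmp_path / 'test.txt'

def test_is_file_valid_missing(nonexistent_file):
    # The file was never created, so the check should report False.
    assert is_file_valid(nonexistent_file) is False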

Related

What is the proper way of testing simple CRUD operations with Pytest to ensure correct reads and writes? Function object instead of value is returned

Using Pytest, I am trying to test a simple database read function and an update function.
The update function needs to be run first to ensure the test database has the values we will test for in our read function.
In /src there is my main script; here are the read, update, and delete functions:
async def read(self, collection, query=None, mods=None):
    Table = self._cache.classes[collection]
    async with self._session() as session:
        matches = set(Table.__mapper__.columns.keys()) & set(mods['fields'])
        for m in matches:
            q = select(getattr(Table, m))
            q = q.filter_by(**query)
            result = await session.execute(q)
        return [row for row in result]

async def update(self, collection, query, data):
    Table = self._cache.classes[collection]
    async with self._session() as s:
        q = sa_update(Table).filter_by(**query)
        q = q.values(**data)
        await s.execute(q)
        await s.commit()

async def delete_collection(self, table_name, syncro_engine=sync_engine):
    table = self.retrieve_table_obj(table_name)
    async with self._engine.begin() as conn:
        await conn.run_sync(Base.metadata.drop_all(syncro_engine, [table], checkfirst=True))
When running read, the output is [('Wonderland',)], so this is what I am testing with assert in Pytest.
In /test I have a conftest.py file that has these three functions:
@pytest.fixture(scope='function')
def test_update():
    def get_update(collection, query, data):
        return asyncio.run(DB.update(collection, query, data))
    return get_update

@pytest.fixture(scope='function')
def test_read():
    def get_read(collection, query, mods):
        return asyncio.run(DB.read(collection, query, mods))
    return get_read

@pytest.fixture(scope='function')
def drop_table():
    def get_delete_tables(table_name, sync_engine):
        return asyncio.run(DB.delete_collection(table_name, sync_engine))
    return get_delete_tables
Then the tests get run in test_db.py:
# Tests 'update' by updating user 'Alice' with 'org' 'Wonderland'
# Doesn't return anything, using it to ensure the correct values are in test_db to test the read function
@pytest.mark.parametrize('function_input, expected',
                         [("'user', {'name': 'Alice'}, {'org': 'Wonderland'})", "[('Wonderland',)]")])
def test_update(test_update, function_input, expected):
    test_update

# Tests 'read' by reading the just-updated values
@pytest.mark.parametrize('function_input, expected',
                         [("'user', query={'name': 'Alice'}, mods={'fields': ['name', 'org']})", "[('Wonderland',)]")])
def test_read(test_read, function_input, expected):
    assert test_read == expected

# Tests that tables are dropped cleanly by dropping a given table then checking for that table name
@pytest.mark.parametrize('fn_input1, fn_input2, expected',
                         [('image', sync_engine, 'image')])
def test_drop_table(drop_table, collection_names, fn_input1, fn_input2, expected):
    # First drop the table
    drop_table(fn_input1, fn_input2)
    # Now check that it no longer exists
    with pytest.raises(KeyError) as error_info:
        assert fn_input1 in collection_names
        raise KeyError
    assert error_info.type is KeyError
Running test_drop_table does not drop anything from test_db, even though it is passed the correct database test_db and its counterpart in the main script in /src works properly.
When I run this test_read test it fails:
> assert test_read == expected
E assert <function test_read.<locals>.get_read at 0x7f1fa6beed08> == "[('Wonderland',)]"
How can I access the actual returned value to assert against, i.e. [('Wonderland',)], rather than the <function object>?
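For reference, the fixtures above return the inner helper function itself rather than its result, so the value only appears once that helper is called inside the test. A sketch of what that might look like, assuming the parametrized values are passed as real Python objects rather than strings, and that the returned rows compare equal to plain tuples as in the printed output:

import pytest

@pytest.mark.parametrize('query, mods, expected', [
    ({'name': 'Alice'}, {'fields': ['name', 'org']}, [('Wonderland',)]),
])
def test_read_returns_rows(test_read, query, mods, expected):
    # test_read is the get_read factory returned by the fixture; calling it
    # runs the coroutine via asyncio.run and hands back the list of rows.
    result = test_read('user', query, mods)
    assert result == expected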

Skip test if dependency failed for particular parameter

Two tests.
First, check if a file exists.
Second, when the file exists, check if it has any content.
There are 3 files in total:
file_0 exists and has content.
file_1 does not exist.
file_2 exists and is empty.
I want to skip the second test for file_1 (I see no reason to check content when the file does not exist) - how can I do that?
Current code:
import os
from pathlib import Path
import pytest

file_0 = Path('C:\\file_0.txt')
file_1 = Path('C:\\file_1.txt')
file_2 = Path('C:\\file_2.txt')

@pytest.mark.parametrize('file_path', [file_0, file_1, file_2])
@pytest.mark.dependency(name='test_check_file_exists')
def test_file_exists(file_path):
    assert file_path.is_file(), "File does not exist."

@pytest.mark.parametrize('file_path', [file_0, file_1, file_2])
@pytest.mark.dependency(depends=['test_check_file_exists'])
def test_file_has_any_data(file_path):
    assert os.path.getsize(file_path) > 0, "File is empty."
The problem is that parametrized tests are not one, but several tests. To use dependency marks on parametrized tests, you have to make the dependency between specific parametrized tests, in your case from test_file_has_any_data[file_0] to test_file_exists[file_0] and so on.
This can be done by adding a dependency to each parameter:
@pytest.mark.parametrize("file_path", [
    pytest.param(file_0, marks=pytest.mark.dependency(name="f0")),
    pytest.param(file_1, marks=pytest.mark.dependency(name="f1")),
    pytest.param(file_2, marks=pytest.mark.dependency(name="f2"))
])
def test_file_exists(file_path):
    assert file_path.is_file(), "File does not exist."

@pytest.mark.parametrize("file_path", [
    pytest.param(file_0, marks=pytest.mark.dependency(depends=["f0"])),
    pytest.param(file_1, marks=pytest.mark.dependency(depends=["f1"])),
    pytest.param(file_2, marks=pytest.mark.dependency(depends=["f2"]))
])
def test_file_has_any_data(file_path):
    assert os.path.getsize(file_path) > 0, "File is empty."
pytest.param wraps a single parameter and allows you to add marks specifically to that parameter, as can be seen above.
This is also covered in the pytest-dependency documentation.

Unable to get testname while calling pytest execution from python or subprocess

I am trying to create a test runner Python file that executes pytest.exe in a particular test case folder and sends the results via email.
Here is my code:
test_runner.py:
try:
    command = "pytest.exe {app} > {log}".format(app=app_folder, log=log_name)
    os.system(command)
except:
    send_mail()
I use the following code in conftest.py to add screenshots to the pytest-html report.
In conftest.py:
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item, call):
    pytest_html = item.config.pluginmanager.getplugin('html')
    outcome = yield
    report = outcome.get_result()
    extra = getattr(report, 'extra', [])
    if pytest_html:
        xfail = hasattr(report, 'wasxfail')
        if (report.skipped and xfail) or (report.failed and not xfail):
            test_case = str(item._testcase).strip(")")
            function_name = test_case.split(" ")[0]
            file_and_class_name = ((test_case.split(" ")[1]).split("."))[-2:]
            file_name = ".".join(file_and_class_name) + "." + function_name
The issue is, when I run the command "pytest.exe app_folder" in the Windows command prompt, it is able to discover the test cases, execute them, and get the results. But when I call the command from a .py file, either using os.system or subprocess, it fails with the following exception:
\conftest.py", line 85, in pytest_runtest_makereport
INTERNALERROR> test_case = str(item._testcase).strip(")")
INTERNALERROR> AttributeError: 'TestCaseFunction' object has no attribute
'_testcase'
Can anyone please help me understand what's happening here, or suggest any other way to get the test case name?
Update:
To overcome this issue, I instead used the TestResult object from the pytest_runtest_makereport hook to get the test case details.
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item, call):
    outcome = yield
    report = outcome.get_result()
In the above example, the report variable contains the TestResult object. This can be inspected to get the test case/class/module name.
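A minimal sketch of what that lookup might look like, using the nodeid and name attributes pytest exposes on the report and item objects (the variable names are only illustrative):

import pytest

@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    report = outcome.get_result()
    if report.when == 'call' and report.failed:
        failed_nodeid = report.nodeid          # e.g. 'test/unit/test_io_utils.py::test_name'
        failed_test = item.name                # bare test function name
        failed_module = item.module.__name__   # module that defines the test
        print(failed_nodeid, failed_test, failed_module)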
You can use the shell=True option with subprocess to get the desired result:
from subprocess import Popen

command = 'pytest.exe app_folder'  # you can paste the whole command which you run in cmd
p1 = Popen(command, shell=True)
This should serve your purpose.
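Note also that os.system and Popen do not raise an exception just because the test run fails, so the except branch in test_runner.py will never trigger send_mail() on test failures. A sketch of checking the exit code explicitly instead (send_mail, app_folder and log_name are the question's own placeholders; run_tests_and_report is a made-up wrapper name):

import subprocess

def run_tests_and_report(app_folder, log_name):
    # shell=True lets the same command line used in cmd work unchanged,
    # including the '>' redirection into the log file.
    completed = subprocess.run(
        "pytest.exe {app} > {log}".format(app=app_folder, log=log_name),
        shell=True,
    )
    # pytest exits with a non-zero code when tests fail or an internal error
    # occurs, so check the return code instead of relying on an exception.
    if completed.returncode != 0:
        send_mail()  # the question's own mail helper, assumed to exist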

subprocess.run managing optional stdin and stdout

In Python >= 3.5 we can give optional stdout, stdin, and stderr arguments to subprocess.run(), per the docs:
Valid values are PIPE, DEVNULL, an existing file descriptor (a positive integer),
an existing file object, and None. PIPE indicates that a new pipe to the child
should be created
I want to support passing through (at least) None or existing file objects whilst managing resources pythonically.
How should I manage the optional file resources in something like:
import subprocess

def wraps_subprocess(args=['ls', '-l'], stdin=None, stdout=None):
    # ... do important stuff
    subprocess.run(args=args, stdin=stdin, stdout=stdout)
A custom contextmanager (idea taken from this answer) seems to work:
import contextlib

@contextlib.contextmanager
def awesome_open(path_or_file_or_none, mode='rb'):
    if isinstance(path_or_file_or_none, str):
        file_ = needs_closed = open(path_or_file_or_none, mode)
    else:
        file_ = path_or_file_or_none
        needs_closed = None
    try:
        yield file_
    finally:
        if needs_closed:
            needs_closed.close()
which would be used like:
import subprocess

def wraps_subprocess(args=['ls', '-l'], stdin=None, stdout=None):
    # ... do important stuff
    with awesome_open(stdin, mode='rb') as fin, awesome_open(stdout, mode='wb') as fout:
        subprocess.run(args=args, stdin=fin, stdout=fout)
But I think there is probably a better way.
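One possible tightening, assuming Python 3.7+ so contextlib.nullcontext is available (open_if_path is just an illustrative name): let the helper return either a freshly opened file or a no-op context manager, so ownership of the close is obvious.

import contextlib
import subprocess

def open_if_path(path_or_file_or_none, mode):
    # Only open (and therefore own and close) a file when given a path string;
    # existing file objects and None are wrapped in a no-op context manager.
    if isinstance(path_or_file_or_none, str):
        return open(path_or_file_or_none, mode)
    return contextlib.nullcontext(path_or_file_or_none)

def wraps_subprocess(args=['ls', '-l'], stdin=None, stdout=None):
    # ... do important stuff
    with open_if_path(stdin, 'rb') as fin, open_if_path(stdout, 'wb') as fout:
        subprocess.run(args=args, stdin=fin, stdout=fout)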

Pytest - skip (xfail) mixed with parametrize

Is there a way to use the incremental plugin as described at "Pytest: how to skip the rest of tests in the class if one has failed?" mixed with @pytest.mark.parametrize, like below:
@pytest.mark.incremental
class TestClass:
    @pytest.mark.parametrize("input", data)
    def test_preprocess_check(self, input):
        ...  # prerequisite for test

    @pytest.mark.parametrize("input", data)
    def test_process_check(self, input):
        ...  # test only if test_preprocess_check succeeded
The problem I encountered is that, at the first failure of test_preprocess_check with a given input of my data set, all the following test_preprocess_check and test_process_check runs are labeled "xfail".
The behaviour I expect is that, for each new "input" of my parametrized data set, the tests act in an incremental fashion.
For example, with data = [0, 1, 2], if only test_preprocess_check(0) failed, I get the following report:
1 failed, 5 xfailed
but I expect the report:
1 failed, 1 xfailed, 4 passed
Thanks
After some experiments I found a way to generalize the incremental plugin to work with the parametrize annotation. Simply rewrite the _previousfailed attribute to make it unique for each input; the _genid attribute was exactly what was needed.
I added a @pytest.mark.incrementalparam mark to achieve this.
The code becomes:
def pytest_runtest_setup(item):
    previousfailed_attr = getattr(item, "_genid", None)
    if previousfailed_attr is not None:
        previousfailed = getattr(item.parent, previousfailed_attr, None)
        if previousfailed is not None:
            pytest.xfail("previous test failed (%s)" % previousfailed.name)
    previousfailed = getattr(item.parent, "_previousfailed", None)
    if previousfailed is not None:
        pytest.xfail("previous test failed (%s)" % previousfailed.name)

def pytest_runtest_makereport(item, call):
    if "incrementalparam" in item.keywords:
        if call.excinfo is not None:
            previousfailed_attr = item._genid
            setattr(item.parent, previousfailed_attr, item)
    if "incremental" in item.keywords:
        if call.excinfo is not None:
            parent = item.parent
            parent._previousfailed = item
It's interesting to mention that it can't be used without parametrize, because the parametrize annotation automatically creates the _genid variable.
Hope this can help others besides me.
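A sketch of how the mark might then be applied to the question's example, assuming the hooks above live in conftest.py and data stands in for the real data set:

import pytest

data = [0, 1, 2]  # placeholder data set

@pytest.mark.incrementalparam
class TestClass:
    @pytest.mark.parametrize("input", data)
    def test_preprocess_check(self, input):
        ...  # prerequisite step

    @pytest.mark.parametrize("input", data)
    def test_process_check(self, input):
        ...  # expected to xfail only when the same input's preprocess step failed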