twister: pytest: Add --pytest-args to Twister command line

Extend the Twister command line with --pytest-args. This parameter
is passed to the pytest subprocess and makes it possible to select
a specific testcase from a test suite.

Signed-off-by: Grzegorz Chwierut <grzegorz.chwierut@nordicsemi.no>
Author:    Grzegorz Chwierut <grzegorz.chwierut@nordicsemi.no>
Date:      2023-11-21 11:01:17 +01:00
Committer: Carles Cufí
Commit:    a1698b691d
4 changed files with 96 additions and 4 deletions
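
The rule this commit implements: a --pytest-args value given on the Twister
command line takes precedence over the pytest_args list defined under
harness_config in the YAML file. A minimal sketch of that precedence rule,
with made-up names, purely for illustration (the actual implementation lives
in generate_command() below):

    from typing import List, Optional

    def resolve_pytest_args(cli_args: Optional[str], yaml_args: List[str]) -> List[str]:
        # The command-line string wins; otherwise fall back to the YAML list.
        if cli_args:
            return [cli_args]
        return yaml_args

    assert resolve_pytest_args('-k test_foo', ['-k test_bar']) == ['-k test_foo']
    assert resolve_pytest_args(None, ['-k test_bar']) == ['-k test_bar']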


@@ -56,6 +56,16 @@ Pytest scans the given locations looking for tests, following its default
 `discovery rules <https://docs.pytest.org/en/7.1.x/explanation/goodpractices.html#conventions-for-python-test-discovery>`_
 One can also pass extra arguments to pytest from the YAML file using the ``pytest_args`` keyword
 under ``harness_config``, e.g.: ``pytest_args: [-k=test_method, --log-level=DEBUG]``.
+There is also an option to pass ``--pytest-args`` through the Twister command line.
+This can be particularly useful when one wants to select a specific testcase from a test suite.
+For instance, one can use the following command:
+
+.. code-block:: console
+
+   $ ./scripts/twister --platform native_sim -T samples/subsys/testsuite/pytest/shell \
+   -s samples/subsys/testsuite/pytest/shell/sample.pytest.shell \
+   --pytest-args='-k test_shell_print_version'
+
 
 Helpers & fixtures
 ==================
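
For orientation, the invocation above makes the pytest harness generate a
command of roughly the following shape. This is a sketch based on
generate_command() in the diff below; the real command also carries reporting
and plugin flags not shown in this hunk, and the paths are illustrative:

    # Hypothetical shape of the generated pytest command.
    command = [
        'pytest',
        # pytest_root, resolved relative to the test source directory:
        '/path/to/zephyr/samples/subsys/testsuite/pytest/shell/pytest',
        # the --pytest-args value, appended verbatim as one argument:
        '-k test_shell_print_version',
    ]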


@@ -216,6 +216,11 @@ Artificially long but functional example:
                 and 'fifo_loop' is a name of a function found in main.c without test prefix.
                 """)
 
+    parser.add_argument("--pytest-args",
+                        help="""Pass additional arguments to the pytest subprocess. This parameter
+                        will override the pytest_args from the harness_config in the YAML file.
+                        """)
+
     valgrind_asan_group.add_argument(
         "--enable-valgrind", action="store_true",
         help="""Run binary through valgrind and check for several memory access


@@ -309,8 +309,9 @@ class Pytest(Harness):
 
     def generate_command(self):
         config = self.instance.testsuite.harness_config
+        handler: Handler = self.instance.handler
         pytest_root = config.get('pytest_root', ['pytest']) if config else ['pytest']
-        pytest_args = config.get('pytest_args', []) if config else []
+        pytest_args_yaml = config.get('pytest_args', []) if config else []
         pytest_dut_scope = config.get('pytest_dut_scope', None) if config else None
         command = [
             'pytest',
@@ -324,12 +325,19 @@
         ]
 
         command.extend([os.path.normpath(os.path.join(
             self.source_dir, os.path.expanduser(os.path.expandvars(src)))) for src in pytest_root])
-        command.extend(pytest_args)
+
+        if handler.options.pytest_args:
+            command.append(handler.options.pytest_args)
+            if pytest_args_yaml:
+                logger.warning(f'The pytest_args ({handler.options.pytest_args}) specified '
+                               'in the command line will override the pytest_args defined '
+                               f'in the YAML file {pytest_args_yaml}')
+        else:
+            command.extend(pytest_args_yaml)
+
         if pytest_dut_scope:
             command.append(f'--dut-scope={pytest_dut_scope}')
 
-        handler: Handler = self.instance.handler
         if handler.options.verbose > 1:
             command.extend([
                 '--log-cli-level=DEBUG',
@@ -489,6 +497,9 @@ class Pytest(Harness):
                         tc.status = 'error'
                         tc.reason = elem.get('message')
                         tc.output = elem.text
+        else:
+            self.state = 'skipped'
+            self.instance.reason = 'No tests collected'
 
 
 class Gtest(Harness):
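
The new 'No tests collected' branch covers the case where the junit report
written by --junit-xml contains no testcase elements, e.g. because a -k filter
deselected everything. A minimal sketch of that decision, assuming pytest's
junit XML layout; the harness's full parsing logic is not shown in this hunk:

    import xml.etree.ElementTree as ET

    def state_from_junit(report_path: str) -> str:
        # pytest's --junit-xml layout: <testsuites><testsuite><testcase/>...
        root = ET.parse(report_path).getroot()
        if not any(True for _ in root.iter('testcase')):
            return 'skipped'  # mirrors the 'No tests collected' branch above
        return 'passed'       # the real code also inspects failures/errors/skips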


@@ -25,6 +25,7 @@ def testinstance() -> TestInstance:
     testinstance.handler = mock.Mock()
     testinstance.handler.options = mock.Mock()
     testinstance.handler.options.verbose = 1
+    testinstance.handler.options.pytest_args = None
     testinstance.handler.type_str = 'native'
     return testinstance
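
Setting pytest_args = None on the mocked options is load-bearing: attributes
of a bare mock.Mock() are auto-created as further (truthy) Mock objects, so
without this line the `if handler.options.pytest_args:` check in
generate_command() would always take the override branch. A quick demonstration:

    from unittest import mock

    options = mock.Mock()
    assert options.pytest_args          # auto-created Mock attribute is truthy

    options.pytest_args = None
    assert not options.pytest_args      # explicit None disables the override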
@@ -67,6 +68,18 @@ def test_pytest_command_extra_args(testinstance: TestInstance):
         assert c in command
 
 
+def test_pytest_command_extra_args_in_options(testinstance: TestInstance):
+    pytest_harness = Pytest()
+    pytest_args_from_yaml = '-k test_from_yaml'
+    pytest_args_from_cmd = '-k test_from_cmd'
+    testinstance.testsuite.harness_config['pytest_args'] = [pytest_args_from_yaml]
+    testinstance.handler.options.pytest_args = pytest_args_from_cmd
+    pytest_harness.configure(testinstance)
+    command = pytest_harness.generate_command()
+    assert pytest_args_from_cmd in command
+    assert pytest_args_from_yaml not in command
+
+
 @pytest.mark.parametrize(
     ('pytest_root', 'expected'),
     [
@@ -222,3 +235,56 @@ def test_if_report_with_skip(pytester, testinstance: TestInstance):
     assert len(testinstance.testcases) == 2
     for tc in testinstance.testcases:
         assert tc.status == "skipped"
+
+
+def test_if_report_with_filter(pytester, testinstance: TestInstance):
+    test_file_content = textwrap.dedent("""
+        import pytest
+        def test_A():
+            pass
+        def test_B():
+            pass
+    """)
+    test_file = pytester.path / 'test_filter.py'
+    test_file.write_text(test_file_content)
+    report_file = pytester.path / 'report.xml'
+    result = pytester.runpytest(
+        str(test_file),
+        '-k', 'test_B',
+        f'--junit-xml={str(report_file)}'
+    )
+    result.assert_outcomes(passed=1)
+    assert report_file.is_file()
+
+    pytest_harness = Pytest()
+    pytest_harness.configure(testinstance)
+    pytest_harness.report_file = report_file
+    pytest_harness._update_test_status()
+
+    assert pytest_harness.state == "passed"
+    assert testinstance.status == "passed"
+    assert len(testinstance.testcases) == 1
+
+
+def test_if_report_with_no_collected(pytester, testinstance: TestInstance):
+    test_file_content = textwrap.dedent("""
+        import pytest
+        def test_A():
+            pass
+    """)
+    test_file = pytester.path / 'test_filter.py'
+    test_file.write_text(test_file_content)
+    report_file = pytester.path / 'report.xml'
+    result = pytester.runpytest(
+        str(test_file),
+        '-k', 'test_B',
+        f'--junit-xml={str(report_file)}'
+    )
+    result.assert_outcomes(passed=0)
+    assert report_file.is_file()
+
+    pytest_harness = Pytest()
+    pytest_harness.configure(testinstance)
+    pytest_harness.report_file = report_file
+    pytest_harness._update_test_status()
+
+    assert pytest_harness.state == "skipped"
+    assert testinstance.status == "skipped"
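
These report-based assertions line up with pytest's own behavior: when a -k
expression deselects every collected test, pytest exits with
ExitCode.NO_TESTS_COLLECTED (5), which is exactly the situation the new
'skipped / No tests collected' handling maps to. A self-contained check in the
same pytester style as the tests above (hypothetical, not part of this commit):

    import pytest

    def test_deselecting_everything_returns_exit_code_5(pytester):
        pytester.makepyfile("def test_A(): pass")
        result = pytester.runpytest('-k', 'test_B')
        # All tests deselected -> no tests ran -> exit code 5.
        assert result.ret == pytest.ExitCode.NO_TESTS_COLLECTED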