scripts: twister: Do not report filtered test instances by default

Filtered test instances are now omitted from Twister reports by
default. The previous behaviour is preserved behind a new Twister
flag: --report-filtered.
Existing tests were adjusted, and a new test for that flag was added.

Signed-off-by: Lukasz Mrugala <lukaszx.mrugala@intel.com>
Lukasz Mrugala 2024-04-30 14:17:47 +00:00 committed by Carles Cufí
commit 5f4d330db6
4 changed files with 61 additions and 11 deletions
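
For context, the flag plugs into the normal Twister command line; a
hypothetical invocation (test path and platform chosen only for
illustration) might look like:

    # Default: filtered instances are omitted from the generated reports
    ./scripts/twister -T tests/dummy -p qemu_x86

    # Opt back into the old behaviour and report filtered instances too
    ./scripts/twister -T tests/dummy -p qemu_x86 --report-filtered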

@@ -547,6 +547,8 @@ structure in the main Zephyr tree: boards/<arch>/<board_name>/""")
     parser.add_argument("--overflow-as-errors", action="store_true",
                         help="Treat RAM/SRAM overflows as errors.")
+    parser.add_argument("--report-filtered", action="store_true",
+                        help="Include filtered tests in the reports.")
     parser.add_argument("-P", "--exclude-platform", action="append", default=[],
                         help="""Exclude platforms and do not build or run any tests

@@ -259,6 +259,8 @@ class Reporting:
         for instance in self.instances.values():
             if platform and platform != instance.platform.name:
                 continue
+            if instance.status == "filtered" and not self.env.options.report_filtered:
+                continue
             suite = {}
             handler_log = os.path.join(instance.build_dir, "handler.log")
             pytest_log = os.path.join(instance.build_dir, "twister_harness.log")
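
The guard above skips any instance whose status is "filtered" unless
the new option is set. A minimal sketch of that skip logic, using
SimpleNamespace stand-ins (with hypothetical instance names) in place
of Twister's TestInstance objects and environment options:

    from types import SimpleNamespace

    options = SimpleNamespace(report_filtered=False)
    instances = [
        SimpleNamespace(name="dummy.agnostic.group1", status="passed"),
        SimpleNamespace(name="dummy.agnostic.group2", status="filtered"),
    ]

    # Mirrors the loop above: filtered instances are dropped from the
    # report unless --report-filtered was passed.
    reported = [
        inst for inst in instances
        if not (inst.status == "filtered" and not options.report_filtered)
    ]
    assert [inst.name for inst in reported] == ["dummy.agnostic.group1"]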

@@ -344,14 +344,25 @@ class TestReport:
         assert str(sys_exit.value) == '0'
 
     @pytest.mark.parametrize(
-        'test_path, expected_testcase_count',
-        [(os.path.join(TEST_DATA, 'tests', 'dummy'), 6),],
-        ids=['dummy tests']
+        'test_path, flags, expected_testcase_counts',
+        [
+            (
+                os.path.join(TEST_DATA, 'tests', 'dummy'),
+                ['--detailed-skipped-report'],
+                {'qemu_x86': 5, 'frdm_k64f': 1}
+            ),
+            (
+                os.path.join(TEST_DATA, 'tests', 'dummy'),
+                ['--detailed-skipped-report', '--report-filtered'],
+                {'qemu_x86': 6, 'frdm_k64f': 6}
+            ),
+        ],
+        ids=['dummy tests', 'dummy tests with filtered']
     )
-    def test_detailed_skipped_report(self, out_path, test_path, expected_testcase_count):
+    def test_detailed_skipped_report(self, out_path, test_path, flags, expected_testcase_counts):
         test_platforms = ['qemu_x86', 'frdm_k64f']
         args = ['-i', '--outdir', out_path, '-T', test_path] + \
-            ['--detailed-skipped-report'] + \
+            flags + \
             [val for pair in zip(
                 ['-p'] * len(test_platforms), test_platforms
             ) for val in pair]
@@ -367,12 +378,47 @@ class TestReport:
         for ts in xml_data.iter('testsuite'):
             testsuite_counter += 1
             # Without the tested flag, filtered testcases would be missing from the report
-            assert len(list(ts.iter('testcase'))) == expected_testcase_count, \
-                'Not all expected testcases appear in the report.'
+            testcase_count = len(list(ts.iter('testcase')))
+            expected_tc_count = expected_testcase_counts[ts.get('name')]
+            assert testcase_count == expected_tc_count, \
+                f'Not all expected testcases appear in the report.' \
+                f' (In {ts.get("name")}, expected {expected_tc_count}, got {testcase_count}.)'
 
         assert testsuite_counter == len(test_platforms), \
             'Some platforms are missing from the XML report.'
 
+    @pytest.mark.parametrize(
+        'test_path, report_filtered, expected_filtered_count',
+        [
+            (os.path.join(TEST_DATA, 'tests', 'dummy'), False, 0),
+            (os.path.join(TEST_DATA, 'tests', 'dummy'), True, 4),
+        ],
+        ids=['no filtered', 'with filtered']
+    )
+    def test_report_filtered(self, out_path, test_path, report_filtered, expected_filtered_count):
+        test_platforms = ['qemu_x86', 'frdm_k64f']
+        args = ['-i', '--outdir', out_path, '-T', test_path] + \
+            (['--report-filtered'] if report_filtered else []) + \
+            [val for pair in zip(
+                ['-p'] * len(test_platforms), test_platforms
+            ) for val in pair]
+
+        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
+                pytest.raises(SystemExit) as sys_exit:
+            self.loader.exec_module(self.twister_module)
+
+        assert str(sys_exit.value) == '0'
+
+        with open(os.path.join(out_path, 'twister.json')) as f:
+            j = json.load(f)
+        testsuites = j.get('testsuites')
+        assert testsuites, 'No testsuites found.'
+        statuses = [testsuite.get('status') for testsuite in testsuites]
+        filtered_status_count = statuses.count('filtered')
+        assert filtered_status_count == expected_filtered_count, \
+            f'Expected {expected_filtered_count} filtered statuses, got {filtered_status_count}.'
+
     def test_enable_size_report(self, out_path):
         test_platforms = ['qemu_x86', 'frdm_k64f']
         path = os.path.join(TEST_DATA, 'tests', 'dummy', 'device', 'group')
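
The two tests above check the XML and JSON reports respectively. To
inspect a run's reports the same way outside the test suite, a small
sketch (the twister-out directory and the twister_report.xml /
twister.json filenames are assumed defaults):

    import json
    import xml.etree.ElementTree as ET

    # Count testcases per platform testsuite in the JUnit-style XML report.
    root = ET.parse('twister-out/twister_report.xml').getroot()
    for ts in root.iter('testsuite'):
        print(ts.get('name'), len(list(ts.iter('testcase'))))

    # Count suites with a given status in the JSON report.
    with open('twister-out/twister.json') as f:
        statuses = [s.get('status') for s in json.load(f).get('testsuites', [])]
    print('filtered suites:', statuses.count('filtered'))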

@@ -45,15 +45,15 @@ class TestRunner:
         ['qemu_x86', 'qemu_x86_64', 'frdm_k64f'],
         {
             'selected_test_scenarios': 3,
-            'selected_test_instances': 9,
-            'skipped_configurations': 3,
-            'skipped_by_static_filter': 3,
+            'selected_test_instances': 6,
+            'skipped_configurations': 0,
+            'skipped_by_static_filter': 0,
             'skipped_at_runtime': 0,
             'passed_configurations': 4,
             'failed_configurations': 0,
             'errored_configurations': 0,
             'executed_test_cases': 8,
-            'skipped_test_cases': 5,
+            'skipped_test_cases': 0,
             'platform_count': 0,
             'executed_on_platform': 4,
             'only_built': 2