twister: rename TestSuite -> TestPlan

Change terminology and fix usage based on the current hierarchy: a
testsuite is an individual test application in the tree, while the
top-level class is where we detect all test suites and create the test
plan to be run later.
The content of TestPlan will eventually have to be split out so that it
deals only with the plan and not with the overall execution of tests.
That step will be done at a later point.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Author: Anas Nashif
Date: 2022-03-23 13:49:43 -04:00
Commit: 7424c65779

2 changed files with 106 additions and 107 deletions
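For orientation, here is a minimal sketch of the naming this change moves toward (illustrative only: the real classes in twisterlib.py carry far more state, and as the message notes, execution has not yet been split out of the plan):

    # Illustrative sketch of the renamed hierarchy, not the actual classes.
    class TestCase:
        """One entry in a testcase.yaml, found while scanning the test suites."""

    class TestPlan:  # formerly TestSuite
        """Detects every test application (testsuite) in the tree and builds
        the plan of test instances to be run later."""
        def __init__(self, board_root, testcase_root, outdir):
            self.testcases = {}  # discovered TestCase objects
            self.instances = {}  # per-platform instances selected to run
            self.duts = []       # connected hardware, used for device testing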

scripts/pylib/twister/twisterlib.py

@@ -717,7 +717,7 @@ class DeviceHandler(Handler):
         """
         super().__init__(instance, type_str)

-        self.suite = None
+        self.testplan = None

     def monitor_serial(self, ser, halt_fileno, harness):
         if harness.is_pytest:
@@ -776,7 +776,7 @@ class DeviceHandler(Handler):
     def device_is_available(self, instance):
         device = instance.platform.name
         fixture = instance.testcase.harness_config.get("fixture")
-        for d in self.suite.duts:
+        for d in self.testplan.duts:
             if fixture and fixture not in d.fixtures:
                 continue
             if d.platform != device or (d.serial is None and d.serial_pty is None):
@@ -794,7 +794,7 @@ class DeviceHandler(Handler):
         return None

     def make_device_available(self, serial):
-        for d in self.suite.duts:
+        for d in self.testplan.duts:
             if serial in [d.serial_pty, d.serial]:
                 d.available = 1
@@ -822,7 +822,7 @@ class DeviceHandler(Handler):
             time.sleep(1)

         hardware = self.device_is_available(self.instance)
-        runner = hardware.runner or self.suite.west_runner
+        runner = hardware.runner or self.testplan.west_runner

         serial_pty = hardware.serial_pty
         ser_pty_process = None
@@ -840,7 +840,7 @@ class DeviceHandler(Handler):

         logger.debug(f"Using serial device {serial_device} @ {hardware.baud} baud")

-        if (self.suite.west_flash is not None) or runner:
+        if (self.testplan.west_flash is not None) or runner:
             command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
             command_extra_args = []
@@ -851,8 +851,8 @@ class DeviceHandler(Handler):
             #    This results in options.west_flash == "--board-id=42"
             # 3) Multiple values: --west-flash="--board-id=42,--erase"
             #    This results in options.west_flash == "--board-id=42 --erase"
-            if self.suite.west_flash and self.suite.west_flash != []:
-                command_extra_args.extend(self.suite.west_flash.split(','))
+            if self.testplan.west_flash and self.testplan.west_flash != []:
+                command_extra_args.extend(self.testplan.west_flash.split(','))

             if runner:
                 command.append("--runner")
@@ -883,7 +883,7 @@ class DeviceHandler(Handler):

             # Receive parameters from an runner_params field
             # of the specified hardware map file.
-            for d in self.suite.duts:
+            for d in self.testplan.duts:
                 if (d.platform == self.instance.platform.name) and d.runner_params:
                     for param in d.runner_params:
                         command.append(param)
@@ -1751,7 +1751,7 @@ class TestCase(DisablePyTestCollectionMixin):
     def __init__(self, testcase_root, workdir, name):
         """TestCase constructor.

-        This gets called by TestSuite as it finds and reads test yaml files.
+        This gets called by TestPlan as it finds and reads test yaml files.
         Multiple TestCase instances may be generated from a single testcase.yaml,
         each one corresponds to an entry within that file.
@@ -2545,12 +2545,12 @@ class FilterBuilder(CMake):

 class ProjectBuilder(FilterBuilder):

-    def __init__(self, suite, instance, **kwargs):
+    def __init__(self, tplan, instance, **kwargs):
         super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)

         self.log = "build.log"
         self.instance = instance
-        self.suite = suite
+        self.testplan = tplan
         self.filtered_tests = 0

         self.lsan = kwargs.get('lsan', False)
@@ -2726,7 +2726,7 @@ class ProjectBuilder(FilterBuilder):

             # to make it work with pickle
             self.instance.handler.thread = None
-            self.instance.handler.suite = None
+            self.instance.handler.testplan = None
             pipeline.put({
                 "op": "report",
                 "test": self.instance,
@@ -2938,7 +2938,7 @@ class ProjectBuilder(FilterBuilder):
         if instance.handler:
             if instance.handler.type_str == "device":
-                instance.handler.suite = self.suite
+                instance.handler.testplan = self.testplan

         if(self.seed is not None and instance.platform.name.startswith("native_posix")):
             self.parse_generated()
@@ -2951,7 +2951,7 @@ class ProjectBuilder(FilterBuilder):
             sys.stdout.flush()

     def gather_metrics(self, instance):
-        if self.suite.enable_size_report and not self.suite.cmake_only:
+        if self.testplan.enable_size_report and not self.testplan.cmake_only:
             self.calc_one_elf_size(instance)
         else:
             instance.metrics["ram_size"] = 0
@@ -2973,7 +2973,7 @@ class ProjectBuilder(FilterBuilder):
         instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0


-class TestSuite(DisablePyTestCollectionMixin):
+class TestPlan(DisablePyTestCollectionMixin):
     config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
     dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
@@ -3022,7 +3022,7 @@ class TestSuite(DisablePyTestCollectionMixin):
         else:
             self.board_roots = board_root_list

-        # Testsuite Options
+        # Test Plan Options
         self.coverage_platform = []
         self.build_only = False
         self.cmake_only = False
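The practical effect on twisterlib call sites is mechanical; a hypothetical caller (names illustrative) changes like this:

    # Before this commit:
    #   pb = ProjectBuilder(suite, instance)
    #   instance.handler.suite = suite
    # After it:
    pb = ProjectBuilder(tplan, instance)   # first argument is now the test plan
    instance.handler.testplan = tplan      # handlers keep a "testplan" reference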

scripts/twister

@@ -204,7 +204,7 @@ except ImportError:

 sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))

-from twisterlib import HardwareMap, TestSuite, SizeCalculator, CoverageTool, ExecutionCounter
+from twisterlib import HardwareMap, TestPlan, SizeCalculator, CoverageTool, ExecutionCounter

 logger = logging.getLogger('twister')
 logger.setLevel(logging.DEBUG)
@@ -296,7 +296,7 @@ Artificially long but functional example:
 """)

     case_select.add_argument("--test-tree", action="store_true",
-                             help="""Output the testsuite in a tree form""")
+                             help="""Output the test plan in a tree form""")

     compare_group_option.add_argument("--compare-report",
                                       help="Use this report file for size comparison")
@@ -891,39 +891,38 @@ def main():
             logger.error("You have provided a wrong subset value: %s." % options.subset)
             return

-    suite = TestSuite(options.board_root, options.testcase_root, options.outdir)
+    tplan = TestPlan(options.board_root, options.testcase_root, options.outdir)

     # Check version of zephyr repo
-    suite.check_zephyr_version()
+    tplan.check_zephyr_version()

-    # Set testsuite options from command line.
-    suite.build_only = options.build_only
-    suite.cmake_only = options.cmake_only
-    suite.cleanup = options.runtime_artifact_cleanup
-    suite.test_only = options.test_only
-    suite.retry_build_errors = options.retry_build_errors
-    suite.enable_slow = options.enable_slow
-    suite.device_testing = options.device_testing
-    suite.fixtures = options.fixture
-    suite.enable_asan = options.enable_asan
-    suite.enable_lsan = options.enable_lsan
-    suite.enable_ubsan = options.enable_ubsan
-    suite.enable_coverage = options.enable_coverage
-    suite.enable_valgrind = options.enable_valgrind
-    suite.coverage_platform = options.coverage_platform
-    suite.inline_logs = options.inline_logs
-    suite.enable_size_report = options.enable_size_report
-    suite.extra_args = options.extra_args
-    suite.west_flash = options.west_flash
-    suite.west_runner = options.west_runner
-    suite.verbose = VERBOSE
-    suite.warnings_as_errors = not options.disable_warnings_as_errors
-    suite.integration = options.integration
-    suite.overflow_as_errors = options.overflow_as_errors
-    suite.suite_name_check = not options.disable_suite_name_check
-    suite.seed = options.seed
-    suite.no_skipped_report = options.no_skipped_report
+    # Set testplan options from command line.
+    tplan.build_only = options.build_only
+    tplan.cmake_only = options.cmake_only
+    tplan.cleanup = options.runtime_artifact_cleanup
+    tplan.test_only = options.test_only
+    tplan.retry_build_errors = options.retry_build_errors
+    tplan.enable_slow = options.enable_slow
+    tplan.device_testing = options.device_testing
+    tplan.fixtures = options.fixture
+    tplan.enable_asan = options.enable_asan
+    tplan.enable_lsan = options.enable_lsan
+    tplan.enable_ubsan = options.enable_ubsan
+    tplan.enable_coverage = options.enable_coverage
+    tplan.enable_valgrind = options.enable_valgrind
+    tplan.coverage_platform = options.coverage_platform
+    tplan.inline_logs = options.inline_logs
+    tplan.enable_size_report = options.enable_size_report
+    tplan.extra_args = options.extra_args
+    tplan.west_flash = options.west_flash
+    tplan.west_runner = options.west_runner
+    tplan.verbose = VERBOSE
+    tplan.warnings_as_errors = not options.disable_warnings_as_errors
+    tplan.integration = options.integration
+    tplan.overflow_as_errors = options.overflow_as_errors
+    tplan.suite_name_check = not options.disable_suite_name_check
+    tplan.seed = options.seed
+    tplan.no_skipped_report = options.no_skipped_report

     # get all enabled west projects
     west_proj = west_projects()
@@ -931,39 +930,39 @@ def main():
                                  [p.posixpath for p in west_proj['projects']]
                                  if west_proj else None, None)
     modules = [module.meta.get('name') for module in modules_meta]
-    suite.modules = modules
+    tplan.modules = modules

     if options.ninja:
-        suite.generator_cmd = "ninja"
-        suite.generator = "Ninja"
+        tplan.generator_cmd = "ninja"
+        tplan.generator = "Ninja"
     else:
-        suite.generator_cmd = "make"
-        suite.generator = "Unix Makefiles"
+        tplan.generator_cmd = "make"
+        tplan.generator = "Unix Makefiles"

     # Set number of jobs
     if options.jobs:
-        suite.jobs = options.jobs
+        tplan.jobs = options.jobs
     elif options.build_only:
-        suite.jobs = multiprocessing.cpu_count() * 2
+        tplan.jobs = multiprocessing.cpu_count() * 2
     else:
-        suite.jobs = multiprocessing.cpu_count()
-    logger.info("JOBS: %d" % suite.jobs)
+        tplan.jobs = multiprocessing.cpu_count()
+    logger.info("JOBS: %d" % tplan.jobs)

     run_individual_tests = []

     if options.test:
         run_individual_tests = options.test

-    num = suite.add_testcases(testcase_filter=run_individual_tests)
+    num = tplan.add_testcases(testcase_filter=run_individual_tests)
     if num == 0:
         logger.error("No test cases found at the specified location...")
         sys.exit(1)

-    suite.add_configurations()
+    tplan.add_configurations()

     if options.device_testing:
         if options.hardware_map:
             hwm.load(options.hardware_map)
-            suite.duts = hwm.duts
+            tplan.duts = hwm.duts
             if not options.platform:
                 options.platform = []
                 for d in hwm.duts:
@@ -985,23 +984,23 @@ def main():
                            options.pre_script,
                            True)
-            suite.duts = hwm.duts
+            tplan.duts = hwm.duts
         else:
             logger.error("""When --device-testing is used with
                             --device-serial or --device-serial-pty,
                             only one platform is allowed""")

     # the fixtures given by twister command explicitly should be assigned to each DUTs
-    if suite.fixtures:
-        for d in suite.duts:
-            d.fixtures.extend(suite.fixtures)
+    if tplan.fixtures:
+        for d in tplan.duts:
+            d.fixtures.extend(tplan.fixtures)

-    if suite.load_errors:
+    if tplan.load_errors:
         sys.exit(1)

     if options.list_tags:
         tags = set()
-        for _, tc in suite.testcases.items():
+        for _, tc in tplan.testcases.items():
             tags = tags.union(tc.tags)

         for t in tags:
@@ -1012,7 +1011,7 @@ def main():
     if not options.platform and (options.list_tests or options.test_tree or options.list_test_duplicates \
                                  or options.sub_test):
         cnt = 0
-        all_tests = suite.get_all_tests()
+        all_tests = tplan.get_all_tests()

         if options.list_test_duplicates:
             import collections
@@ -1021,7 +1020,7 @@ def main():
                 print("Tests with duplicate identifiers:")
                 for dupe in dupes:
                     print("- {}".format(dupe))
-                    for dc in suite.get_testcase(dupe):
+                    for dc in tplan.get_testcase(dupe):
                         print("  - {}".format(dc))
             else:
                 print("No duplicates found.")
@@ -1029,7 +1028,7 @@ def main():

         if options.sub_test:
             for st in options.sub_test:
-                subtests = suite.get_testcase(st)
+                subtests = tplan.get_testcase(st)
                 for sti in subtests:
                     run_individual_tests.append(sti.name)
@@ -1088,20 +1087,20 @@ def main():
     last_run = os.path.join(options.outdir, "twister.json")

     if options.quarantine_list:
-        suite.load_quarantine(options.quarantine_list)
+        tplan.load_quarantine(options.quarantine_list)

     if options.quarantine_verify:
         if not options.quarantine_list:
             logger.error("No quarantine list given to be verified")
             sys.exit(1)
-        suite.quarantine_verify = options.quarantine_verify
+        tplan.quarantine_verify = options.quarantine_verify

     if options.only_failed:
-        suite.load_from_file(last_run, filter_status=['skipped', 'passed', 'filtered'])
-        suite.selected_platforms = set(p.platform.name for p in suite.instances.values())
+        tplan.load_from_file(last_run, filter_status=['skipped', 'passed', 'filtered'])
+        tplan.selected_platforms = set(p.platform.name for p in tplan.instances.values())
     elif options.load_tests:
-        suite.load_from_file(options.load_tests, filter_status=['skipped', 'error', 'filtered'])
-        suite.selected_platforms = set(p.platform.name for p in suite.instances.values())
+        tplan.load_from_file(options.load_tests, filter_status=['skipped', 'error', 'filtered'])
+        tplan.selected_platforms = set(p.platform.name for p in tplan.instances.values())
     elif options.test_only:
         # Get list of connected hardware and filter tests to only be run on connected hardware
         # in cases where no platform was specified when running the tests.
@@ -1112,11 +1111,11 @@ def main():
                 if connected['connected']:
                     connected_list.append(connected['platform'])

-        suite.load_from_file(last_run, filter_status=['skipped', 'error'],
+        tplan.load_from_file(last_run, filter_status=['skipped', 'error'],
                              filter_platform=connected_list)
-        suite.selected_platforms = set(p.platform.name for p in suite.instances.values())
+        tplan.selected_platforms = set(p.platform.name for p in tplan.instances.values())
     else:
-        discards = suite.apply_filters(
+        discards = tplan.apply_filters(
             enable_slow=options.enable_slow,
             platform=options.platform,
             exclude_platform=options.exclude_platform,
@@ -1138,7 +1137,7 @@ def main():
         return

     for p in options.platform:
-        inst = suite.get_platform_instances(p)
+        inst = tplan.get_platform_instances(p)
         count = 0

         for i in inst.values():
@@ -1167,9 +1166,9 @@ def main():
                 reason))

     if options.report_excluded:
-        all_tests = suite.get_all_tests()
+        all_tests = tplan.get_all_tests()
         to_be_run = set()
-        for i, p in suite.instances.items():
+        for i, p in tplan.instances.items():
             to_be_run.update(p.testcase.cases)

         if all_tests - to_be_run:
@@ -1187,16 +1186,16 @@ def main():
     # in parallel, it is more efficient to run in the order:
     # "plat1-testA, plat2-testA, ..., plat1-testB, plat2-testB, ..."
     if options.device_testing:
-        suite.instances = OrderedDict(sorted(suite.instances.items(),
+        tplan.instances = OrderedDict(sorted(tplan.instances.items(),
                                              key=lambda x: x[0][x[0].find("/") + 1:]))
     else:
-        suite.instances = OrderedDict(sorted(suite.instances.items()))
+        tplan.instances = OrderedDict(sorted(tplan.instances.items()))

     # Do calculation based on what is actually going to be run and evaluated
     # at runtime, ignore the cases we already know going to be skipped.
     # This fixes an issue where some sets would get majority of skips and
     # basically run nothing beside filtering.
-    to_run = {k : v for k,v in suite.instances.items() if v.status is None}
+    to_run = {k : v for k,v in tplan.instances.items() if v.status is None}

     subset, sets = options.subset.split("/")
     subset = int(subset)
@@ -1217,28 +1216,28 @@ def main():
             end = start + per_set

         sliced_instances = islice(to_run.items(), start, end)
-        skipped = {k : v for k,v in suite.instances.items() if v.status == 'skipped'}
-        errors = {k : v for k,v in suite.instances.items() if v.status == 'error'}
-        suite.instances = OrderedDict(sliced_instances)
+        skipped = {k : v for k,v in tplan.instances.items() if v.status == 'skipped'}
+        errors = {k : v for k,v in tplan.instances.items() if v.status == 'error'}
+        tplan.instances = OrderedDict(sliced_instances)
         if subset == 1:
             # add all pre-filtered tests that are skipped or got error status
             # to the first set to allow for better distribution among all sets.
-            suite.instances.update(skipped)
-            suite.instances.update(errors)
+            tplan.instances.update(skipped)
+            tplan.instances.update(errors)

-    suite.json_report(os.path.join(options.outdir, "testplan.json"))
+    tplan.json_report(os.path.join(options.outdir, "testplan.json"))
     if options.save_tests:
-        suite.json_report(options.save_tests)
+        tplan.json_report(options.save_tests)
         return

     logger.info("%d test scenarios (%d configurations) selected, %d configurations discarded due to filters." %
-                (len(suite.testcases), len(suite.instances), len(discards)))
+                (len(tplan.testcases), len(tplan.instances), len(discards)))

     if options.device_testing and not options.build_only:
         print("\nDevice testing on:")
-        hwm.dump(filtered=suite.selected_platforms)
+        hwm.dump(filtered=tplan.selected_platforms)
         print("")

     if options.dry_run:
if options.dry_run:
@ -1247,7 +1246,7 @@ def main():
return
if options.short_build_path:
suite.create_build_dir_links()
tplan.create_build_dir_links()
retries = options.retry_failed + 1
completed = 0
@@ -1256,12 +1255,12 @@ def main():
     manager = BaseManager()
     manager.start()

-    results = ExecutionCounter(total=len(suite.instances))
+    results = ExecutionCounter(total=len(tplan.instances))
     pipeline = manager.LifoQueue()
     done_queue = manager.LifoQueue()
-    suite.update_counting(results)
-    suite.start_time = start_time
+    tplan.update_counting(results)
+    tplan.start_time = start_time

     while True:
         completed += 1
@@ -1276,7 +1275,7 @@ def main():
             else:
                 results.failed = results.error

-        results = suite.execute(pipeline, done_queue, results)
+        results = tplan.execute(pipeline, done_queue, results)

         while True:
             try:
@@ -1284,10 +1283,10 @@ def main():
             except queue.Empty:
                 break
             else:
-                inst.metrics.update(suite.instances[inst.name].metrics)
+                inst.metrics.update(tplan.instances[inst.name].metrics)
                 inst.metrics["handler_time"] = inst.handler.duration if inst.handler else 0
                 inst.metrics["unrecognized"] = []
-                suite.instances[inst.name] = inst
+                tplan.instances[inst.name] = inst

         print("")
@@ -1305,24 +1304,24 @@ def main():
         elif options.last_metrics:
             report_to_use = previous_results_file

-        suite.footprint_reports(report_to_use,
+        tplan.footprint_reports(report_to_use,
                                 options.show_footprint,
                                 options.all_deltas,
                                 options.footprint_threshold,
                                 options.last_metrics)

-    suite.duration = time.time() - start_time
+    tplan.duration = time.time() - start_time

     results.summary()

-    suite.summary(results, options.disable_unrecognized_section_test)
+    tplan.summary(results, options.disable_unrecognized_section_test)

     if options.coverage:
         if not options.gcov_tool:
             use_system_gcov = False

             for plat in options.coverage_platform:
-                ts_plat = suite.get_platform(plat)
+                ts_plat = tplan.get_platform(plat)
                 if ts_plat and (ts_plat.type in {"native", "unit"}):
                     use_system_gcov = True
@@ -1346,12 +1345,12 @@ def main():
         table = []
         header = ['Board', 'ID', 'Counter']
         for d in hwm.duts:
-            if d.connected and d.platform in suite.selected_platforms:
+            if d.connected and d.platform in tplan.selected_platforms:
                 row = [d.platform, d.id, d.counter]
                 table.append(row)
         print(tabulate(table, headers=header, tablefmt="github"))

-    suite.save_reports(options.report_name,
+    tplan.save_reports(options.report_name,
                        options.report_suffix,
                        options.report_dir,
                        options.no_update,
@@ -1361,7 +1360,7 @@ def main():
                        )

     logger.info("Run completed")
-    if results.failed or (suite.warnings and options.warnings_as_errors):
+    if results.failed or (tplan.warnings and options.warnings_as_errors):
         sys.exit(1)
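Taken together, the changes in this file leave main() driving the renamed object through the same flow as before. Roughly, condensed from the hunks above rather than quoted verbatim:

    tplan = TestPlan(options.board_root, options.testcase_root, options.outdir)
    tplan.add_testcases(testcase_filter=run_individual_tests)  # discover tests
    tplan.add_configurations()                                 # load platforms
    discards = tplan.apply_filters(...)                        # select instances
    results = tplan.execute(pipeline, done_queue, results)     # run them
    tplan.save_reports(...)                                    # write reports

As the commit message promises, execution and reporting still live on TestPlan; splitting them out is left for a later step.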