twister: Improve counting and count at runtime

Do not wait until the end to update counters; do the counting in real
time and reuse the data for on-screen reporting.

Add a counter summary function for debugging.

This patch changes how we count and report skipped tests; the total
now includes the skipped tests.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Author: Anas Nashif
Date:   2022-03-15 13:15:08 -04:00
Commit: becef8c83b
2 changed files with 47 additions and 23 deletions
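
For context on the diff below: every ExecutionCounter field is a multiprocessing.Value guarded by its own lock, so worker processes can update counts safely while tests run. A minimal sketch of the property pattern added for skipped_filter (the class name here is illustrative, not the real one):

    from multiprocessing import Value

    class Counter:
        def __init__(self):
            # 'i' = shared signed integer, starts at zero
            self._skipped_filter = Value('i', 0)

        @property
        def skipped_filter(self):
            # take the value's own lock so readers and writers do not race
            with self._skipped_filter.get_lock():
                return self._skipped_filter.value

        @skipped_filter.setter
        def skipped_filter(self, value):
            with self._skipped_filter.get_lock():
                self._skipped_filter.value = value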


@@ -95,6 +95,7 @@ class ExecutionCounter(object):
self._passed = Value('i', 0)
self._skipped_configs = Value('i', 0)
self._skipped_runtime = Value('i', 0)
self._skipped_filter = Value('i', 0)
self._skipped_cases = Value('i', 0)
self._error = Value('i', 0)
self._failed = Value('i', 0)
@@ -104,6 +105,21 @@ class ExecutionCounter(object):
self.lock = Lock()
def summary(self):
logger.debug("--------------------------------")
logger.debug(f"Total Test suites: {self.total}")
logger.debug(f"Total Test cases: {self.cases}")
logger.debug(f"Skipped test cases: {self.skipped_cases}")
logger.debug(f"Completed Testsuites: {self.done}")
logger.debug(f"Passing Testsuites: {self.passed}")
logger.debug(f"Failing Testsuites: {self.failed}")
logger.debug(f"Skipped Testsuites: {self.skipped_configs}")
logger.debug(f"Skipped Testsuites (runtime): {self.skipped_runtime}")
logger.debug(f"Skipped Testsuites (filter): {self.skipped_filter}")
logger.debug(f"Errors: {self.error}")
logger.debug("--------------------------------")
@property
def cases(self):
with self._cases.get_lock():
@@ -164,6 +180,16 @@ class ExecutionCounter(object):
with self._skipped_configs.get_lock():
self._skipped_configs.value = value
@property
def skipped_filter(self):
with self._skipped_filter.get_lock():
return self._skipped_filter.value
@skipped_filter.setter
def skipped_filter(self, value):
with self._skipped_filter.get_lock():
self._skipped_filter.value = value
@property
def skipped_runtime(self):
with self._skipped_runtime.get_lock():
@@ -2596,7 +2622,7 @@ class ProjectBuilder(FilterBuilder):
fin.write(data)
def report_out(self, results):
total_to_do = results.total - results.skipped_configs
total_to_do = results.total
total_tests_width = len(str(total_to_do))
results.done += 1
instance = self.instance
@@ -2620,8 +2646,14 @@ class ProjectBuilder(FilterBuilder):
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
results.skipped_configs += 1
results.skipped_cases += len(instance.testcase.cases)
elif instance.status == "passed":
status = Fore.GREEN + "PASSED" + Fore.RESET
results.passed += 1
for res in instance.results.values():
if res == 'SKIP':
results.skipped_cases += 1
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
@@ -2641,7 +2673,7 @@ class ProjectBuilder(FilterBuilder):
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
results.done, total_tests_width, total_to_do, instance.platform.name,
results.done + results.skipped_filter, total_tests_width, total_to_do , instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
@@ -2649,17 +2681,16 @@ class ProjectBuilder(FilterBuilder):
else:
completed_perc = 0
if total_to_do > 0:
completed_perc = int((float(results.done) / total_to_do) * 100)
completed_perc = int((float(results.done + results.skipped_filter) / total_to_do) * 100)
skipped = results.skipped_configs + results.skipped_runtime
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
results.done,
results.done + results.skipped_filter,
total_to_do,
Fore.RESET,
completed_perc,
Fore.YELLOW if skipped > 0 else Fore.RESET,
skipped,
Fore.YELLOW if results.skipped_configs > 0 else Fore.RESET,
results.skipped_filter + results.skipped_runtime,
Fore.RESET,
Fore.RED if results.failed > 0 else Fore.RESET,
results.failed,
@@ -2873,20 +2904,13 @@ class TestSuite(DisablePyTestCollectionMixin):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self, results=None, initial=False):
results.skipped_configs = 0
results.skipped_cases = 0
def update_counting(self, results=None):
for instance in self.instances.values():
if initial:
results.cases += len(instance.testcase.cases)
results.cases += len(instance.testcase.cases)
if instance.status == 'skipped':
results.skipped_filter += 1
results.skipped_configs += 1
results.skipped_cases += len(instance.testcase.cases)
elif instance.status == "passed":
results.passed += 1
for res in instance.results.values():
if res == 'SKIP':
results.skipped_cases += 1
def compare_metrics(self, filename):
# name, datatype, lower results better
@@ -2979,7 +3003,7 @@ class TestSuite(DisablePyTestCollectionMixin):
"{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
results.passed,
results.total - results.skipped_configs,
results.total,
Fore.RESET,
pass_rate,
Fore.RED if results.failed else Fore.RESET,
@@ -3452,8 +3476,6 @@ class TestSuite(DisablePyTestCollectionMixin):
instances = list(filter(lambda item: item.platform.name in tc.integration_platforms, instance_list))
self.add_instances(instances)
elif emulation_platforms:
self.add_instances(instance_list)
for instance in list(filter(lambda inst: not inst.platform.simulation != 'na', instance_list)):
@@ -3480,6 +3502,7 @@ class TestSuite(DisablePyTestCollectionMixin):
# Such configuration has to be removed from discards to make sure it won't get skipped
remove_from_discards.append(instance)
else:
instance.status = "skipped"
instance.fill_results_by_status()

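To recap the change in report_out() above: each finished test configuration now updates the shared counters as soon as its result is known, instead of a single recount at the end. A condensed sketch of that bookkeeping (the helper name is illustrative; the fields mirror the diff):

    def count_finished(results, instance):
        # runtime counting: update shared counters per completed configuration
        if instance.status == "skipped":
            results.skipped_configs += 1
            results.skipped_cases += len(instance.testcase.cases)
        elif instance.status == "passed":
            results.passed += 1
            # cases inside a passing configuration may still report SKIP
            results.skipped_cases += sum(
                1 for res in instance.results.values() if res == 'SKIP')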

@@ -1271,7 +1271,7 @@ def main():
pipeline = manager.LifoQueue()
done_queue = manager.LifoQueue()
suite.update_counting(results, initial=True)
suite.update_counting(results)
suite.start_time = start_time
while True:
@@ -1282,7 +1282,6 @@ def main():
time.sleep(options.retry_interval) # waiting for the system to settle down
results.done = results.total - results.failed
results.failed = results.error
results = suite.execute(pipeline, done_queue, results)
while True:
@@ -1304,6 +1303,7 @@ def main():
if retries == 0 or results.failed == results.error:
break
# figure out which report to use for size comparison
if options.compare_report:
report_to_use = options.compare_report
@@ -1319,7 +1319,8 @@ def main():
options.last_metrics)
suite.duration = time.time() - start_time
suite.update_counting(results)
results.summary()
suite.summary(results, options.disable_unrecognized_section_test)
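
A note on the reporting arithmetic introduced above: the progress line now counts filtered-out configurations as completed work, so the numerator is done + skipped_filter and the denominator is the full total (skipped suites are no longer subtracted). A small illustrative sketch of that calculation:

    def completion(done, skipped_filter, total):
        # percentage shown in the progress line; filtered-out suites count as done
        completed = done + skipped_filter
        percent = int((float(completed) / total) * 100) if total > 0 else 0
        return completed, percent

    # e.g. 40 executed + 10 filtered out of 100 total -> (50, 50)
    print(completion(40, 10, 100))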