twister: better handling of error reporting
Stop counting errors as failures and report them as they are. This fixes counting issues when retrying failures and errors: we had been adding errors to failures and then double-counting them when displaying information on the screen, instead of keeping an accurate count in the counter class. Errors, i.e. build errors, are now counted and reported separately.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
parent f31b3f03ad
commit 5770bdeb1a
3 changed files with 61 additions and 45 deletions
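The change hinges on keeping failures and build errors in separate counters and printing each from its own field instead of summing them. A minimal standalone sketch of that idea, using hypothetical names rather than the actual twister classes:

    from multiprocessing import Value

    class Counters:
        # one shared counter per outcome; errors are never folded into failed
        def __init__(self):
            self.failed = Value('i', 0)
            self.error = Value('i', 0)

    def report(c):
        # each outcome is printed from its own counter, so nothing is double-counted
        print(f"{c.failed.value} failed, {c.error.value} errored")

    c = Counters()
    with c.error.get_lock():
        c.error.value += 1  # a build error bumps only the error counter
    report(c)               # -> "0 failed, 1 errored"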
@@ -462,14 +462,17 @@ class Reporting:
             pass_rate = 0
 
         logger.info(
-            "{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
+            "{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {}{}{} errored, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
                 Fore.RED if failed else Fore.GREEN,
                 results.passed,
                 results.total,
                 Fore.RESET,
                 pass_rate,
                 Fore.RED if results.failed else Fore.RESET,
-                results.failed + results.error,
+                results.failed,
+                Fore.RESET,
+                Fore.RED if results.error else Fore.RESET,
+                results.error,
                 Fore.RESET,
                 results.skipped_configs,
                 Fore.YELLOW if self.plan.warnings else Fore.RESET,
@@ -50,6 +50,9 @@ class ExecutionCounter(object):
         # updated by report_out()
         self._done = Value('i', 0)
 
+        # iteration
+        self._iteration = Value('i', 0)
+
         # instances that actually executed and passed
         # updated by report_out()
         self._passed = Value('i', 0)
@@ -76,24 +79,24 @@ class ExecutionCounter(object):
         # initialized to number of test instances
         self._total = Value('i', total)
 
-        # updated in update_counting_after_pipeline()
+        # updated in report_out
         self._cases = Value('i', 0)
         self.lock = Lock()
 
     def summary(self):
-        logger.debug("--------------------------------")
-        logger.debug(f"Total test suites: {self.total}") # actually test instances
-        logger.debug(f"Total test cases: {self.cases}")
-        logger.debug(f"Executed test cases: {self.cases - self.skipped_cases}")
-        logger.debug(f"Skipped test cases: {self.skipped_cases}")
-        logger.debug(f"Completed test suites: {self.done}")
-        logger.debug(f"Passing test suites: {self.passed}")
-        logger.debug(f"Failing test suites: {self.failed}")
-        logger.debug(f"Skipped test suites: {self.skipped_configs}")
-        logger.debug(f"Skipped test suites (runtime): {self.skipped_runtime}")
-        logger.debug(f"Skipped test suites (filter): {self.skipped_filter}")
-        logger.debug(f"Errors: {self.error}")
-        logger.debug("--------------------------------")
+        print("--------------------------------")
+        print(f"Total test suites: {self.total}") # actually test instances
+        print(f"Total test cases: {self.cases}")
+        print(f"Executed test cases: {self.cases - self.skipped_cases}")
+        print(f"Skipped test cases: {self.skipped_cases}")
+        print(f"Completed test suites: {self.done}")
+        print(f"Passing test suites: {self.passed}")
+        print(f"Failing test suites: {self.failed}")
+        print(f"Skipped test suites: {self.skipped_configs}")
+        print(f"Skipped test suites (runtime): {self.skipped_runtime}")
+        print(f"Skipped test suites (filter): {self.skipped_filter}")
+        print(f"Errors: {self.error}")
+        print("--------------------------------")
 
     @property
     def cases(self):
@@ -125,6 +128,16 @@ class ExecutionCounter(object):
         with self._error.get_lock():
             self._error.value = value
 
+    @property
+    def iteration(self):
+        with self._iteration.get_lock():
+            return self._iteration.value
+
+    @iteration.setter
+    def iteration(self, value):
+        with self._iteration.get_lock():
+            self._iteration.value = value
+
     @property
     def done(self):
         with self._done.get_lock():
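Like the other ExecutionCounter fields, the new iteration counter is a multiprocessing.Value guarded by its own lock, so worker processes can update it safely. A minimal sketch of the property pattern used above, in a hypothetical standalone class:

    from multiprocessing import Value

    class SharedCounter:
        def __init__(self):
            # 'i' is a C int shared across processes
            self._iteration = Value('i', 0)

        @property
        def iteration(self):
            with self._iteration.get_lock():
                return self._iteration.value

        @iteration.setter
        def iteration(self, value):
            with self._iteration.get_lock():
                self._iteration.value = value

    c = SharedCounter()
    c.iteration += 1    # read and write each take the lock
    print(c.iteration)  # -> 1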
@@ -714,21 +727,26 @@ class ProjectBuilder(FilterBuilder):
         total_tests_width = len(str(total_to_do))
         results.done += 1
         instance = self.instance
+        if results.iteration == 1:
+            results.cases += len(instance.testcases)
 
         if instance.status in ["error", "failed"]:
             if instance.status == "error":
                 results.error += 1
+                txt = " ERROR "
             else:
                 results.failed += 1
+                txt = " FAILED "
             if self.options.verbose:
-                status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
+                status = Fore.RED + txt + Fore.RESET + instance.reason
             else:
                 print("")
                 logger.error(
-                    "{:<25} {:<50} {}FAILED{}: {}".format(
+                    "{:<25} {:<50} {}{}{}: {}".format(
                         instance.platform.name,
                         instance.testsuite.name,
                         Fore.RED,
+                        txt,
                         Fore.RESET,
                         instance.reason))
             if not self.options.verbose:
@@ -767,9 +785,8 @@ class ProjectBuilder(FilterBuilder):
                 and hasattr(self.instance.handler, 'seed')
                 and self.instance.handler.seed is not None ):
                 more_info += "/seed: " + str(self.options.seed)
 
             logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
-                results.done + results.skipped_filter, total_tests_width, total_to_do , instance.platform.name,
+                results.done, total_tests_width, total_to_do , instance.platform.name,
                 instance.testsuite.name, status, more_info))
 
             if instance.status in ["error", "failed", "timeout"]:
@@ -779,9 +796,9 @@ class ProjectBuilder(FilterBuilder):
             if total_to_do > 0:
                 completed_perc = int((float(results.done + results.skipped_filter) / total_to_do) * 100)
 
-            sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
+            sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s, error: %s%4d%s" % (
                 Fore.GREEN,
-                results.done + results.skipped_filter,
+                results.done,
                 total_to_do,
                 Fore.RESET,
                 completed_perc,
@@ -790,6 +807,9 @@ class ProjectBuilder(FilterBuilder):
                 Fore.RESET,
                 Fore.RED if results.failed > 0 else Fore.RESET,
                 results.failed,
+                Fore.RESET,
+                Fore.RED if results.error > 0 else Fore.RESET,
+                results.error,
                 Fore.RESET
                 )
             )
@@ -905,13 +925,13 @@ class TwisterRunner:
     def run(self):
 
         retries = self.options.retry_failed + 1
-        completed = 0
 
         BaseManager.register('LifoQueue', queue.LifoQueue)
         manager = BaseManager()
         manager.start()
 
         self.results = ExecutionCounter(total=len(self.instances))
+        self.iteration = 0
         pipeline = manager.LifoQueue()
         done_queue = manager.LifoQueue()
 
@@ -939,17 +959,17 @@ class TwisterRunner:
         self.update_counting_before_pipeline()
 
         while True:
-            completed += 1
+            self.results.iteration += 1
 
-            if completed > 1:
-                logger.info("%d Iteration:" % (completed))
+            if self.results.iteration > 1:
+                logger.info("%d Iteration:" % (self.results.iteration))
                 time.sleep(self.options.retry_interval) # waiting for the system to settle down
-                self.results.done = self.results.total - self.results.failed
+                self.results.done = self.results.total - self.results.failed - self.results.error
+                self.results.failed = 0
                 if self.options.retry_build_errors:
-                    self.results.failed = 0
                     self.results.error = 0
             else:
-                self.results.failed = self.results.error
+                self.results.done = self.results.skipped_filter
 
             self.execute(pipeline, done_queue)
 
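Between iterations the retry loop above rolls the counters back so that only the retried instances are counted again: done drops by the failed and errored instances, failed is always cleared, and error is cleared only when build errors are retried; the next hunk adds the matching retry_errors exit check. A simplified sketch of both, with hypothetical helper names rather than the twister code itself:

    from types import SimpleNamespace

    def reset_for_retry(results, retry_build_errors):
        # failed/errored instances run again, so they are no longer counted as done
        results.done = results.total - results.failed - results.error
        results.failed = 0
        if retry_build_errors:
            results.error = 0  # errored builds go back into the pipeline too

    def should_stop(retries, results, retry_build_errors):
        # stop when retries are exhausted, or when nothing failed and errors are not retried
        retry_errors = bool(results.error) and retry_build_errors
        return retries == 0 or (results.failed == 0 and not retry_errors)

    r = SimpleNamespace(total=10, done=10, failed=2, error=1)
    reset_for_retry(r, retry_build_errors=False)
    print(r.done, r.failed, r.error)                    # -> 7 0 1
    print(should_stop(1, r, retry_build_errors=False))  # -> True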
@@ -966,13 +986,14 @@ class TwisterRunner:
 
                 print("")
 
+            retry_errors = False
+            if self.results.error and self.options.retry_build_errors:
+                retry_errors = True
+
             retries = retries - 1
-            # There are cases where failed == error (only build failures),
-            # we do not try build failures.
-            if retries == 0 or (self.results.failed == self.results.error and not self.options.retry_build_errors):
+            if retries == 0 or ( self.results.failed == 0 and not retry_errors):
                 break
 
-        self.update_counting_after_pipeline()
         self.show_brief()
 
     def update_counting_before_pipeline(self):
@@ -986,19 +1007,10 @@ class TwisterRunner:
                 self.results.skipped_filter += 1
                 self.results.skipped_configs += 1
                 self.results.skipped_cases += len(instance.testsuite.testcases)
+                self.results.cases += len(instance.testsuite.testcases)
             elif instance.status == 'error':
                 self.results.error += 1
 
-    def update_counting_after_pipeline(self):
-        '''
-        Updating counting after pipeline is necessary because the number of test cases
-        of a test instance will be refined based on zephyr.symbols as it goes through the
-        pipeline. While the testsuite.testcases is obtained by scanning the source file.
-        The instance.testcases is more accurate and can only be obtained after pipeline finishes.
-        '''
-        for instance in self.instances.values():
-            self.results.cases += len(instance.testcases)
-
     def show_brief(self):
         logger.info("%d test scenarios (%d test instances) selected, "
                     "%d configurations skipped (%d by static filter, %d at runtime)." %
@@ -202,7 +202,8 @@ def main(options):
 
     duration = time.time() - start_time
 
-    runner.results.summary()
+    if VERBOSE > 1:
+        runner.results.summary()
 
     report.summary(runner.results, options.disable_unrecognized_section_test, duration)
 