twister: add --report-summary switch
Added a switch that shows the failed/error report from the last run. By default it shows all items found; you can also specify the number of items to show (e.g. --report-summary 15). It also works well with the --outdir switch.

Signed-off-by: Kamil Paszkiet <kamilx.paszkiet@intel.com>
parent daaf06db94
commit a411ae93b7

4 changed files with 106 additions and 63 deletions
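Example invocations (illustrative; run from a Zephyr workspace after a previous twister run, since the summary is read back from that run's twister.json):

    scripts/twister --report-summary
    scripts/twister --report-summary 15 --outdir twister-out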
@@ -602,6 +602,12 @@ structure in the main Zephyr tree: boards/<arch>/<board_name>/""")
         help="""Create a report with a custom name.
         """)
 
+    parser.add_argument(
+        "--report-summary", action="store", nargs='?', type=int, const=0,
+        help="Show failed/error report from the latest run. Default shows all items found. "
+             "However, you can specify the number of items (e.g. --report-summary 15). "
+             "It also works well with the --outdir switch.")
+
     parser.add_argument(
         "--report-suffix",
         help="""Add a suffix to all generated file names, for example to add a
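The nargs='?' / const=0 combination is what makes the switch tri-state: absent, bare, or given with a value. A standalone sketch (not twister code) of how argparse resolves the three cases:

    import argparse

    # Same argument definition as in the hunk above.
    p = argparse.ArgumentParser()
    p.add_argument("--report-summary", action="store", nargs='?', type=int, const=0)

    print(p.parse_args([]).report_summary)                          # None -> switch absent
    print(p.parse_args(["--report-summary"]).report_summary)        # 0    -> show all items
    print(p.parse_args(["--report-summary", "15"]).report_summary)  # 15   -> show 15 items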
@@ -26,6 +26,7 @@ class Reporting:
         self.env = env
         self.timestamp = datetime.now().isoformat()
         self.outdir = os.path.abspath(env.options.outdir)
+        self.instance_fail_count = plan.instance_fail_count
 
     @staticmethod
     def process_log(log_file):
@@ -432,20 +433,36 @@ class Reporting:
             (report if not last_metrics else "the last twister run.")))
 
     def synopsis(self):
+        if self.env.options.report_summary == 0:
+            count = self.instance_fail_count
+            log_txt = f"The following issues were found (showing all {count} items):"
+        elif self.env.options.report_summary:
+            count = self.env.options.report_summary
+            log_txt = "The following issues were found "
+            if count > self.instance_fail_count:
+                log_txt += f"(presenting {self.instance_fail_count} out of the {count} items requested):"
+            else:
+                log_txt += f"(showing {count} of {self.instance_fail_count} items):"
+        else:
+            count = 10
+            log_txt = f"The following issues were found (showing the top {count} items):"
         cnt = 0
         example_instance = None
         detailed_test_id = self.env.options.detailed_test_id
         for instance in self.instances.values():
             if instance.status not in ["passed", "filtered", "skipped"]:
-                cnt = cnt + 1
+                cnt += 1
                 if cnt == 1:
                     logger.info("-+" * 40)
-                    logger.info("The following issues were found (showing the top 10 items):")
+                    logger.info(log_txt)
 
                 logger.info(f"{cnt}) {instance.testsuite.name} on {instance.platform.name} {instance.status} ({instance.reason})")
                 example_instance = instance
-            if cnt == 10:
+            if cnt == count:
                 break
+        if cnt == 0 and self.env.options.report_summary is not None:
+            logger.info("-+" * 40)
+            logger.info("No errors/fails found")
 
         if cnt and example_instance:
             logger.info("")
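The header line therefore depends on how the switch was passed. A hypothetical helper mirroring just the branch logic above, for illustration:

    # Hypothetical stand-in for the option/count handling in synopsis().
    def summary_header(report_summary, fail_count):
        if report_summary == 0:        # bare --report-summary: show everything
            return f"The following issues were found (showing all {fail_count} items):"
        elif report_summary:           # --report-summary N with a positive N
            if report_summary > fail_count:
                return (f"The following issues were found (presenting {fail_count} "
                        f"out of the {report_summary} items requested):")
            return (f"The following issues were found (showing {report_summary} "
                    f"of {fail_count} items):")
        else:                          # switch absent (None): keep the old top-10 cap
            return "The following issues were found (showing the top 10 items):"

    assert "top 10" in summary_header(None, 42)
    assert "showing all 42" in summary_header(0, 42)
    assert "showing 15 of 42" in summary_header(15, 42)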
@@ -17,6 +17,7 @@ import copy
 import shutil
 import random
 import snippets
+from colorama import Fore
 from pathlib import Path
 from argparse import Namespace
 
@@ -107,6 +108,7 @@ class TestPlan:
         self.default_platforms = []
         self.load_errors = 0
         self.instances = dict()
+        self.instance_fail_count = 0
         self.warnings = 0
 
         self.scenarios = []
@@ -217,7 +219,7 @@ class TestPlan:
         else:
             last_run = os.path.join(self.options.outdir, "twister.json")
 
-        if self.options.only_failed:
+        if self.options.only_failed or self.options.report_summary is not None:
             self.load_from_file(last_run)
             self.selected_platforms = set(p.platform.name for p in self.instances.values())
         elif self.options.load_tests:
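With the switch active, the loader below re-reads the previous run from twister.json. An abridged, purely illustrative record showing only the fields load_from_file() consults (every name and number here is made up):

    {
      "testsuites": [
        {
          "name": "tests/kernel/common/kernel.common",
          "platform": "native_posix",
          "run_id": "a1b2c3",
          "status": "failed",
          "reason": "Timeout",
          "execution_time": 12.3,
          "used_ram": 2048,
          "used_rom": 16384,
          "available_ram": 65536,
          "available_rom": 262144,
          "testcases": [
            {"identifier": "kernel.common.timing", "status": "failed", "reason": "Timeout", "execution_time": 12.3}
          ]
        }
      ]
    }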
@@ -581,72 +583,83 @@ class TestPlan:
             instance.add_filter("Not under quarantine", Filters.QUARANTINE)
 
     def load_from_file(self, file, filter_platform=[]):
-        with open(file, "r") as json_test_plan:
-            jtp = json.load(json_test_plan)
-            instance_list = []
-            for ts in jtp.get("testsuites", []):
-                logger.debug(f"loading {ts['name']}...")
-                testsuite = ts["name"]
-
-                platform = self.get_platform(ts["platform"])
-                if filter_platform and platform.name not in filter_platform:
-                    continue
-                instance = TestInstance(self.testsuites[testsuite], platform, self.env.outdir)
-                if ts.get("run_id"):
-                    instance.run_id = ts.get("run_id")
-
-                if self.options.device_testing:
-                    tfilter = 'runnable'
-                else:
-                    tfilter = 'buildable'
-                instance.run = instance.check_runnable(
-                    self.options.enable_slow,
-                    tfilter,
-                    self.options.fixture,
-                    self.hwm
-                )
-
-                instance.metrics['handler_time'] = ts.get('execution_time', 0)
-                instance.metrics['used_ram'] = ts.get("used_ram", 0)
-                instance.metrics['used_rom'] = ts.get("used_rom",0)
-                instance.metrics['available_ram'] = ts.get('available_ram', 0)
-                instance.metrics['available_rom'] = ts.get('available_rom', 0)
-
-                status = ts.get('status', None)
-                reason = ts.get("reason", "Unknown")
-                if status in ["error", "failed"]:
-                    instance.status = None
-                    instance.reason = None
-                    instance.retries += 1
-                # test marked as passed (built only) but can run when
-                # --test-only is used. Reset status to capture new results.
-                elif status == 'passed' and instance.run and self.options.test_only:
-                    instance.status = None
-                    instance.reason = None
-                else:
-                    instance.status = status
-                    instance.reason = reason
-
-                self.handle_quarantined_tests(instance, platform)
-
-                for tc in ts.get('testcases', []):
-                    identifier = tc['identifier']
-                    tc_status = tc.get('status', None)
-                    tc_reason = None
-                    # we set reason only if status is valid, it might have been
-                    # reset above...
-                    if instance.status:
-                        tc_reason = tc.get('reason')
-                    if tc_status:
-                        case = instance.set_case_status_by_name(identifier, tc_status, tc_reason)
-                        case.duration = tc.get('execution_time', 0)
-                        if tc.get('log'):
-                            case.output = tc.get('log')
-
-
-                instance.create_overlay(platform, self.options.enable_asan, self.options.enable_ubsan, self.options.enable_coverage, self.options.coverage_platform)
-                instance_list.append(instance)
-            self.add_instances(instance_list)
+        try:
+            with open(file, "r") as json_test_plan:
+                jtp = json.load(json_test_plan)
+                instance_list = []
+                for ts in jtp.get("testsuites", []):
+                    logger.debug(f"loading {ts['name']}...")
+                    testsuite = ts["name"]
+
+                    platform = self.get_platform(ts["platform"])
+                    if filter_platform and platform.name not in filter_platform:
+                        continue
+                    instance = TestInstance(self.testsuites[testsuite], platform, self.env.outdir)
+                    if ts.get("run_id"):
+                        instance.run_id = ts.get("run_id")
+
+                    if self.options.device_testing:
+                        tfilter = 'runnable'
+                    else:
+                        tfilter = 'buildable'
+                    instance.run = instance.check_runnable(
+                        self.options.enable_slow,
+                        tfilter,
+                        self.options.fixture,
+                        self.hwm
+                    )
+
+                    instance.metrics['handler_time'] = ts.get('execution_time', 0)
+                    instance.metrics['used_ram'] = ts.get("used_ram", 0)
+                    instance.metrics['used_rom'] = ts.get("used_rom",0)
+                    instance.metrics['available_ram'] = ts.get('available_ram', 0)
+                    instance.metrics['available_rom'] = ts.get('available_rom', 0)
+
+                    status = ts.get('status', None)
+                    reason = ts.get("reason", "Unknown")
+                    if status in ["error", "failed"]:
+                        if self.options.report_summary is not None:
+                            if status == "error": status = "ERROR"
+                            elif status == "failed": status = "FAILED"
+                            instance.status = Fore.RED + status + Fore.RESET
+                            instance.reason = reason
+                            self.instance_fail_count += 1
+                        else:
+                            instance.status = None
+                            instance.reason = None
+                            instance.retries += 1
+                    # test marked as passed (built only) but can run when
+                    # --test-only is used. Reset status to capture new results.
+                    elif status == 'passed' and instance.run and self.options.test_only:
+                        instance.status = None
+                        instance.reason = None
+                    else:
+                        instance.status = status
+                        instance.reason = reason
+
+                    self.handle_quarantined_tests(instance, platform)
+
+                    for tc in ts.get('testcases', []):
+                        identifier = tc['identifier']
+                        tc_status = tc.get('status', None)
+                        tc_reason = None
+                        # we set reason only if status is valid, it might have been
+                        # reset above...
+                        if instance.status:
+                            tc_reason = tc.get('reason')
+                        if tc_status:
+                            case = instance.set_case_status_by_name(identifier, tc_status, tc_reason)
+                            case.duration = tc.get('execution_time', 0)
+                            if tc.get('log'):
+                                case.output = tc.get('log')
+
+
+                    instance.create_overlay(platform, self.options.enable_asan, self.options.enable_ubsan, self.options.enable_coverage, self.options.coverage_platform)
+                    instance_list.append(instance)
+                self.add_instances(instance_list)
+        except FileNotFoundError as e:
+            logger.error(f"{e}")
+            return 1
 
     def apply_filters(self, **kwargs):
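The Fore.RED + status + Fore.RESET pattern above is plain colorama usage, nothing twister-specific. A minimal sketch, assuming colorama is installed:

    # Standalone sketch of the status colouring used in load_from_file().
    from colorama import Fore, init

    init()  # enables ANSI handling on Windows; effectively a no-op elsewhere

    for status in ("error", "failed"):
        label = "ERROR" if status == "error" else "FAILED"
        print(Fore.RED + label + Fore.RESET)  # renders the label in red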
@@ -73,7 +73,7 @@ def main(options):
 
     previous_results = None
     # Cleanup
-    if options.no_clean or options.only_failed or options.test_only:
+    if options.no_clean or options.only_failed or options.test_only or options.report_summary is not None:
         if os.path.exists(options.outdir):
             print("Keeping artifacts untouched")
     elif options.last_metrics:
@@ -160,6 +160,13 @@ def main(options):
         report.json_report(options.save_tests)
         return 0
 
+    if options.report_summary is not None:
+        if options.report_summary < 0:
+            logger.error("The report summary value cannot be less than 0")
+            return 1
+        report.synopsis()
+        return 0
+
     if options.device_testing and not options.build_only:
         print("\nDevice testing on:")
         hwm.dump(filtered=tplan.selected_platforms)
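With the guard above, a negative count aborts before any summary is printed, and a valid value short-circuits the normal run. Illustrative invocation (exact log formatting will differ):

    scripts/twister --report-summary -3
    # exits with: "The report summary value cannot be less than 0"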