sanitycheck: fix --only-failed handling
- Report build errors as errors, not test failures
- Do not try and build/run tests with build failures
- Fix issue with empty reports when running --only-failed
- Report build errors in the detailed and target reports

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
parent 95717db0f4
commit f04461e8d0
2 changed files with 40 additions and 20 deletions
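As a quick aside on the status split the commit message describes (build and cmake problems are reported as "error", while "failed"/"timeout" remain genuine test failures), here is a minimal, self-contained tally sketch. It is not part of this commit; the helper name and the example statuses are invented for illustration.

from collections import Counter

def summarize(statuses):
    # Bucket statuses the way the updated reports treat them:
    # "error" covers build/cmake problems, "failed"/"timeout" are test failures.
    counts = Counter()
    for status in statuses:
        if status == "error":
            counts["errors"] += 1
        elif status in ("failed", "timeout"):
            counts["failures"] += 1
        elif status == "skipped":
            counts["skipped"] += 1
        else:
            counts["passed"] += 1
    return counts

# e.g. summarize(["passed", "error", "failed", "timeout", "skipped"])
# -> Counter({'failures': 2, 'passed': 1, 'errors': 1, 'skipped': 1})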
@@ -1725,7 +1725,7 @@ class CMake():
                     self.instance.status = "skipped"
                     self.instance.reason = "{} overflow".format(res[0])
                 else:
-                    self.instance.status = "failed"
+                    self.instance.status = "error"
                     self.instance.reason = "Build failure"

             results = {
@@ -1783,7 +1783,7 @@ class CMake():
             results = {'msg': msg, 'filter': filter_results}

         else:
-            self.instance.status = "failed"
+            self.instance.status = "error"
             self.instance.reason = "Cmake build failure"
             logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
             results = {"returncode": p.returncode}
@@ -1973,7 +1973,7 @@ class ProjectBuilder(FilterBuilder):
         # The build process, call cmake and build with configured generator
         if op == "cmake":
             results = self.cmake()
-            if self.instance.status == "failed":
+            if self.instance.status in ["failed", "error"]:
                 pipeline.put({"op": "report", "test": self.instance})
             elif self.cmake_only:
                 pipeline.put({"op": "report", "test": self.instance})
@@ -1993,7 +1993,7 @@ class ProjectBuilder(FilterBuilder):
             results = self.build()

             if not results:
-                self.instance.status = "failed"
+                self.instance.status = "error"
                 self.instance.reason = "Build Failure"
                 pipeline.put({"op": "report", "test": self.instance})
             else:
@@ -2060,7 +2060,7 @@ class ProjectBuilder(FilterBuilder):
         self.suite.total_done += 1
         instance = self.instance

-        if instance.status in ["failed", "timeout"]:
+        if instance.status in ["error", "failed", "timeout"]:
             self.suite.total_failed += 1
             if self.verbose:
                 status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
@@ -2099,7 +2099,7 @@ class ProjectBuilder(FilterBuilder):
                 self.suite.total_done, total_tests_width, self.suite.total_tests, instance.platform.name,
                 instance.testcase.name, status, more_info))

-            if instance.status in ["failed", "timeout"]:
+            if instance.status in ["error", "failed", "timeout"]:
                 self.log_info_file(self.inline_logs)
         else:
             sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
@@ -2658,6 +2658,8 @@ class TestSuite(DisablePyTestCollectionMixin):
                         self.device_testing,
                         self.fixtures
                     )
+                    for t in tc.cases:
+                        instance.results[t] = None

                     if device_testing_filter:
                         for h in self.connected_hardware:
@@ -2815,7 +2817,7 @@ class TestSuite(DisablePyTestCollectionMixin):

     def execute(self):
         def calc_one_elf_size(instance):
-            if instance.status not in ["failed", "skipped"]:
+            if instance.status not in ["error", "failed", "skipped"]:
                 if instance.platform.type != "native":
                     size_calc = instance.calculate_sizes()
                     instance.metrics["ram_size"] = size_calc.get_ram_size()
@@ -2943,7 +2945,6 @@ class TestSuite(DisablePyTestCollectionMixin):

-
     def xunit_report(self, filename, platform=None, full_report=False, append=False):

         total = 0
         if platform:
             selected = [platform]
@@ -2978,7 +2979,7 @@ class TestSuite(DisablePyTestCollectionMixin):
                         else:
                             fails += 1
                 else:
-                    if instance.status in ["failed", "timeout"]:
+                    if instance.status in ["error", "failed", "timeout"]:
                         if instance.reason in ['build_error', 'handler_crash']:
                             errors += 1
                         else:
@@ -2999,10 +3000,20 @@ class TestSuite(DisablePyTestCollectionMixin):
             # When we re-run the tests, we re-use the results and update only with
             # the newly run tests.
             if os.path.exists(filename) and append:
-                eleTestsuite = eleTestsuites.findall(f'testsuite/[@name="{p}"]')[0]
-                eleTestsuite.attrib['failures'] = "%d" % fails
-                eleTestsuite.attrib['errors'] = "%d" % errors
-                eleTestsuite.attrib['skip'] = "%d" % skips
+                ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
+                if ts:
+                    eleTestsuite = ts[0]
+                    eleTestsuite.attrib['failures'] = "%d" % fails
+                    eleTestsuite.attrib['errors'] = "%d" % errors
+                    eleTestsuite.attrib['skip'] = "%d" % skips
+                else:
+                    logger.info(f"Did not find any existing results for {p}")
+                    eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
+                                                 name=run, time="%f" % duration,
+                                                 tests="%d" % (total),
+                                                 failures="%d" % fails,
+                                                 errors="%d" % (errors), skip="%s" % (skips))
+
             else:
                 eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                              name=run, time="%f" % duration,
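The hunk above is the fix for the append path: previously findall(...)[0] raised IndexError whenever the existing xunit file had no <testsuite> entry for the platform being updated. Below is a small standalone sketch of the same guarded-lookup pattern using xml.etree.ElementTree directly; the suite name and counter values are invented for the example.

import xml.etree.ElementTree as ET

def get_or_create_testsuite(root, name):
    # Reuse an existing <testsuite> element if one with this name is present,
    # otherwise create a fresh one instead of indexing an empty findall() result.
    found = root.findall(f"testsuite[@name='{name}']")
    if found:
        return found[0]
    return ET.SubElement(root, 'testsuite', name=name,
                         tests="0", failures="0", errors="0", skip="0")

root = ET.Element('testsuites')
ts = get_or_create_testsuite(root, "native_posix")            # created
assert get_or_create_testsuite(root, "native_posix") is ts    # reused, no IndexError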
@@ -3039,6 +3050,7 @@ class TestSuite(DisablePyTestCollectionMixin):
                                     type="failure",
                                     message="failed")
                             else:
+
                                 el = ET.SubElement(
                                     eleTestcase,
                                     'error',
@@ -3048,28 +3060,37 @@ class TestSuite(DisablePyTestCollectionMixin):
                                 log_file = os.path.join(p, "handler.log")
                                 el.text = self.process_log(log_file)

+                        elif instance.results[k] == 'PASS':
+                            pass
                         elif instance.results[k] == 'SKIP':
+                            el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
+                        else:
                             el = ET.SubElement(
                                 eleTestcase,
-                                'skipped',
-                                type="skipped",
-                                message="Skipped")
+                                'error',
+                                type="error",
+                                message=f"{instance.reason}")
                 else:
                     if platform:
                         classname = ".".join(instance.testcase.name.split(".")[:2])
                     else:
                         classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])
+
+                    # remove testcases that are being re-run from exiting reports
+                    for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"]'):
+                        eleTestsuite.remove(tc)
+
                     eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
                                                 classname=classname,
                                                 name="%s" % (instance.testcase.name),
                                                 time="%f" % handler_time)
-                    if instance.status in ["failed", "timeout"]:
+                    if instance.status in ["error", "failed", "timeout"]:
                         failure = ET.SubElement(
                             eleTestcase,
                             'failure',
                             type="failure",
                             message=instance.reason)

                     p = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
                     bl = os.path.join(p, "build.log")
                     hl = os.path.join(p, "handler.log")
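The removal loop added above keeps an appended report from accumulating duplicate <testcase> entries when a test is re-run. Here is the same idea shown standalone; the classnames and testcase names are invented for the example.

import xml.etree.ElementTree as ET

suite = ET.Element('testsuite', name="native_posix")
ET.SubElement(suite, 'testcase', classname="kernel.common", name="kernel.common.timing")
ET.SubElement(suite, 'testcase', classname="kernel.queue", name="kernel.queue.basic")

classname = "kernel.common"
# Drop stale entries for the testcases being re-run so the appended report does
# not end up with both the old and the new result for the same case.
for tc in suite.findall(f"testcase[@classname='{classname}']"):
    suite.remove(tc)

print([tc.get('name') for tc in suite])   # ['kernel.queue.basic']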
@@ -3105,7 +3126,7 @@ class TestSuite(DisablePyTestCollectionMixin):
                        "handler": instance.platform.simulation}

             rowdict["status"] = instance.status
-            if instance.status not in ["failed", "timeout"]:
+            if instance.status not in ["error", "failed", "timeout"]:
                 if instance.handler:
                     rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
                 ram_size = instance.metrics.get("ram_size", 0)

@@ -916,7 +916,6 @@ def main():
     if options.test_tree:
         for pre, _, node in RenderTree(testsuite):
             print("%s%s" % (pre, node.name))

         return
-
     discards = []
@@ -927,7 +926,7 @@ def main():
     last_run = os.path.join(options.outdir, "sanitycheck.csv")

     if options.only_failed:
-        suite.load_from_file(last_run, filter_status=['skipped', 'passed'])
+        suite.load_from_file(last_run, filter_status=['error', 'skipped', 'passed'])
         suite.selected_platforms = set(p.platform.name for p in suite.instances.values())
     elif options.load_tests:
         suite.load_from_file(options.load_tests)
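For --only-failed, the reload of the previous run now also filters out instances whose status was "error", so sanitycheck does not try to rebuild tests that already hit a build failure. A simplified stand-in for that filter is sketched below; it is not TestSuite.load_from_file, and it only assumes the CSV has a status column and that filter_status lists statuses to skip on reload, which is what the call site suggests.

import csv

def load_retry_candidates(csv_path, filter_status=('error', 'skipped', 'passed')):
    # Keep only rows from the previous run whose status is not filtered out,
    # i.e. the genuine test failures/timeouts that --only-failed should retry.
    retry = []
    with open(csv_path, newline='') as f:
        for row in csv.DictReader(f):
            if row.get('status') in filter_status:
                continue
            retry.append(row)
    return retry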