sanitycheck: reorg junit output for nicer reports

The classname in the JUnit report has not been used correctly since the
beginning: we had the target name in the classname and put all results
into one single testsuite.

The new output adds one testsuite per platform and simplifies the
classname, so that tests with the same classname can be grouped in the
output, producing readable reports.
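
As a rough illustration of the new layout (platform and test names below
are made up, not taken from an actual run), each platform gets its own
testsuite and the classname is reduced to the first two components of the
test name:

  <testsuites>
    <testsuite name="qemu_x86" tests="3" failures="1" errors="0" skip="0">
      <testcase classname="kernel.common" name="test_atomic" time="0.7"/>
      <testcase classname="kernel.common" name="test_byteorder" time="0.7"/>
      <testcase classname="kernel.timer" name="test_timer_period" time="0.7">
        <failure type="failure" message="failed"/>
      </testcase>
    </testsuite>
    <testsuite name="frdm_k64f" tests="1" failures="0" errors="0" skip="0">
      <testcase classname="kernel.common" name="test_atomic" time="1.2"/>
    </testsuite>
  </testsuites>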

The current output can be seen here:

https://testing.zephyrproject.org/daily_tests/zephyr-v2.2.0-2533-gdbbf834605/report/index.html

This should look much better once we have this change in.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Commit a53c813ac5, authored by Anas Nashif on 2020-05-05 09:32:46 -04:00,
committed by Kumar Gala.

@@ -2941,132 +2941,149 @@ class TestSuite(DisablePyTestCollectionMixin):
        return filtered_string

    def xunit_report(self, filename, platform=None, full_report=False, append=False):
        total = 0
        if platform:
            selected = [platform]
        else:
            selected = self.selected_platforms

        if os.path.exists(filename) and append:
            tree = ET.parse(filename)
            eleTestsuites = tree.getroot()
        else:
            eleTestsuites = ET.Element('testsuites')

        for p in selected:
            inst = self.get_platform_instances(p)
            fails = 0
            passes = 0
            errors = 0
            skips = 0
            duration = 0

            for _, instance in inst.items():
                handler_time = instance.metrics.get('handler_time', 0)
                duration += handler_time
                if full_report:
                    for k in instance.results.keys():
                        if instance.results[k] == 'PASS':
                            passes += 1
                        elif instance.results[k] == 'BLOCK':
                            errors += 1
                        elif instance.results[k] == 'SKIP':
                            skips += 1
                        else:
                            fails += 1
                else:
                    if instance.status in ["failed", "timeout"]:
                        if instance.reason in ['build_error', 'handler_crash']:
                            errors += 1
                        else:
                            fails += 1
                    elif instance.status == 'skipped':
                        skips += 1
                    else:
                        passes += 1

            total = (errors + passes + fails + skips)
            # do not produce a report if no tests were actually run (only built)
            if total == 0:
                continue

            run = p
            eleTestsuite = None

            # When we re-run the tests, we re-use the results and update only with
            # the newly run tests.
            if os.path.exists(filename) and append:
                eleTestsuite = eleTestsuites.findall(f'testsuite/[@name="{p}"]')[0]
                eleTestsuite.attrib['failures'] = "%d" % fails
                eleTestsuite.attrib['errors'] = "%d" % errors
                eleTestsuite.attrib['skip'] = "%d" % skips
            else:
                eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                             name=run, time="%f" % duration,
                                             tests="%d" % (total),
                                             failures="%d" % fails,
                                             errors="%d" % (errors), skip="%s" % (skips))

            for _, instance in inst.items():
                if full_report:
                    tname = os.path.basename(instance.testcase.name)
                else:
                    tname = instance.testcase.id

                handler_time = instance.metrics.get('handler_time', 0)

                if full_report:
                    for k in instance.results.keys():
                        # remove testcases that are being re-run from existing reports
                        for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
                            eleTestsuite.remove(tc)

                        classname = ".".join(tname.split(".")[:2])
                        eleTestcase = ET.SubElement(
                            eleTestsuite, 'testcase',
                            classname=classname,
                            name="%s" % (k), time="%f" % handler_time)
                        if instance.results[k] in ['FAIL', 'BLOCK']:
                            if instance.results[k] == 'FAIL':
                                el = ET.SubElement(
                                    eleTestcase,
                                    'failure',
                                    type="failure",
                                    message="failed")
                            else:
                                el = ET.SubElement(
                                    eleTestcase,
                                    'error',
                                    type="failure",
                                    message="failed")
                            p = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
                            log_file = os.path.join(p, "handler.log")
                            el.text = self.process_log(log_file)
                        elif instance.results[k] == 'SKIP':
                            el = ET.SubElement(
                                eleTestcase,
                                'skipped',
                                type="skipped",
                                message="Skipped")
                else:
                    if platform:
                        classname = ".".join(instance.testcase.name.split(".")[:2])
                    else:
                        classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])

                    eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
                        classname=classname,
                        name="%s" % (instance.testcase.name),
                        time="%f" % handler_time)
                    if instance.status in ["failed", "timeout"]:
                        failure = ET.SubElement(
                            eleTestcase,
                            'failure',
                            type="failure",
                            message=instance.reason)

                        p = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
                        bl = os.path.join(p, "build.log")
                        hl = os.path.join(p, "handler.log")
                        log_file = bl
                        if instance.reason != 'Build error':
                            if os.path.exists(hl):
                                log_file = hl
                            else:
                                log_file = bl

                        failure.text = self.process_log(log_file)

                    elif instance.status == "skipped":
                        ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")

        result = ET.tostring(eleTestsuites)
        with open(filename, 'wb') as report:
            report.write(result)
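
For context, a minimal sketch of how this method might be driven (the
suite, outdir and file names here are illustrative, not part of this
change):

    # Hypothetical driver code: write one JUnit file per selected platform,
    # plus an aggregate file covering all platforms (platform=None).
    for platform in suite.selected_platforms:
        suite.xunit_report(os.path.join(outdir, platform, "report.xml"),
                           platform=platform, full_report=True)
    suite.xunit_report(os.path.join(outdir, "sanitycheck_report.xml"),
                       full_report=True)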