twister: use json instead of csv for everything

Drop CSV generation, which has been the standard format for sharing results
since the early days, and instead use JSON with richer content to share
information across sessions and for report generation.
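
For illustration, a minimal sketch of how the new report could be consumed,
using only field names visible in the diff below; the report path and exact
layout here are assumptions, not part of this patch:

    import json

    # Hypothetical consumer of the JSON report written into the output
    # directory (e.g. twister.json).
    with open("twister.json") as fp:
        report = json.load(fp)

    for ts in report.get("testsuites", []):
        # Each suite entry carries "name" (formerly "testcase"), "platform",
        # "arch", "status"/"reason" and metrics such as "execution_time".
        print(ts.get("name"), ts.get("platform"), ts.get("status"))
        for tc in ts.get("testcases", []):
            print("    ", tc["identifier"], tc.get("status"))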

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Anas Nashif, 2022-03-19 08:08:44 -04:00
commit 3dcc038274
2 changed files with 55 additions and 98 deletions

@@ -3127,12 +3127,14 @@ class TestSuite(DisablePyTestCollectionMixin):
         results = []
         saved_metrics = {}
         with open(filename) as fp:
-            cr = csv.DictReader(fp)
-            for row in cr:
+            jt = json.load(fp)
+            for ts in jt.get("testsuites", []):
                 d = {}
                 for m, _, _ in interesting_metrics:
-                    d[m] = row[m]
-                saved_metrics[(row["test"], row["platform"])] = d
+                    d[m] = ts.get(m, 0)
+                ts_name = ts.get('name')
+                ts_platform = ts.get('platform')
+                saved_metrics[(ts_name, ts_platform)] = d

         for instance in self.instances.values():
             mkey = (instance.testcase.name, instance.platform.name)
@@ -3258,18 +3260,12 @@ class TestSuite(DisablePyTestCollectionMixin):
                               append=only_failed, version=self.version)
             self.xunit_report(filename + "_report.xml", full_report=True,
                               append=only_failed, version=self.version)
-        self.csv_report(filename + ".csv")

         if json_report:
             self.json_report(filename + ".json", append=only_failed, version=self.version)

         if platform_reports:
             self.target_report(outdir, suffix, append=only_failed)

-        if self.discards:
-            self.discard_report(filename + "_discard.csv")
-
-        if release:
-            self.csv_report(self.RELEASE_DATA)

     def add_configurations(self):
@@ -3433,20 +3429,21 @@ class TestSuite(DisablePyTestCollectionMixin):

     def load_from_file(self, file, filter_status=[], filter_platform=[]):
         try:
-            with open(file, "r") as fp:
-                cr = csv.DictReader(fp)
+            with open(file, "r") as json_test_plan:
+                jtp = json.load(json_test_plan)
                 instance_list = []
-                for row in cr:
-                    if row["status"] in filter_status:
-                        continue
-                    test = row["test"]
+                for ts in jtp.get("testsuites", []):
+                    logger.debug(f"loading {ts['name']}...")
+                    #if ts["status"] in filter_status:
+                    #    continue
+                    testsuite = ts["name"]

-                    platform = self.get_platform(row["platform"])
+                    platform = self.get_platform(ts["platform"])
                     if filter_platform and platform.name not in filter_platform:
                         continue
-                    instance = TestInstance(self.testcases[test], platform, self.outdir)
-                    if "run_id" in row and row["run_id"] != "na":
-                        instance.run_id = row["run_id"]
+                    instance = TestInstance(self.testcases[testsuite], platform, self.outdir)
+                    if ts.get("run_id"):
+                        instance.run_id = ts.get("run_id")
                     if self.device_testing:
                         tfilter = 'runnable'
                     else:
@@ -3456,6 +3453,13 @@ class TestSuite(DisablePyTestCollectionMixin):
                         tfilter,
                         self.fixtures
                     )
+                    instance.status = ts['status']
+                    instance.reason = ts.get("reason", "Unknown")
+                    for t in ts.get('testcases', []):
+                        identifier = t['identifier']
+                        status = ts.get('status', None)
+                        if status:
+                            instance.results[identifier] = status
                     instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
                     instance_list.append(instance)
                 self.add_instances(instance_list)
@@ -3802,25 +3806,6 @@ class TestSuite(DisablePyTestCollectionMixin):

         return results

-    def discard_report(self, filename):
-        try:
-            if not self.discards:
-                raise TwisterRuntimeError("apply_filters() hasn't been run!")
-        except Exception as e:
-            logger.error(str(e))
-            sys.exit(2)
-
-        with open(filename, "wt") as csvfile:
-            fieldnames = ["test", "arch", "platform", "reason"]
-            cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
-            cw.writeheader()
-            for instance, reason in sorted(self.discards.items()):
-                rowdict = {"test": instance.testcase.name,
-                           "arch": instance.platform.arch,
-                           "platform": instance.platform.name,
-                           "reason": reason}
-                cw.writerow(rowdict)
-
     def target_report(self, outdir, suffix, append=False):
         platforms = {inst.platform.name for _, inst in self.instances.items()}
         for platform in platforms:
@@ -3878,7 +3863,7 @@ class TestSuite(DisablePyTestCollectionMixin):
             for _, instance in inst.items():
                 handler_time = instance.metrics.get('handler_time', 0)
                 duration += handler_time
-                if full_report and instance.run:
+                if full_report: # and instance.run:
                     for k in instance.results.keys():
                         if instance.results[k] == 'PASS':
                             passes += 1
@@ -4030,51 +4015,25 @@ class TestSuite(DisablePyTestCollectionMixin):

         return fails, passes, errors, skips

-    def csv_report(self, filename):
-        with open(filename, "wt") as csvfile:
-            fieldnames = ["test", "arch", "platform", "status",
-                          "extra_args", "handler", "handler_time", "ram_size",
-                          "rom_size", "run_id"]
-            cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
-            cw.writeheader()
-            for instance in self.instances.values():
-                rowdict = {"test": instance.testcase.name,
-                           "arch": instance.platform.arch,
-                           "platform": instance.platform.name,
-                           "extra_args": " ".join(instance.testcase.extra_args),
-                           "handler": instance.platform.simulation}
-
-                rowdict["status"] = instance.status
-                if instance.status not in ["error", "failed", "timeout"]:
-                    if instance.handler:
-                        rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
-                    ram_size = instance.metrics.get("ram_size", 0)
-                    rom_size = instance.metrics.get("rom_size", 0)
-                    rowdict["ram_size"] = ram_size
-                    rowdict["rom_size"] = rom_size
-                try:
-                    rowdict["run_id"] = instance.run_id
-                except AttributeError:
-                    # No run_id available
-                    rowdict["run_id"] = "na"
-                cw.writerow(rowdict)
-
     def json_report(self, filename, append=False, version="NA"):
         logger.info(f"Writing JSON report {filename}")
         report = {}
-        selected = self.selected_platforms
         report["environment"] = {"os": os.name,
                                  "zephyr_version": version,
                                  "toolchain": self.get_toolchain()
                                  }
         json_data = {}
         if os.path.exists(filename) and append:
+            logger.debug(f"Loading previous data from {filename}")
             with open(filename, 'r') as json_file:
                 json_data = json.load(json_file)

         suites = json_data.get("testsuites", [])
+        # remove existing testcases that were re-run
+        for instance in self.instances.values():
+            suites = list(filter(lambda d: d['name'] != instance.testcase.name, suites))

         for instance in self.instances.values():
             suite = {}
             handler_log = os.path.join(instance.build_dir, "handler.log")
@@ -4085,7 +4044,7 @@ class TestSuite(DisablePyTestCollectionMixin):
             ram_size = instance.metrics.get ("ram_size", 0)
             rom_size = instance.metrics.get("rom_size",0)
             suite = {
-                "testcase": instance.testcase.name,
+                "name": instance.testcase.name,
                 "arch": instance.platform.arch,
                 "platform": instance.platform.name,
             }
@@ -4093,17 +4052,22 @@ class TestSuite(DisablePyTestCollectionMixin):
                 suite["ram_size"] = ram_size
             if rom_size:
                 suite["rom_size"] = rom_size
+            suite["execution_time"] = handler_time

             if instance.status in ["error", "failed", "timeout", "flash_error"]:
                 suite["status"] = "failed"
                 suite["reason"] = instance.reason
-                suite["execution_time"] = handler_time
                 if os.path.exists(handler_log):
                     suite["test_output"] = self.process_log(handler_log)
                 elif os.path.exists(device_log):
                     suite["device_log"] = self.process_log(device_log)
                 else:
                     suite["build_log"] = self.process_log(build_log)
+            elif instance.status == 'filtered':
+                suite["status"] = "filtered"
+                suite["reason"] = instance.reason
+            else:
+                suite["status"] = instance.status

             testcases = []
             for k in instance.results.keys():
@@ -4112,12 +4076,10 @@ class TestSuite(DisablePyTestCollectionMixin):
                 if instance.results[k] in ["SKIP"]:
                     testcase["status"] = "skipped"
                     testcase["reason"] = instance.reason
-                elif instance.status == 'filtered':
-                    testcase["status"] = "filtered"
-                    testcase["reason"] = instance.reason
                 elif instance.results[k] in ["PASS"] or instance.status == 'passed':
                     testcase["status"] = "passed"
-                elif instance.results[k] in ['FAIL', 'BLOCK'] or instance.status in ["error", "failed", "timeout", "flash_error"]:
+                elif instance.results[k] in ['FAIL', 'BLOCK'] or \
+                    instance.status in ["error", "failed", "timeout", "flash_error"]:
                     testcase["status"] = "failed"
                     testcase["reason"] = instance.reason

@@ -4126,7 +4088,6 @@ class TestSuite(DisablePyTestCollectionMixin):

             suites.append(suite)

         report["testsuites"] = suites
-
         with open(filename, "wt") as json_file:
             json.dump(report, json_file, indent=4, separators=(',',':'))

@@ -152,8 +152,8 @@ pairs:
 Would match it.

 The set of test cases that actually run depends on directives in the testcase
-filed and options passed in on the command line. If there is any confusion,
-running with -v or examining the discard report (twister_discard.csv)
+files and options passed in on the command line. If there is any confusion,
+running with -v or examining the test plan report (testplan.json)
 can help show why particular test cases were skipped.

 Metrics (such as pass/fail state and binary size) for the last code
@@ -173,12 +173,10 @@ import argparse
 import sys
 import logging
 import time
-import itertools
 import shutil
 from collections import OrderedDict
 import multiprocessing
 from itertools import islice
-import csv
 from colorama import Fore
 from pathlib import Path
 from multiprocessing.managers import BaseManager
@@ -551,11 +549,6 @@ structure in the main Zephyr tree: boards/<arch>/<board_name>/""")
         help="Number of jobs for building, defaults to number of CPU threads, "
              "overcommitted by factor 2 when --build-only.")

-    parser.add_argument(
-        "--json-report", action="store_true",
-        help="""Generate a JSON file with all test results. [Experimental]
-        """)
-
     parser.add_argument(
         "-K", "--force-platform", action="store_true",
         help="""Force testing on selected platforms,
@@ -604,8 +597,8 @@ structure in the main Zephyr tree: boards/<arch>/<board_name>/""")
         "-o", "--report-dir",
         help="""Output reports containing results of the test run into the
         specified directory.
-        The output will be both in CSV and JUNIT format
-        (twister.csv and twister.xml).
+        The output will be both in JSON and JUNIT format
+        (twister.json and twister.xml).
         """)

     parser.add_argument("--overflow-as-errors", action="store_true",
@@ -795,9 +788,9 @@ structure in the main Zephyr tree: boards/<arch>/<board_name>/""")
     parser.add_argument(
         "-y", "--dry-run", action="store_true",
         help="""Create the filtered list of test cases, but don't actually
-        run them. Useful if you're just interested in the discard report
+        run them. Useful if you're just interested in the test plan
         generated for every run and saved in the specified output
-        directory (twister_discard.csv).
+        directory (testplan.json).
         """)

     parser.add_argument(
@@ -879,7 +872,7 @@ def main():
     if os.path.exists(options.outdir):
         print("Keeping artifacts untouched")
     elif options.last_metrics:
-        ls = os.path.join(options.outdir, "twister.csv")
+        ls = os.path.join(options.outdir, "twister.json")
        if os.path.exists(ls):
            with open(ls, "r") as fp:
                previous_results = fp.read()
@@ -900,7 +893,7 @@ def main():
     previous_results_file = None
     os.makedirs(options.outdir, exist_ok=True)
     if options.last_metrics and previous_results:
-        previous_results_file = os.path.join(options.outdir, "baseline.csv")
+        previous_results_file = os.path.join(options.outdir, "baseline.json")
         with open(previous_results_file, "w") as fp:
             fp.write(previous_results)
@@ -1132,9 +1125,9 @@ def main():
     discards = []

     if options.report_suffix:
-        last_run = os.path.join(options.outdir, "twister_{}.csv".format(options.report_suffix))
+        last_run = os.path.join(options.outdir, "twister_{}.json".format(options.report_suffix))
     else:
-        last_run = os.path.join(options.outdir, "twister.csv")
+        last_run = os.path.join(options.outdir, "twister.json")

     if options.quarantine_list:
         suite.load_quarantine(options.quarantine_list)
@@ -1146,10 +1139,10 @@ def main():
     suite.quarantine_verify = options.quarantine_verify

     if options.only_failed:
-        suite.load_from_file(last_run, filter_status=['skipped', 'passed'])
+        suite.load_from_file(last_run, filter_status=['skipped', 'passed', 'filtered'])
         suite.selected_platforms = set(p.platform.name for p in suite.instances.values())
     elif options.load_tests:
-        suite.load_from_file(options.load_tests, filter_status=['skipped', 'error'])
+        suite.load_from_file(options.load_tests, filter_status=['skipped', 'error', 'filtered'])
         suite.selected_platforms = set(p.platform.name for p in suite.instances.values())
     elif options.test_only:
         # Get list of connected hardware and filter tests to only be run on connected hardware
@@ -1280,8 +1273,11 @@ def main():
     suite.instances.update(skipped)
     suite.instances.update(errors)

+    suite.json_report(os.path.join(options.outdir, "testplan.json"))
+
     if options.save_tests:
-        suite.csv_report(options.save_tests)
+        suite.json_report(options.save_tests)
         return

     logger.info("%d test scenarios (%d configurations) selected, %d configurations discarded due to filters." %