twister: report: capture failures per testcase
Capture failures per testcase so that we can include the output in reports for only the failed testcases, and not include the whole log, which can be overwhelming. Signed-off-by: Anas Nashif <anas.nashif@intel.com>
This commit is contained in:
parent
b86895adbb
commit
26e3c30234
2 changed files with 19 additions and 2 deletions
|
@@ -25,7 +25,7 @@ class Harness:
|
|||
'PASS': 'passed',
|
||||
'SKIP': 'skipped',
|
||||
'BLOCK': 'blocked',
|
||||
'FAIL': 'failure'
|
||||
'FAIL': 'failed'
|
||||
}
|
||||
|
||||
def __init__(self):
|
||||
|
@@ -51,6 +51,8 @@ class Harness:
|
|||
self.matched_run_id = False
|
||||
self.run_id_exists = False
|
||||
self.instance = None
|
||||
self.testcase_output = ""
|
||||
self._match = False
|
||||
|
||||
def configure(self, instance):
|
||||
self.instance = instance
|
||||
|
@@ -251,6 +253,7 @@ class Test(Harness):
|
|||
RUN_PASSED = "PROJECT EXECUTION SUCCESSFUL"
|
||||
RUN_FAILED = "PROJECT EXECUTION FAILED"
|
||||
test_suite_start_pattern = r"Running TESTSUITE (?P<suite_name>.*)"
|
||||
ZTEST_START_PATTERN = r"START - (test_)?(.*)"
|
||||
|
||||
def handle(self, line):
|
||||
test_suite_match = re.search(self.test_suite_start_pattern, line)
|
||||
|
@@ -258,6 +261,11 @@ class Test(Harness):
|
|||
suite_name = test_suite_match.group("suite_name")
|
||||
self.detected_suite_names.append(suite_name)
|
||||
|
||||
testcase_match = re.search(self.ZTEST_START_PATTERN, line)
|
||||
if testcase_match or self._match:
|
||||
self.testcase_output += line + "\n"
|
||||
self._match = True
|
||||
|
||||
match = result_re.match(line)
|
||||
|
||||
if match and match.group(2):
|
||||
|
@@ -269,6 +277,10 @@ class Test(Harness):
|
|||
if tc.status == "skipped":
|
||||
tc.reason = "ztest skip"
|
||||
tc.duration = float(match.group(4))
|
||||
if tc.status == "failed":
|
||||
tc.output = self.testcase_output
|
||||
self.testcase_output = ""
|
||||
self._match = False
|
||||
self.ztest = True
|
||||
|
||||
self.process_test(line)
|
||||
|
|
|
@@ -1734,6 +1734,7 @@ class TestCase(DisablePyTestCollectionMixin):
|
|||
self.status = None
|
||||
self.reason = None
|
||||
self.testsuite = testsuite
|
||||
self.output = ""
|
||||
|
||||
def __lt__(self, other):
|
||||
return self.name < other.name
|
||||
|
@@ -3952,7 +3953,8 @@ class TestPlan(DisablePyTestCollectionMixin):
|
|||
for tc in ts.get("testcases", []):
|
||||
status = tc.get('status', ts_status)
|
||||
reason = tc.get('reason', ts.get('reason'))
|
||||
log = ts.get("log")
|
||||
log = tc.get("log", ts.get("log"))
|
||||
|
||||
tc_duration = tc.get('execution_time', handler_time)
|
||||
name = tc.get("identifier")
|
||||
classname = ".".join(name.split(".")[:2])
|
||||
|
@@ -4044,6 +4046,9 @@ class TestPlan(DisablePyTestCollectionMixin):
|
|||
testcase = {}
|
||||
testcase['identifier'] = case.name
|
||||
testcase['execution_time'] = case.duration
|
||||
if case.output != "":
|
||||
testcase['log'] = case.output
|
||||
|
||||
if case.status == "skipped":
|
||||
if instance.status != "filtered":
|
||||
testcase["status"] = "skipped"
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue