sanitycheck: Complete overhaul and job handling rework
A complete overhaul of the sanitycheck script and of how we build and run tests. This new version of sanitycheck uses Python for job distribution and drops the use of Make. In addition to the move to the Python threading library, the following has changed:

- All handlers now run in parallel: any simulator runs in parallel, and when testing on multiple devices (using --device-testing) the tests run in parallel as well.
- Lexical filtering (using the filter keyword in YAML files) is now evaluated at runtime and is no longer pre-processed. This lets us start executing tests immediately and skip the wait time that filtering used to require.
- Device testing now supports multiple devices connected at the same time, managed through a hardware map (generated with the --generate-hardware-map option) that needs to be maintained for every test environment.
- Reports are no longer stored in the Zephyr tree; they are stored in the output directory where all build artifacts are generated.
- Each tested target now has a JUnit report in the output directory.
- A recording option for performance data and other metrics is now available. It records output from the console and stores the data for later processing, so that, for example, benchmark data can be captured and uploaded to a tracking server (see the sketch after the change summary below).
- Test configurations (or instances) are no longer sorted; this helps balance the load when we run sanitycheck on multiple hosts (as we do in CI).

And many other cleanups and improvements...

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
parent ba696c0354
commit 83fc06a8fe
4 changed files with 1921 additions and 1636 deletions
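To make the recording option concrete, here is a minimal sketch of the flow it enables, mirroring what the Console harness does in the diff below. Everything in it is hypothetical: the harness_config contents, the regex, the sample console lines, and the recording.csv file name are assumptions made for this example only and are not part of the commit.

# Hypothetical example: none of these values come from the commit itself.
import csv
import re

# A harness_config as it might look after a testcase YAML file is parsed;
# the "record" mapping with a named-group regex is the new piece.
harness_config = {
    "type": "one_line",
    "record": {
        "regex": r"latency:(?P<latency>[0-9]+) cycles, throughput:(?P<throughput>[0-9]+) ops/s",
    },
}

# Console output as it might be captured from a device or simulator.
console_lines = [
    "*** Booting Zephyr OS ***",
    "latency:1200 cycles, throughput:4500 ops/s",
    "latency:1180 cycles, throughput:4620 ops/s",
    "PROJECT EXECUTION SUCCESSFUL",
]

pattern = re.compile(harness_config["record"]["regex"])
fieldnames = []   # column names, taken from the regex group names
recording = []    # one row per matching console line

for line in console_lines:
    match = pattern.search(line)
    if match:
        if not fieldnames:
            fieldnames = list(match.groupdict().keys())
        recording.append([v.strip() for v in match.groupdict().values()])

# Store the captured metrics alongside the other build artifacts so they can
# be post-processed or uploaded to a tracking server later.
with open("recording.csv", "w", newline="") as out:
    writer = csv.writer(out)
    writer.writerow(fieldnames)
    writer.writerows(recording)

The named groups in the regex double as the column headers, which is why the harness only has to collect field names once, from the first matching line.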
@@ -8,6 +8,8 @@ class Harness:
     GCOV_START = "GCOV_COVERAGE_DUMP_START"
     GCOV_END = "GCOV_COVERAGE_DUMP_END"
     FAULT = "ZEPHYR FATAL ERROR"
+    RUN_PASSED = "PROJECT EXECUTION SUCCESSFUL"
+    RUN_FAILED = "PROJECT EXECUTION FAILED"

     def __init__(self):
         self.state = None
@@ -22,11 +24,14 @@ class Harness:
         self.fault = False
         self.capture_coverage = False
         self.next_pattern = 0
+        self.record = None
+        self.recording = []
+        self.fieldnames = []

     def configure(self, instance):
-        config = instance.test.harness_config
-        self.id = instance.test.id
-        if "ignore_faults" in instance.test.tags:
+        config = instance.testcase.harness_config
+        self.id = instance.testcase.id
+        if "ignore_faults" in instance.testcase.tags:
             self.fail_on_fault = False

         if config:
@@ -34,6 +39,27 @@ class Harness:
             self.regex = config.get('regex', [])
             self.repeat = config.get('repeat', 1)
             self.ordered = config.get('ordered', True)
+            self.record = config.get('record', {})
+
+    def process_test(self, line):
+
+        if self.RUN_PASSED in line:
+            if self.fault:
+                self.state = "failed"
+            else:
+                self.state = "passed"
+
+        if self.RUN_FAILED in line:
+            self.state = "failed"
+
+        if self.fail_on_fault:
+            if self.FAULT == line:
+                self.fault = True
+
+        if self.GCOV_START in line:
+            self.capture_coverage = True
+        elif self.GCOV_END in line:
+            self.capture_coverage = False


 class Console(Harness):
@@ -73,11 +99,26 @@ class Console(Harness):
         elif self.GCOV_END in line:
             self.capture_coverage = False

+
+        if self.record:
+            pattern = re.compile(self.record.get("regex", ""))
+            match = pattern.search(line)
+            if match:
+                csv = []
+                if not self.fieldnames:
+                    for k,v in match.groupdict().items():
+                        self.fieldnames.append(k)
+
+                for k,v in match.groupdict().items():
+                    csv.append(v.strip())
+                self.recording.append(csv)
+
         if self.state == "passed":
             self.tests[self.id] = "PASS"
         else:
             self.tests[self.id] = "FAIL"

+        self.process_test(line)

 class Test(Harness):
     RUN_PASSED = "PROJECT EXECUTION SUCCESSFUL"
@@ -106,3 +147,5 @@ class Test(Harness):
             self.capture_coverage = True
         elif self.GCOV_END in line:
             self.capture_coverage = False
+
+        self.process_test(line)
@@ -62,6 +62,13 @@ mapping:
             required: no
             sequence:
               - type: str
+          "record":
+            type: map
+            required: no
+            mapping:
+              "regex":
+                type: str
+                required: no
       "min_ram":
         type: int
         required: no
@@ -175,6 +182,13 @@ mapping:
             required: no
             sequence:
               - type: str
+          "record":
+            type: map
+            required: no
+            mapping:
+              "regex":
+                type: str
+                required: no
       "min_ram":
         type: int
         required: no
scripts/sanitycheck (3118 changes): file diff suppressed because it is too large.