sanitycheck: Add a feature which can handle pytest script.

1. Add a class of Pytest(Harness) which can handle pytest script
in harness.py
2. Use running_dir to store the current test directory, which will be
used in the pytest script.
3. Add usage of this feature into zephyr doc.

Signed-off-by: YouhuaX Zhu <youhuax.zhu@intel.com>
This commit is contained in:
YouhuaX Zhu 2020-10-22 09:37:27 +08:00 committed by Anas Nashif
commit 965c8b9888
4 changed files with 137 additions and 2 deletions

View file

@ -329,7 +329,7 @@ harness: <string>
simple as a loopback wiring or a complete hardware test setup for
sensor and IO testing.
Usually pertains to external dependency domains but can be anything such as
console, sensor, net, keyboard, or Bluetooth.
console, sensor, net, keyboard, Bluetooth or pytest.
harness_config: <harness configuration options>
Extra harness configuration options to be used to select a board and/or
@ -369,6 +369,11 @@ harness_config: <harness configuration options>
Only one fixture can be defined per testcase.
pytest_root: <pytest directory> (default pytest)
Specify a pytest directory which needs to execute when the test case begins running,
the default pytest directory name is pytest; after pytest finishes, twister will
check whether this case passed or failed according to the pytest report.
The following is an example yaml file with a few harness_config options.
::
@ -390,6 +395,18 @@ harness_config: <harness configuration options>
tags: sensors
depends_on: i2c
The following is an example yaml file with pytest harness_config options.
The default pytest_root name "pytest" will be used if pytest_root is not specified.
Please refer to the example in samples/subsys/testsuite/pytest/.
::
tests:
pytest.example:
harness: pytest
harness_config:
pytest_root: [pytest directory name]
filter: <expression>
Filter whether the testcase should be run by evaluating an expression
against an environment containing the following values:

View file

@ -1,6 +1,9 @@
# SPDX-License-Identifier: Apache-2.0
import re
import os
import subprocess
from collections import OrderedDict
import xml.etree.ElementTree as ET
result_re = re.compile(".*(PASS|FAIL|SKIP) - (test_)?(.*) in")
@ -28,6 +31,7 @@ class Harness:
self.recording = []
self.fieldnames = []
self.ztest = False
self.is_pytest = False
def configure(self, instance):
config = instance.testcase.harness_config
@ -121,6 +125,92 @@ class Console(Harness):
else:
self.tests[self.id] = "FAIL"
class Pytest(Harness):
    """Harness for test cases that delegate verification to a pytest script.

    Rather than parsing console output, the final result is taken from the
    JUnit XML report ("report.xml") produced by a separate ``pytest``
    invocation, see :meth:`pytest_run`.
    """

    def configure(self, instance):
        """Record the directories the pytest invocation needs.

        ``running_dir`` is the instance build directory; it is handed to the
        pytest script via ``--cmdopt`` so artifacts land there.
        ``pytest_root`` (default ``"pytest"``) names the script directory,
        resolved relative to the test case source directory, and may be
        overridden through the ``pytest_root`` harness_config option.
        """
        super(Pytest, self).configure(instance)
        self.running_dir = instance.build_dir
        self.source_dir = instance.testcase.source_dir
        self.pytest_root = 'pytest'
        self.is_pytest = True
        config = instance.testcase.harness_config
        if config:
            self.pytest_root = config.get('pytest_root', 'pytest')

    def handle(self, line):
        """Unconditionally mark the case as passing.

        Test cases that make use of pytest care about the results given by
        the pytest tool called in pytest_run(), so the job of this handler
        is merely to report a PASS so the runner does not time out; nothing
        is written into handler.log here.
        """
        self.state = "passed"
        self.tests[self.id] = "PASS"

    def pytest_run(self, log_file):
        """Run the pytest script and derive the result from its XML report.

        The build directory is passed via ``--cmdopt``; on the pytest side a
        command line option plus a fixture function expose it to the script
        so artifacts are kept in ``self.running_dir``.  The outcome and the
        captured pytest output are appended to ``log_file`` so that, on
        failure, twister can direct the user to handler.log.
        """
        cmd = [
            'pytest',
            '-s',
            os.path.join(self.source_dir, self.pytest_root),
            '--cmdopt',
            self.running_dir,
            '--junit-xml',
            os.path.join(self.running_dir, 'report.xml'),
            '-q'
        ]
        # Bytes defaults keep the .decode() calls below safe even when
        # communicate() raises before any output was captured (the original
        # code used lists here, which would crash with AttributeError).
        outs = b''
        errs = b''
        # Context managers guarantee the log file and the child's pipes are
        # closed on every path, including unexpected exceptions.
        with open(log_file, "a") as log:
            with subprocess.Popen(cmd,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE) as proc:
                try:
                    outs, errs = proc.communicate()
                    tree = ET.parse(os.path.join(self.running_dir, "report.xml"))
                    root = tree.getroot()
                    for child in root:
                        if child.tag == 'testsuite':
                            # JUnit XML counters are strings; any non-zero
                            # failures/skipped/errors decides the state.
                            if child.attrib['failures'] != '0':
                                self.state = "failed"
                            elif child.attrib['skipped'] != '0':
                                self.state = "skipped"
                            elif child.attrib['errors'] != '0':
                                self.state = "errors"
                            else:
                                self.state = "passed"
                except subprocess.TimeoutExpired:
                    # NOTE(review): communicate() is called without a timeout,
                    # so this branch is currently unreachable; kept as a
                    # safety net should a timeout be added.
                    proc.kill()
                    self.state = "failed"
                except ET.ParseError:
                    self.state = "failed"
                except IOError:
                    log.write("Can't access report.xml\n")
                    self.state = "failed"

            if self.state == "passed":
                self.tests[self.id] = "PASS"
                log.write("Pytest cases passed\n")
            elif self.state == "skipped":
                self.tests[self.id] = "SKIP"
                log.write("Pytest cases skipped\n")
                log.write("Please refer report.xml for detail")
            else:
                self.tests[self.id] = "FAIL"
                log.write("Pytest cases failed\n")

            log.write("\nOutput from pytest:\n")
            log.write(outs.decode('UTF-8'))
            log.write(errs.decode('UTF-8'))
class Test(Harness):
RUN_PASSED = "PROJECT EXECUTION SUCCESSFUL"
RUN_FAILED = "PROJECT EXECUTION FAILED"

View file

@ -468,6 +468,10 @@ class BinaryHandler(Handler):
self.line = proc.stdout.readline()
def _output_handler(self, proc, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
timeout_extended = False
timeout_time = time.time() + self.timeout
@ -567,6 +571,8 @@ class BinaryHandler(Handler):
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
if harness.is_pytest:
harness.pytest_run(self.log)
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
@ -600,6 +606,10 @@ class DeviceHandler(Handler):
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
@ -858,6 +868,8 @@ class DeviceHandler(Handler):
elif out_state == "flash_error":
self.instance.reason = "Flash error"
if harness.is_pytest:
harness.pytest_run(self.log)
self.instance.results = harness.tests
# sometimes a test instance hasn't been executed successfully with an
@ -978,6 +990,11 @@ class QEMUHandler(Handler):
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
if harness.is_pytest:
harness.handle(None)
out_state = harness.state
break
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
@ -1021,6 +1038,10 @@ class QEMUHandler(Handler):
timeout_time = time.time() + 2
line = ""
if harness.is_pytest:
harness.pytest_run(logfile)
out_state = harness.state
handler.record(harness)
handler_time = time.time() - start_time
@ -1070,6 +1091,7 @@ class QEMUHandler(Handler):
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
@ -1775,7 +1797,7 @@ class TestInstance(DisablePyTestCollectionMixin):
def testcase_runnable(testcase, fixtures):
can_run = False
# console harness allows us to run the test and capture data.
if testcase.harness in [ 'console', 'ztest']:
if testcase.harness in [ 'console', 'ztest', 'pytest']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.

View file

@ -62,6 +62,9 @@ mapping:
"repeat":
type: int
required: no
"pytest_root":
type: str
required: no
"regex":
type: seq
required: no
@ -187,6 +190,9 @@ mapping:
"repeat":
type: int
required: no
"pytest_root":
type: str
required: no
"regex":
type: seq
required: no