twister: Add support for gTest as a harness

Some third-party (3P) code, Pigweed for example, is already heavily
invested in gTest as a testing framework. Adding the `gtest` harness
enables running these existing 3P tests on various Zephyr platforms and
configurations.

Signed-off-by: Yuval Peress <peress@google.com>
Yuval Peress 2023-05-18 23:36:17 -06:00 committed by Anas Nashif
commit 64c41022e2
5 changed files with 348 additions and 4 deletions

doc/develop/test/twister.rst

@@ -408,8 +408,6 @@ harness: <string>
 keyboard harness is set on tests that require keyboard interaction to reach
 a verdict on whether a test has passed or failed; however, Twister lacks this
 harness implementation at the moment.
 
-The console harness tells Twister to parse a test's text output for a regex
-defined in the test's YAML file.
 
 Supported harnesses:
@@ -417,6 +415,14 @@ harness: <string>
 - test
 - console
 - pytest
+- gtest
 
+Harnesses ``ztest``, ``gtest`` and ``console`` are based on parsing the
+output and matching certain phrases. The ``ztest`` and ``gtest`` harnesses
+look for the pass/fail/etc. frames defined by those frameworks. Use the
+``gtest`` harness if you already have tests written in the gTest framework
+and do not wish to port them to ztest. The ``console`` harness tells Twister
+to parse a test's text output for a regex defined in the test's YAML file.
+
 Some widely used harnesses that are not supported yet:
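
For illustration, a minimal testcase.yaml sketch that opts a suite into the
new harness could look like this (the suite name and platform below are
hypothetical; ``harness: gtest`` is the only field this commit adds):

    tests:
      sample.pigweed.gtest:
        platform_allow: native_posix
        harness: gtest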

doc/releases/release-notes-3.4.rst

@@ -369,6 +369,8 @@ Build system and infrastructure
   ``EXTRA_DTC_OVERLAY_FILE`` is new, see
   :ref:`Set devicetree overlays <set-devicetree-overlays>` for further details.
 
+* Twister now supports ``gtest`` harness for running tests written in gTest.
+
 Drivers and Sensors
 *******************
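
As a usage sketch (the platform and test-root path here are illustrative,
not part of this commit), a gTest-based suite is then run like any other
twister target:

    ./scripts/twister -p native_posix -T path/to/gtest/suites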

scripts/pylib/twister/twisterlib/harness.py

@@ -12,7 +12,7 @@ logger = logging.getLogger('twister')
 logger.setLevel(logging.DEBUG)
 
 # pylint: disable=anomalous-backslash-in-string
-result_re = re.compile(".*(PASS|FAIL|SKIP) - (test_)?(.*) in (\d*[.,]?\d*) seconds")
+result_re = re.compile(".*(PASS|FAIL|SKIP) - (test_)?(.*) in (\\d*[.,]?\\d*) seconds")
 
 class Harness:
     GCOV_START = "GCOV_COVERAGE_DUMP_START"
     GCOV_END = "GCOV_COVERAGE_DUMP_END"
@@ -255,6 +255,91 @@ class Pytest(Harness):
             log.close()
 
+
+class Gtest(Harness):
+    ANSI_ESCAPE = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
+    TEST_START_PATTERN = r"\[ RUN      \] (?P<suite_name>.*)\.(?P<test_name>.*)$"
+    TEST_PASS_PATTERN = r"\[       OK \] (?P<suite_name>.*)\.(?P<test_name>.*)$"
+    TEST_FAIL_PATTERN = r"\[  FAILED  \] (?P<suite_name>.*)\.(?P<test_name>.*)$"
+    FINISHED_PATTERN = r"\[==========\] Done running all tests\.$"
+    has_failures = False
+    tc = None
+
+    def handle(self, line):
+        # Strip the ANSI characters, they mess up the patterns
+        non_ansi_line = self.ANSI_ESCAPE.sub('', line)
+
+        # Check if we started running a new test
+        test_start_match = re.search(self.TEST_START_PATTERN, non_ansi_line)
+        if test_start_match:
+            # Add the suite name
+            suite_name = test_start_match.group("suite_name")
+            if suite_name not in self.detected_suite_names:
+                self.detected_suite_names.append(suite_name)
+
+            # Generate the internal name of the test
+            name = "{}.{}.{}".format(self.id, suite_name, test_start_match.group("test_name"))
+
+            # Assert that we don't already have a running test
+            assert (
+                self.tc is None
+            ), "gTest error, {} didn't finish".format(self.tc)
+
+            # Check that the instance doesn't exist yet (prevents re-running)
+            tc = self.instance.get_case_by_name(name)
+            assert tc is None, "gTest error, {} running twice".format(tc)
+
+            # Create the test instance and set the context
+            tc = self.instance.get_case_or_create(name)
+            self.tc = tc
+            self.tc.status = "started"
+            self.testcase_output += line + "\n"
+            self._match = True
+
+        # Check if the test run finished
+        finished_match = re.search(self.FINISHED_PATTERN, non_ansi_line)
+        if finished_match:
+            tc = self.instance.get_case_or_create(self.id)
+            if self.has_failures or self.tc is not None:
+                self.state = "failed"
+                tc.status = "failed"
+            else:
+                self.state = "passed"
+                tc.status = "passed"
+            return
+
+        # Check if the individual test finished
+        state, name = self._check_result(non_ansi_line)
+        if state is None or name is None:
+            # Nothing finished, keep processing lines
+            return
+
+        # Get the matching test and make sure it's the same as the current context
+        tc = self.instance.get_case_by_name(name)
+        assert (
+            tc is not None and tc == self.tc
+        ), "gTest error, mismatched tests. Expected {} but got {}".format(self.tc, tc)
+
+        # Test finished, clear the context
+        self.tc = None
+
+        # Update the status of the test
+        tc.status = state
+        if tc.status == "failed":
+            self.has_failures = True
+        tc.output = self.testcase_output
+        self.testcase_output = ""
+        self._match = False
+
+    def _check_result(self, line):
+        test_pass_match = re.search(self.TEST_PASS_PATTERN, line)
+        if test_pass_match:
+            return "passed", "{}.{}.{}".format(self.id, test_pass_match.group("suite_name"), test_pass_match.group("test_name"))
+        test_fail_match = re.search(self.TEST_FAIL_PATTERN, line)
+        if test_fail_match:
+            return "failed", "{}.{}.{}".format(self.id, test_fail_match.group("suite_name"), test_fail_match.group("test_name"))
+        return None, None
+
+
 class Test(Harness):
     RUN_PASSED = "PROJECT EXECUTION SUCCESSFUL"
     RUN_FAILED = "PROJECT EXECUTION FAILED"
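
To make the parsing concrete, here is a small standalone sketch of what the
patterns above match. The pattern is copied from the Gtest class; the log
line is modeled on the SAMPLE_GTEST_FMT constant in the new test file below:

    import re

    # One gTest "[ RUN      ]" status frame, as seen through Zephyr's logger
    TEST_START_PATTERN = r"\[ RUN      \] (?P<suite_name>.*)\.(?P<test_name>.*)$"
    line = "[00:00:00.000,000] <inf> label: [ RUN      ] suite_name.test_name"

    match = re.search(TEST_START_PATTERN, line)
    assert match is not None
    print(match.group("suite_name"), match.group("test_name"))
    # -> suite_name test_name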

scripts/pylib/twister/twisterlib/testinstance.py

@@ -28,6 +28,8 @@ class TestInstance:
     out directory used is <outdir>/<platform>/<test case name>
     """
 
+    # Tell pytest not to collect this class as tests, despite its Test* name
+    __test__ = False
+
     def __init__(self, testsuite, platform, outdir):
         self.testsuite = testsuite
@@ -127,7 +129,7 @@ class TestInstance:
     def testsuite_runnable(testsuite, fixtures):
         can_run = False
         # console harness allows us to run the test and capture data.
-        if testsuite.harness in [ 'console', 'ztest', 'pytest', 'test']:
+        if testsuite.harness in [ 'console', 'ztest', 'pytest', 'test', 'gtest']:
             can_run = True
         # if we have a fixture that is also being supplied on the
         # command-line, then we need to run the test, not just build it.

scripts/tests/twister/test_harness.py

@@ -0,0 +1,249 @@
+#!/usr/bin/env python3
+# Copyright(c) 2023 Google LLC
+# SPDX-License-Identifier: Apache-2.0
+"""
+This test file contains testsuites for the Harness classes of twister
+"""
+import mock
+import sys
+import os
+import pytest
+
+ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
+sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
+
+from twisterlib.harness import Gtest
+from twisterlib.testinstance import TestInstance
+
+GTEST_START_STATE = " RUN      "
+GTEST_PASS_STATE = "       OK "
+GTEST_FAIL_STATE = "  FAILED  "
+# The SAMPLE_* constants mimic gTest status frames as they appear through
+# Zephyr's log backend: a timestamp and level prefix, then the gTest frame.
+SAMPLE_GTEST_START = (
+    "[00:00:00.000,000] <inf> label: [==========] Running all tests."
+)
+SAMPLE_GTEST_FMT = "[00:00:00.000,000] <inf> label: [{state}] {suite}.{test}"
+SAMPLE_GTEST_END = (
+    "[00:00:00.000,000] <inf> label: [==========] Done running all tests."
+)
+
+
+def process_logs(harness, logs):
+    # Feed lines to the harness one at a time, as twister does with live
+    # device output
+    for line in logs:
+        harness.handle(line)
+
+
+@pytest.fixture
+def gtest():
+    # A Gtest harness configured against a TestInstance built from mocks
+    mock_platform = mock.Mock()
+    mock_platform.name = "mock_platform"
+    mock_testsuite = mock.Mock()
+    mock_testsuite.name = "mock_testsuite"
+    mock_testsuite.id = "id"
+    mock_testsuite.testcases = []
+    instance = TestInstance(testsuite=mock_testsuite, platform=mock_platform, outdir="")
+
+    harness = Gtest()
+    harness.configure(instance)
+    return harness
+
+
+def test_gtest_start_test_no_suites_detected(gtest):
+    process_logs(gtest, [SAMPLE_GTEST_START])
+    assert len(gtest.detected_suite_names) == 0
+    assert gtest.state is None
+
+
+def test_gtest_start_test(gtest):
+    process_logs(
+        gtest,
+        [
+            SAMPLE_GTEST_START,
+            SAMPLE_GTEST_FMT.format(
+                state=GTEST_START_STATE, suite="suite_name", test="test_name"
+            ),
+        ],
+    )
+    assert gtest.state is None
+    assert len(gtest.detected_suite_names) == 1
+    assert gtest.detected_suite_names[0] == "suite_name"
+    assert gtest.instance.get_case_by_name("id.suite_name.test_name") is not None
+    assert (
+        gtest.instance.get_case_by_name("id.suite_name.test_name").status == "started"
+    )
+
+
+def test_gtest_pass(gtest):
+    process_logs(
+        gtest,
+        [
+            SAMPLE_GTEST_START,
+            SAMPLE_GTEST_FMT.format(
+                state=GTEST_START_STATE, suite="suite_name", test="test_name"
+            ),
+            SAMPLE_GTEST_FMT.format(
+                state=GTEST_PASS_STATE, suite="suite_name", test="test_name"
+            ),
+        ],
+    )
+    assert gtest.state is None
+    assert len(gtest.detected_suite_names) == 1
+    assert gtest.detected_suite_names[0] == "suite_name"
+    assert gtest.instance.get_case_by_name("id.suite_name.test_name") is not None
+    assert gtest.instance.get_case_by_name("id.suite_name.test_name").status == "passed"
+
+
+def test_gtest_failed(gtest):
+    process_logs(
+        gtest,
+        [
+            SAMPLE_GTEST_START,
+            SAMPLE_GTEST_FMT.format(
+                state=GTEST_START_STATE, suite="suite_name", test="test_name"
+            ),
+            SAMPLE_GTEST_FMT.format(
+                state=GTEST_FAIL_STATE, suite="suite_name", test="test_name"
+            ),
+        ],
+    )
+    assert gtest.state is None
+    assert len(gtest.detected_suite_names) == 1
+    assert gtest.detected_suite_names[0] == "suite_name"
+    assert gtest.instance.get_case_by_name("id.suite_name.test_name") is not None
+    assert gtest.instance.get_case_by_name("id.suite_name.test_name").status == "failed"
+
+
+def test_gtest_all_pass(gtest):
+    process_logs(
+        gtest,
+        [
+            SAMPLE_GTEST_START,
+            SAMPLE_GTEST_FMT.format(
+                state=GTEST_START_STATE, suite="suite_name", test="test_name"
+            ),
+            SAMPLE_GTEST_FMT.format(
+                state=GTEST_PASS_STATE, suite="suite_name", test="test_name"
+            ),
+            SAMPLE_GTEST_END,
+        ],
+    )
+    assert gtest.state == "passed"
+    assert len(gtest.detected_suite_names) == 1
+    assert gtest.detected_suite_names[0] == "suite_name"
+    assert gtest.instance.get_case_by_name("id.suite_name.test_name") is not None
+    assert gtest.instance.get_case_by_name("id.suite_name.test_name").status == "passed"
+
+
+def test_gtest_one_fail(gtest):
+    process_logs(
+        gtest,
+        [
+            SAMPLE_GTEST_START,
+            SAMPLE_GTEST_FMT.format(
+                state=GTEST_START_STATE, suite="suite_name", test="test0"
+            ),
+            SAMPLE_GTEST_FMT.format(
+                state=GTEST_PASS_STATE, suite="suite_name", test="test0"
+            ),
+            SAMPLE_GTEST_FMT.format(
+                state=GTEST_START_STATE, suite="suite_name", test="test1"
+            ),
+            SAMPLE_GTEST_FMT.format(
+                state=GTEST_FAIL_STATE, suite="suite_name", test="test1"
+            ),
+            SAMPLE_GTEST_END,
+        ],
+    )
+    assert gtest.state == "failed"
+    assert len(gtest.detected_suite_names) == 1
+    assert gtest.detected_suite_names[0] == "suite_name"
+    assert gtest.instance.get_case_by_name("id.suite_name.test0") is not None
+    assert gtest.instance.get_case_by_name("id.suite_name.test0").status == "passed"
+    assert gtest.instance.get_case_by_name("id.suite_name.test1") is not None
+    assert gtest.instance.get_case_by_name("id.suite_name.test1").status == "failed"
+
+
+def test_gtest_missing_result(gtest):
+    with pytest.raises(
+        AssertionError,
+        match=r"gTest error, id.suite_name.test0 didn't finish",
+    ):
+        process_logs(
+            gtest,
+            [
+                SAMPLE_GTEST_START,
+                SAMPLE_GTEST_FMT.format(
+                    state=GTEST_START_STATE, suite="suite_name", test="test0"
+                ),
+                SAMPLE_GTEST_FMT.format(
+                    state=GTEST_START_STATE, suite="suite_name", test="test1"
+                ),
+            ],
+        )
+
+
+def test_gtest_mismatch_result(gtest):
+    with pytest.raises(
+        AssertionError,
+        match=r"gTest error, mismatched tests. Expected id.suite_name.test0 but got None",
+    ):
+        process_logs(
+            gtest,
+            [
+                SAMPLE_GTEST_START,
+                SAMPLE_GTEST_FMT.format(
+                    state=GTEST_START_STATE, suite="suite_name", test="test0"
+                ),
+                SAMPLE_GTEST_FMT.format(
+                    state=GTEST_PASS_STATE, suite="suite_name", test="test1"
+                ),
+            ],
+        )
+
+
+def test_gtest_repeated_result(gtest):
+    with pytest.raises(
+        AssertionError,
+        match=r"gTest error, mismatched tests. Expected id.suite_name.test1 but got id.suite_name.test0",
+    ):
+        process_logs(
+            gtest,
+            [
+                SAMPLE_GTEST_START,
+                SAMPLE_GTEST_FMT.format(
+                    state=GTEST_START_STATE, suite="suite_name", test="test0"
+                ),
+                SAMPLE_GTEST_FMT.format(
+                    state=GTEST_PASS_STATE, suite="suite_name", test="test0"
+                ),
+                SAMPLE_GTEST_FMT.format(
+                    state=GTEST_START_STATE, suite="suite_name", test="test1"
+                ),
+                SAMPLE_GTEST_FMT.format(
+                    state=GTEST_PASS_STATE, suite="suite_name", test="test0"
+                ),
+            ],
+        )
+
+
+def test_gtest_repeated_run(gtest):
+    with pytest.raises(
+        AssertionError,
+        match=r"gTest error, id.suite_name.test0 running twice",
+    ):
+        process_logs(
+            gtest,
+            [
+                SAMPLE_GTEST_START,
+                SAMPLE_GTEST_FMT.format(
+                    state=GTEST_START_STATE, suite="suite_name", test="test0"
+                ),
+                SAMPLE_GTEST_FMT.format(
+                    state=GTEST_PASS_STATE, suite="suite_name", test="test0"
+                ),
+                SAMPLE_GTEST_FMT.format(
+                    state=GTEST_START_STATE, suite="suite_name", test="test0"
+                ),
+            ],
+        )