twister: harness: introduce shell harness
Introduce a new harness based on pytest that does basic shell command
handling. The harness is enabled using:

    harness: shell

and expects a file with parameters in the form:

    test_shell_harness:
      - command: "kernel version"
        expected: "Zephyr version .*"
      - ...

Multiple commands and their expected output can be tested.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
parent 4fc6f127e6
commit d80e3f7687

6 changed files with 72 additions and 2 deletions
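For reference, a complete parameters file in the format described above could look like the sketch below. The "kernel version" entry mirrors the commit message; the second entry (its command and expected pattern) is purely illustrative and not part of this commit.

    # Hypothetical test_shell.yml; the second entry is illustrative only.
    test_shell_harness:
      - command: "kernel version"
        expected: "Zephyr version .*"
      - command: "kernel uptime"
        expected: "Uptime: .*"

Each expected value is a regular expression matched against the lines the shell returns for the corresponding command (see test_shell.py below).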
scripts/pylib/shell-twister-harness/conftest.py (new file, 7 lines)

@@ -0,0 +1,7 @@
+# Copyright (c) 2025 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+
+
+def pytest_addoption(parser):
+    parser.addoption('--testdata')
scripts/pylib/shell-twister-harness/test_shell.py (new file, 36 lines)

@@ -0,0 +1,36 @@
+# Copyright (c) 2025 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+import logging
+import re
+
+import pytest
+import yaml
+from twister_harness import Shell
+
+logger = logging.getLogger(__name__)
+
+
+@pytest.fixture
+def testdata_path(request):
+    return request.config.getoption("--testdata")
+
+
+def get_next_commands(testdata_path):
+    with open(testdata_path) as yaml_file:
+        data = yaml.safe_load(yaml_file)
+        for entry in data['test_shell_harness']:
+            yield entry['command'], entry['expected']
+
+
+def test_shell_harness(shell: Shell, testdata_path):
+    for command, expected in get_next_commands(testdata_path):
+        logger.info('send command: %s', command)
+        lines = shell.exec_command(command)
+        match = False
+        for line in lines:
+            if re.match(expected, line):
+                match = True
+                break
+        assert match, 'expected response not found'
+        logger.info('response is valid')
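A note on the matching semantics in test_shell_harness() above: each expected pattern is applied with re.match(), which anchors at the beginning of the string, so the pattern has to match from the start of one of the response lines. A minimal illustration (the version string is a made-up example):

    import re

    # re.match() only succeeds if the pattern matches at the start of the line.
    assert re.match("Zephyr version .*", "Zephyr version 4.2.0")
    assert re.match("version .*", "Zephyr version 4.2.0") is None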
@@ -403,6 +403,7 @@ class Pytest(Harness):
             f'--junit-xml={self.report_file}',
             f'--platform={self.instance.platform.name}'
         ]

         command.extend([os.path.normpath(os.path.join(
             self.source_dir, os.path.expanduser(os.path.expandvars(src)))) for src in pytest_root])

@@ -627,6 +628,19 @@ class Pytest(Harness):
             self.status = TwisterStatus.SKIP
             self.instance.reason = 'No tests collected'


+class Shell(Pytest):
+    def generate_command(self):
+        config = self.instance.testsuite.harness_config
+        pytest_root = [os.path.join(ZEPHYR_BASE, 'scripts', 'pylib', 'shell-twister-harness')]
+        config['pytest_root'] = pytest_root
+
+        command = super().generate_command()
+        if config.get('shell_params_file'):
+            p_file = os.path.join(self.source_dir, config.get('shell_params_file'))
+            command.append(f'--testdata={p_file}')
+        else:
+            command.append(f'--testdata={os.path.join(self.source_dir, "test_shell.yml")}')
+        return command
+
+
 class Gtest(Harness):
     ANSI_ESCAPE = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
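To enable the harness from a test scenario, harness_config gains the shell_params_file key consumed by Shell.generate_command() above; when it is omitted, the harness falls back to test_shell.yml in the test's source directory. A hypothetical testcase.yaml scenario (the test name and file value are illustrative):

    tests:
      sample.shell.harness:
        harness: shell
        harness_config:
          shell_params_file: "test_shell.yml"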
@@ -218,7 +218,16 @@ class TestInstance:
     def testsuite_runnable(testsuite, fixtures):
         can_run = False
         # console harness allows us to run the test and capture data.
-        if testsuite.harness in ['console', 'ztest', 'pytest', 'test', 'gtest', 'robot', 'ctest']:
+        if testsuite.harness in [
+            'console',
+            'ztest',
+            'pytest',
+            'test',
+            'gtest',
+            'robot',
+            'ctest',
+            'shell'
+        ]:
             can_run = True
             # if we have a fixture that is also being supplied on the
             # command-line, then we need to run the test, not just build it.

@@ -304,7 +313,7 @@ class TestInstance:
             device_testing)

         # check if test is runnable in pytest
-        if self.testsuite.harness == 'pytest':
+        if self.testsuite.harness in ['pytest', 'shell']:
             target_ready = bool(
                 filter == 'runnable' or simulator and simulator.name in SUPPORTED_SIMS_IN_PYTEST
             )
@@ -107,6 +107,9 @@ schema;scenario-schema:
         type: map
         required: false
         mapping:
+          "shell_params_file":
+            type: str
+            required: false
           "type":
             type: str
             required: false
@@ -18,6 +18,7 @@ from twisterlib.platform import Platform
 def testinstance() -> TestInstance:
     testsuite = TestSuite('.', 'samples/hello', 'unit.test')
     testsuite.harness_config = {}
+    testsuite.harness = 'pytest'
     testsuite.ignore_faults = False
     testsuite.sysbuild = False
     platform = Platform()