twister: fix twister testsuite

Make the twister testsuite work again.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>

parent 08caa71de7
commit 9437e6d963

10 changed files with 716 additions and 864 deletions

2  .github/workflows/twister_tests.yml  (vendored)

@@ -42,7 +42,7 @@ jobs:
            ${{ runner.os }}-pip-${{ matrix.python-version }}
      - name: install-packages
        run: |
-         pip3 install pytest colorama pyyaml ply mock
+         pip3 install pytest colorama pyyaml ply mock pykwalify
      - name: Run pytest
        env:
          ZEPHYR_BASE: ./

@@ -12,6 +12,7 @@ import logging
import subprocess
import shutil
import re
+import argparse

logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)

@@ -31,13 +32,617 @@ if not ZEPHYR_BASE:
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)


def parse_arguments(args):
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.fromfile_prefix_chars = "+"

    case_select = parser.add_argument_group("Test case selection",
        """
        Artificially long but functional example:
        $ ./scripts/twister -v \\
          --testsuite-root tests/ztest/base \\
          --testsuite-root tests/kernel \\
          --test tests/ztest/base/testing.ztest.verbose_0 \\
          --test tests/kernel/fifo/fifo_api/kernel.fifo

        "kernel.fifo.poll" is one of the test section names in
        __/fifo_api/testcase.yaml
        """)

    compare_group_option = parser.add_mutually_exclusive_group()

    platform_group_option = parser.add_mutually_exclusive_group()

    run_group_option = parser.add_mutually_exclusive_group()

    serial = parser.add_mutually_exclusive_group(required="--device-testing" in sys.argv)

    test_or_build = parser.add_mutually_exclusive_group()

    test_xor_subtest = case_select.add_mutually_exclusive_group()

    test_xor_generator = case_select.add_mutually_exclusive_group()

    valgrind_asan_group = parser.add_mutually_exclusive_group()

    case_select.add_argument(
        "-E",
        "--save-tests",
        metavar="FILENAME",
        action="store",
        help="Append list of tests and platforms to be run to file.")

    case_select.add_argument(
        "-F",
        "--load-tests",
        metavar="FILENAME",
        action="store",
        help="Load list of tests and platforms to be run from file.")

    case_select.add_argument(
        "-T", "--testsuite-root", action="append", default=[],
        help="Base directory to recursively search for test cases. All "
             "testcase.yaml files under here will be processed. May be "
             "called multiple times. Defaults to the 'samples/' and "
             "'tests/' directories at the base of the Zephyr tree.")

    case_select.add_argument(
        "-f",
        "--only-failed",
        action="store_true",
        help="Run only those tests that failed the previous twister run "
             "invocation.")

    case_select.add_argument("--list-tests", action="store_true",
        help="""List of all sub-test functions recursively found in
        all --testsuite-root arguments. Note different sub-tests can share
        the same section name and come from different directories.
        The output is flattened and reports --sub-test names only,
        not their directories. For instance net.socket.getaddrinfo_ok
        and net.socket.fd_set belong to different directories.
        """)

    case_select.add_argument("--list-test-duplicates", action="store_true",
        help="""List tests with duplicate identifiers.
        """)

    case_select.add_argument("--test-tree", action="store_true",
        help="""Output the test plan in a tree form""")

    compare_group_option.add_argument("--compare-report",
        help="Use this report file for size comparison")

    compare_group_option.add_argument(
        "-m", "--last-metrics", action="store_true",
        help="Compare with the results of the previous twister "
             "invocation")

    platform_group_option.add_argument(
        "-G",
        "--integration",
        action="store_true",
        help="Run integration tests")

    platform_group_option.add_argument(
        "--emulation-only", action="store_true",
        help="Only build and run emulation platforms")

    run_group_option.add_argument(
        "--device-testing", action="store_true",
        help="Test on device directly. Specify the serial device to "
             "use with the --device-serial option.")

    run_group_option.add_argument("--generate-hardware-map",
        help="""Probe serial devices connected to this platform
        and create a hardware map file to be used with
        --device-testing
        """)

    serial.add_argument("--device-serial",
        help="""Serial device for accessing the board
        (e.g., /dev/ttyACM0)
        """)

    serial.add_argument("--device-serial-pty",
        help="""Script for controlling pseudoterminal.
        Twister believes that it interacts with a terminal
        when it actually interacts with the script.

        E.g "twister --device-testing
        --device-serial-pty <script>
        """)

    serial.add_argument("--hardware-map",
        help="""Load hardware map from a file. This will be used
        for testing on hardware that is listed in the file.
        """)

    test_or_build.add_argument(
        "-b", "--build-only", action="store_true",
        help="Only build the code, do not execute any of it in QEMU")

    test_or_build.add_argument(
        "--test-only", action="store_true",
        help="""Only run device tests with current artifacts, do not build
        the code""")

    test_xor_subtest.add_argument(
        "-s", "--test", action="append",
        help="Run only the specified testsuite scenario. These are named by "
             "<path/relative/to/Zephyr/base/section.name.in.testcase.yaml>")

    test_xor_subtest.add_argument(
        "--sub-test", action="append",
        help="""Recursively find sub-test functions and run the entire
        test section where they were found, including all sibling test
        functions. Sub-tests are named by:
        section.name.in.testcase.yaml.function_name_without_test_prefix
        Example: In kernel.fifo.fifo_loop: 'kernel.fifo' is a section name
        and 'fifo_loop' is a name of a function found in main.c without test prefix.
        """)

    valgrind_asan_group.add_argument(
        "--enable-valgrind", action="store_true",
        help="""Run binary through valgrind and check for several memory access
        errors. Valgrind needs to be installed on the host. This option only
        works with host binaries such as those generated for the native_posix
        configuration and is mutually exclusive with --enable-asan.
        """)

    valgrind_asan_group.add_argument(
        "--enable-asan", action="store_true",
        help="""Enable address sanitizer to check for several memory access
        errors. Libasan needs to be installed on the host. This option only
        works with host binaries such as those generated for the native_posix
        configuration and is mutually exclusive with --enable-valgrind.
        """)

    # Start of individual args, keep them in alphabetical order

    board_root_list = ["%s/boards" % ZEPHYR_BASE,
                       "%s/scripts/pylib/twister/boards" % ZEPHYR_BASE]

    parser.add_argument(
        "-A", "--board-root", action="append", default=board_root_list,
        help="""Directory to search for board configuration files. All .yaml
        files in the directory will be processed. The directory should have the same
        structure as the main Zephyr tree: boards/<arch>/<board_name>/""")

    parser.add_argument(
        "-a", "--arch", action="append",
        help="Arch filter for testing. Takes precedence over --platform. "
             "If unspecified, test all arches. Multiple invocations "
             "are treated as a logical 'or' relationship")

    parser.add_argument(
        "-B", "--subset",
        help="Only run a subset of the tests, 1/4 for running the first 25%%, "
             "3/5 means run the 3rd fifth of the total. "
             "This option is useful when running a large number of tests on "
             "different hosts to speed up execution time.")

    parser.add_argument("-C", "--coverage", action="store_true",
        help="Generate coverage reports. Implies "
             "--enable-coverage.")

    parser.add_argument(
        "-c", "--clobber-output", action="store_true",
        help="Cleaning the output directory will simply delete it instead "
             "of the default policy of renaming.")

    parser.add_argument(
        "--cmake-only", action="store_true",
        help="Only run cmake, do not build or run.")

    parser.add_argument("--coverage-basedir", default=ZEPHYR_BASE,
        help="Base source directory for coverage report.")

    parser.add_argument("--coverage-platform", action="append", default=[],
        help="Platforms to run coverage reports on. "
             "This option may be used multiple times. "
             "Defaults to what was selected with --platform.")

    parser.add_argument("--coverage-tool", choices=['lcov', 'gcovr'], default='lcov',
        help="Tool to use to generate coverage report.")

    parser.add_argument(
        "-D", "--all-deltas", action="store_true",
        help="Show all footprint deltas, positive or negative. Implies "
             "--footprint-threshold=0")

    parser.add_argument(
        "--device-serial-baud", action="store", default=None,
        help="Serial device baud rate (default 115200)")

    parser.add_argument("--disable-asserts", action="store_false",
        dest="enable_asserts",
        help="deprecated, left for compatibility")

    parser.add_argument(
        "--disable-unrecognized-section-test", action="store_true",
        default=False,
        help="Skip the 'unrecognized section' test.")

    parser.add_argument(
        "--disable-suite-name-check", action="store_true", default=False,
        help="Disable extended test suite name verification at the beginning "
             "of Ztest test. This option could be useful for tests or "
             "platforms which for some reason cannot print early logs.")

    parser.add_argument("-e", "--exclude-tag", action="append",
        help="Specify tags of tests that should not run. "
             "Default is to run all tests with all tags.")

    parser.add_argument("--enable-coverage", action="store_true",
        help="Enable code coverage using gcov.")

    parser.add_argument(
        "--enable-lsan", action="store_true",
        help="""Enable leak sanitizer to check for heap memory leaks.
        Libasan needs to be installed on the host. This option only
        works with host binaries such as those generated for the native_posix
        configuration and when --enable-asan is given.
        """)

    parser.add_argument(
        "--enable-ubsan", action="store_true",
        help="""Enable undefined behavior sanitizer to check for undefined
        behaviour during program execution. It uses an optional runtime library
        to provide better error diagnostics. This option only works with host
        binaries such as those generated for the native_posix configuration.
        """)

    parser.add_argument("--enable-size-report", action="store_true",
        help="Enable expensive computation of RAM/ROM segment sizes.")

    parser.add_argument(
        "--filter", choices=['buildable', 'runnable'],
        default='buildable',
        help="""Filter tests to be built and executed. By default everything is
        built and if a test is runnable (emulation or a connected device), it
        is run. This option allows for example to only build tests that can
        actually be run. Runnable is a subset of buildable.""")

    parser.add_argument("--force-color", action="store_true",
        help="Always output ANSI color escape sequences "
             "even when the output is redirected (not a tty)")

    parser.add_argument("--force-toolchain", action="store_true",
        help="Do not filter based on toolchain, use the set "
             "toolchain unconditionally")

    parser.add_argument("--gcov-tool", default=None,
        help="Path to the gcov tool to use for code coverage "
             "reports")

    parser.add_argument(
        "-H", "--footprint-threshold", type=float, default=5,
        help="When checking test case footprint sizes, warn the user if "
             "the new app size is greater than the specified percentage "
             "from the last release. Default is 5. 0 to warn on any "
             "increase in app size.")

    parser.add_argument(
        "-i", "--inline-logs", action="store_true",
        help="Upon test failure, print relevant log data to stdout "
             "instead of just a path to it.")

    parser.add_argument(
        "-j", "--jobs", type=int,
        help="Number of jobs for building, defaults to number of CPU threads, "
             "overcommitted by factor 2 when --build-only.")

    parser.add_argument(
        "-K", "--force-platform", action="store_true",
        help="""Force testing on selected platforms,
        even if they are excluded in the test configuration (testcase.yaml)."""
    )

    parser.add_argument(
        "-l", "--all", action="store_true",
        help="Build/test on all platforms. Any --platform arguments "
             "are ignored.")

    parser.add_argument("--list-tags", action="store_true",
        help="List all tags occurring in selected tests.")

    parser.add_argument("--log-file", metavar="FILENAME", action="store",
        help="Specify a file in which to save logs.")

    parser.add_argument(
        "-M", "--runtime-artifact-cleanup", action="store_true",
        help="Delete artifacts of passing tests.")

    test_xor_generator.add_argument(
        "-N", "--ninja", action="store_true", default="--make" not in sys.argv,
        help="Use the Ninja generator with CMake. (This is the default)",
        required="--short-build-path" in sys.argv)

    test_xor_generator.add_argument(
        "-k", "--make", action="store_true",
        help="Use the unix Makefile generator with CMake.")

    parser.add_argument(
        "-n", "--no-clean", action="store_true",
        help="Re-use the outdir before building. Will result in "
             "faster compilation since builds will be incremental.")

    # To be removed in favor of --detailed-skipped-report
    parser.add_argument(
        "--no-skipped-report", action="store_true",
        help="""Do not report skipped test cases in junit output. [Experimental]
        """)

    parser.add_argument(
        "--detailed-skipped-report", action="store_true",
        help="Generate a detailed report with all skipped test cases, "
             "including those that are filtered based on testsuite definition."
    )

    parser.add_argument(
        "-O", "--outdir",
        default=os.path.join(os.getcwd(), "twister-out"),
        help="Output directory for logs and binaries. "
             "Default is 'twister-out' in the current directory. "
             "This directory will be cleaned unless '--no-clean' is set. "
             "The '--clobber-output' option controls what cleaning does.")

    parser.add_argument(
        "-o", "--report-dir",
        help="""Output reports containing results of the test run into the
        specified directory.
        The output will be both in JSON and JUNIT format
        (twister.json and twister.xml).
        """)

    parser.add_argument("--overflow-as-errors", action="store_true",
        help="Treat RAM/SRAM overflows as errors.")

    parser.add_argument("-P", "--exclude-platform", action="append", default=[],
        help="""Exclude platforms and do not build or run any tests
        on those platforms. This option can be called multiple times.
        """
    )

    parser.add_argument("--persistent-hardware-map", action='store_true',
        help="""With --generate-hardware-map, tries to use
        persistent names for serial devices on platforms
        that support this feature (currently only Linux).
        """)

    parser.add_argument(
        "-p", "--platform", action="append",
        help="Platform filter for testing. This option may be used multiple "
             "times. Test suites will only be built/run on the platforms "
             "specified. If this option is not used, then platforms marked "
             "as default in the platform metadata file will be chosen "
             "to build and test.")

    parser.add_argument(
        "--platform-reports", action="store_true",
        help="""Create individual reports for each platform.
        """)

    parser.add_argument("--pre-script",
        help="""Specify a pre script. This will be executed
        before the device handler opens the serial port and invokes the runner.
        """)

    parser.add_argument("-Q", "--error-on-deprecations", action="store_false",
        help="Error on deprecation warnings.")

    parser.add_argument(
        "--quarantine-list",
        metavar="FILENAME",
        help="Load list of test scenarios under quarantine. The entries in "
             "the file need to correspond to the test scenario names as in "
             "the corresponding tests .yaml files. These scenarios "
             "will be skipped with quarantine as the reason.")

    parser.add_argument(
        "--quarantine-verify",
        action="store_true",
        help="Use the list of test scenarios under quarantine and run them "
             "to verify their current status.")

    parser.add_argument("-R", "--enable-asserts", action="store_true",
        default=True,
        help="deprecated, left for compatibility")

    parser.add_argument("--report-excluded",
        action="store_true",
        help="""List all tests that are never run based on current scope and
        coverage. If you are looking for accurate results, run this with
        --all, but this will take a while...""")

    parser.add_argument(
        "--report-name",
        help="""Create a report with a custom name.
        """)

    parser.add_argument(
        "--report-suffix",
        help="""Add a suffix to all generated file names, for example to add a
        version or a commit ID.
        """)

    parser.add_argument(
        "--retry-failed", type=int, default=0,
        help="Retry failing tests again, up to the number of times specified.")

    parser.add_argument(
        "--retry-interval", type=int, default=60,
        help="Retry failing tests after specified period of time.")

    parser.add_argument(
        "--retry-build-errors", action="store_true",
        help="Retry build errors as well.")

    parser.add_argument(
        "-S", "--enable-slow", action="store_true",
        help="Execute time-consuming test cases that have been marked "
             "as 'slow' in testcase.yaml. Normally these are only built.")

    parser.add_argument(
        "--seed", type=int,
        help="Seed for native posix pseudo-random number generator")

    parser.add_argument(
        "--short-build-path",
        action="store_true",
        help="Create shorter build directory paths based on symbolic links. "
             "The shortened build path will be used by CMake for generating "
             "the build system and executing the build. Use this option if "
             "you experience build failures related to path length, for "
             "example on Windows OS. This option can be used only with "
             "'--ninja' argument (to use Ninja build generator).")

    parser.add_argument(
        "--show-footprint", action="store_true",
        help="Show footprint statistics and deltas since last release."
    )

    parser.add_argument(
        "-t", "--tag", action="append",
        help="Specify tags to restrict which tests to run by tag value. "
             "Default is to not do any tag filtering. Multiple invocations "
             "are treated as a logical 'or' relationship.")

    parser.add_argument("--timestamps",
        action="store_true",
        help="Print all messages with time stamps.")

    parser.add_argument(
        "-u",
        "--no-update",
        action="store_true",
        help="Do not update the results of the last run of twister.")

    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="Emit debugging information, call multiple times to increase "
             "verbosity.")

    parser.add_argument("-W", "--disable-warnings-as-errors", action="store_true",
        help="Do not treat warning conditions as errors.")

    parser.add_argument(
        "--west-flash", nargs='?', const=[],
        help="""Uses west instead of ninja or make to flash when running with
        --device-testing. Supports comma-separated argument list.

        E.g "twister --device-testing --device-serial /dev/ttyACM0
        --west-flash="--board-id=foobar,--erase"
        will translate to "west flash -- --board-id=foobar --erase"

        NOTE: device-testing must be enabled to use this option.
        """
    )
    parser.add_argument(
        "--west-runner",
        help="""Uses the specified west runner instead of default when running
        with --west-flash.

        E.g "twister --device-testing --device-serial /dev/ttyACM0
        --west-flash --west-runner=pyocd"
        will translate to "west flash --runner pyocd"

        NOTE: west-flash must be enabled to use this option.
        """
    )

    parser.add_argument(
        "-X", "--fixture", action="append", default=[],
        help="Specify a fixture that a board might support.")

    parser.add_argument(
        "-x", "--extra-args", action="append", default=[],
        help="""Extra CMake cache entries to define when building test cases.
        May be called multiple times. The key-value entries will be
        prefixed with -D before being passed to CMake.
        E.g
        "twister -x=USE_CCACHE=0"
        will translate to
        "cmake -DUSE_CCACHE=0"
        which will ultimately disable ccache.
        """
    )

    parser.add_argument(
        "-y", "--dry-run", action="store_true",
        help="""Create the filtered list of test cases, but don't actually
        run them. Useful if you're just interested in the test plan
        generated for every run and saved in the specified output
        directory (testplan.json).
        """)

    parser.add_argument(
        "-z", "--size", action="append",
        help="Don't run twister. Instead, produce a report to "
             "stdout detailing RAM/ROM sizes on the specified filenames. "
             "All other command line arguments are ignored.")

    options = parser.parse_args(args)

    # Very early error handling
    if options.device_serial_pty and os.name == "nt":  # OS is Windows
        logger.error("--device-serial-pty is not supported on Windows OS")
        sys.exit(1)

    if options.west_runner and options.west_flash is None:
        logger.error("west-runner requires west-flash to be enabled")
        sys.exit(1)

    if options.west_flash and not options.device_testing:
        logger.error("west-flash requires device-testing to be enabled")
        sys.exit(1)

    if not options.testsuite_root:
        options.testsuite_root = [os.path.join(ZEPHYR_BASE, "tests"),
                                  os.path.join(ZEPHYR_BASE, "samples")]

    if options.show_footprint or options.compare_report:
        options.enable_size_report = True

    if options.coverage:
        options.enable_coverage = True

    if not options.coverage_platform:
        options.coverage_platform = options.platform

    if options.enable_valgrind and not shutil.which("valgrind"):
        logger.error("valgrind enabled but valgrind executable not found")
        sys.exit(1)

    if options.device_testing and (options.device_serial or options.device_serial_pty) and len(options.platform) > 1:
        logger.error("""When --device-testing is used with
                        --device-serial or --device-serial-pty,
                        only one platform is allowed""")
        sys.exit(1)

    if options.size:
        from twister.size_calc import SizeCalculator
        for fn in options.size:
            sc = SizeCalculator(fn, [])
            sc.size_report()
        sys.exit(1)

    return options


class TwisterEnv:

-    def __init__(self, options) -> None:
+    def __init__(self, options=None) -> None:
        self.version = None
        self.toolchain = None
        self.options = options
-        if self.options.ninja:
+        if options and options.ninja:
            self.generator_cmd = "ninja"
            self.generator = "Ninja"
        else:

@@ -45,6 +650,22 @@ class TwisterEnv:
            self.generator = "Unix Makefiles"
        logger.info(f"Using {self.generator}..")

+        if options:
+            self.test_roots = options.testsuite_root
+        else:
+            self.test_roots = None
+        if options:
+            if not isinstance(options.board_root, list):
+                self.board_roots = [self.options.board_root]
+            else:
+                self.board_roots = self.options.board_root
+            self.outdir = os.path.abspath(options.outdir)
+        else:
+            self.board_roots = None
+            self.outdir = None

        self.hwm = None

    def discover(self):
        self.check_zephyr_version()
        self.get_toolchain()
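
Taken together, the relocated parse_arguments() and the reworked TwisterEnv are meant to be driven as a pair: parse the command line into an options namespace, wrap it in an environment, then let the environment discover the Zephyr version and toolchain. A minimal sketch of that flow follows; the make_env() helper and the example argument values are illustrative assumptions, not code from this commit.

# Illustrative sketch only: make_env() and the sample arguments are made up;
# parse_arguments(), TwisterEnv and discover() are the entry points added above.
import sys

from twister.enviornment import TwisterEnv, parse_arguments


def make_env(argv=None):
    # parse_arguments() now takes the argument list explicitly, so tests can
    # pass a controlled list (e.g. []) instead of relying on sys.argv.
    options = parse_arguments(sys.argv[1:] if argv is None else argv)
    env = TwisterEnv(options)
    env.discover()  # resolves the Zephyr version and the toolchain
    return env


if __name__ == "__main__":
    env = make_env(["-T", "tests/kernel", "-p", "native_posix"])
    print(env.generator, env.outdir)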

@@ -21,6 +21,7 @@ from multiprocessing.managers import BaseManager
from numpy import trace

from twister.cmakecache import CMakeCache
+from twister.enviornment import canonical_zephyr_base

logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)

@@ -7,10 +7,11 @@ import os
import hashlib
import random
import logging
-from twister.testsuite import TestCase, TestSuite
+from twister.testsuite import TestCase
from twister.error import BuildError
from twister.handlers import BinaryHandler, QEMUHandler, DeviceHandler
-from distutils.spawn import find_executable

+import shutil
import glob

logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)

@@ -149,7 +150,7 @@ class TestInstance:
            handler.call_make_run = False
            handler.binary = os.path.join(self.build_dir, "zephyr", "zephyr.exe")
        elif self.platform.simulation == "renode":
-            if find_executable("renode"):
+            if shutil.which("renode"):
                handler = BinaryHandler(self, "renode")
                handler.pid_fn = os.path.join(self.build_dir, "renode.pid")
        elif self.platform.simulation == "tsim":

@@ -158,10 +159,10 @@ class TestInstance:
            handler = DeviceHandler(self, "device")
            handler.call_make_run = False
        elif self.platform.simulation == "nsim":
-            if find_executable("nsimdrv"):
+            if shutil.which("nsimdrv"):
                handler = BinaryHandler(self, "nsim")
        elif self.platform.simulation == "mdb-nsim":
-            if find_executable("mdb"):
+            if shutil.which("mdb"):
                handler = BinaryHandler(self, "nsim")
        elif self.platform.simulation == "armfvp":
            handler = BinaryHandler(self, "armfvp")

@@ -199,19 +200,19 @@ class TestInstance:
                            filter == 'runnable')

        if self.platform.simulation == "nsim":
-            if not find_executable("nsimdrv"):
+            if not shutil.which("nsimdrv"):
                target_ready = False

        if self.platform.simulation == "mdb-nsim":
-            if not find_executable("mdb"):
+            if not shutil.which("mdb"):
                target_ready = False

        if self.platform.simulation == "renode":
-            if not find_executable("renode"):
+            if not shutil.which("renode"):
                target_ready = False

        if self.platform.simulation == "tsim":
-            if not find_executable("tsim-leon3"):
+            if not shutil.which("tsim-leon3"):
                target_ready = False

        testsuite_runnable = self.testsuite_runnable(self.testsuite, fixtures)
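
The find_executable() to shutil.which() swap above removes the dependency on distutils (deprecated since Python 3.10) while keeping the same contract: both return the path of the requested binary, or None when it is not on PATH. A small illustration, with a hedged helper name and "renode" reused from the checks above:

import shutil


def simulator_available(tool: str = "renode") -> bool:
    # shutil.which() returns the absolute path of the executable if it can be
    # found on PATH (or None otherwise), mirroring the truthiness the old
    # distutils.spawn.find_executable() calls relied on.
    return shutil.which(tool) is not None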

@@ -9,12 +9,8 @@ import mmap
import sys
import re
import subprocess
import shutil
import queue
import glob
import logging
-from distutils.spawn import find_executable
import colorama
import json
import collections
from typing import List

@@ -166,13 +162,6 @@ class TestPlan:
        self.options = env.options
        self.env = env

-        self.roots = self.options.testsuite_root
-        if not isinstance(self.options.board_root, list):
-            self.board_roots = [self.options.board_root]
-        else:
-            self.board_roots = self.options.board_root
-        self.outdir = os.path.abspath(self.options.outdir)

        # Keep track of which test cases we've filtered out and why
        self.testsuites = {}
        self.quarantine = {}

@@ -453,10 +442,8 @@ class TestPlan:


    def add_configurations(self):

-        for board_root in self.board_roots:
+        for board_root in self.env.board_roots:
            board_root = os.path.abspath(board_root)

            logger.debug("Reading platform configuration files under %s..." %
                         board_root)

@@ -487,7 +474,7 @@ class TestPlan:
        return testcases

    def add_testsuites(self, testsuite_filter=[]):
-        for root in self.roots:
+        for root in self.env.test_roots:
            root = os.path.abspath(root)

            logger.debug("Reading test case configuration files under %s..." % root)

@@ -884,7 +871,7 @@ class TestPlan:
            platform = self.get_platform(ts["platform"])
            if filter_platform and platform.name not in filter_platform:
                continue
-            instance = TestInstance(self.testsuites[testsuite], platform, self.outdir)
+            instance = TestInstance(self.testsuites[testsuite], platform, self.env.outdir)
            if ts.get("run_id"):
                instance.run_id = ts.get("run_id")

@@ -1013,7 +1000,7 @@ class TestPlan:
        # list of instances per testsuite, aka configurations.
        instance_list = []
        for plat in platform_scope:
-            instance = TestInstance(ts, plat, self.outdir)
+            instance = TestInstance(ts, plat, self.env.outdir)
            if runnable:
                tfilter = 'runnable'
            else:

@@ -1214,7 +1201,7 @@ class TestPlan:
        """

        links_dir_name = "twister_links"  # folder for all links
-        links_dir_path = os.path.join(self.outdir, links_dir_name)
+        links_dir_path = os.path.join(self.env.outdir, links_dir_name)
        if not os.path.exists(links_dir_path):
            os.mkdir(links_dir_path)
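
After these hunks, TestPlan no longer derives its roots and output directory from the options object; it reads them from the TwisterEnv it is constructed with. A rough sketch of the new construction path (the directory values below are placeholders, not real defaults):

from twister.enviornment import TwisterEnv, parse_arguments
from twister.testplan import TestPlan

options = parse_arguments([])            # empty list -> all defaults
env = TwisterEnv(options)
env.board_roots = ["boards/"]            # placeholder paths for illustration
env.test_roots = ["tests/", "samples/"]
env.outdir = "twister-out"

plan = TestPlan(env)                     # TestPlan now takes only the env
plan.add_configurations()                # walks env.board_roots
plan.add_testsuites()                    # walks env.test_roots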

@@ -11,7 +11,10 @@ import pytest

ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
-from twisterlib import TestPlan, TestInstance
+sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts"))
+from twister.testplan import TestPlan
+from twister.testinstance import TestInstance
+from twister.enviornment import TwisterEnv, parse_arguments

def new_get_toolchain(*args, **kwargs):
    return 'zephyr'

@@ -29,14 +32,26 @@ def testsuites_directory():
    """ Pytest fixture to load the test data directory"""
    return ZEPHYR_BASE + "/scripts/tests/twister/test_data/testsuites"

-@pytest.fixture(name='class_testplan')
-def testsuite_obj(test_data, testsuites_dir, tmpdir_factory):
+@pytest.fixture(name='class_env')
+def tesenv_obj(test_data, testsuites_dir, tmpdir_factory):
    """ Pytest fixture to initialize and return the class TestPlan object"""
-    board_root = test_data + "board_config/1_level/2_level/"
-    testcase_root = [testsuites_dir + '/tests', testsuites_dir + '/samples']
-    outdir = tmpdir_factory.mktemp("sanity_out_demo")
-    suite = TestPlan(board_root, testcase_root, outdir)
-    return suite
+    options = parse_arguments([])
+    env = TwisterEnv(options)
+    env.board_roots = [test_data + "board_config/1_level/2_level/"]
+    env.test_roots = [testsuites_dir + '/tests', testsuites_dir + '/samples']
+    env.outdir = tmpdir_factory.mktemp("sanity_out_demo")
+    return env
+
+
+@pytest.fixture(name='class_testplan')
+def testplan_obj(test_data, class_env, testsuites_dir, tmpdir_factory):
+    """ Pytest fixture to initialize and return the class TestPlan object"""
+    env = class_env
+    env.board_roots = [test_data + "board_config/1_level/2_level/"]
+    env.test_roots = [testsuites_dir + '/tests', testsuites_dir + '/samples']
+    env.outdir = tmpdir_factory.mktemp("sanity_out_demo")
+    plan = TestPlan(env)
+    return plan

@pytest.fixture(name='all_testsuites_dict')
def testsuites_dict(class_testplan):

@@ -51,8 +66,8 @@ def testsuites_dict(class_testplan):
def all_platforms_list(test_data, class_testplan):
    """ Pytest fixture to call add_configurations function of
        Testsuite class and return the Platforms list"""
-    class_testplan.board_roots = os.path.abspath(test_data + "board_config")
-    plan = TestPlan(class_testplan.board_roots, class_testplan.roots, class_testplan.outdir)
+    class_testplan.env.board_roots = [os.path.abspath(test_data + "board_config")]
+    plan = TestPlan(class_testplan.env)
    plan.add_configurations()
    return plan.platforms
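
To show how the split fixtures are intended to be consumed, here is a hypothetical test (not part of this commit) that takes both class_env and class_testplan:

# Hypothetical example; relies only on the fixtures defined above.
def test_plan_is_built_from_env(class_env, class_testplan):
    # class_testplan wraps the same TwisterEnv that class_env returns,
    # so the plan and the environment see identical roots and outdir.
    assert class_testplan.env is class_env
    assert class_testplan.env.test_roots == class_env.test_roots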

@@ -1,177 +0,0 @@
#!/usr/bin/env python3
# Copyright (c) 2020 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=line-too-long
"""
Tests for testinstance class
"""

import os
import sys
import pytest

ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
from twisterlib import (TestInstance, BuildError, TestSuite, TwisterException,
                        ScanPathResult)


TESTDATA_1 = [
    (False, False, "console", "na", "qemu", False, [], (False, True)),
    (False, False, "console", "native", "qemu", False, [], (False, True)),
    (True, False, "console", "native", "nsim", False, [], (True, False)),
    (True, True, "console", "native", "renode", False, [], (True, False)),
    (False, False, "sensor", "native", "", False, [], (True, False)),
    (False, False, "sensor", "na", "", False, [], (True, False)),
    (False, True, "sensor", "native", "", True, [], (True, False)),
]
@pytest.mark.parametrize("build_only, slow, harness, platform_type, platform_sim, device_testing, fixture, expected", TESTDATA_1)
def test_check_build_or_run(class_testplan, monkeypatch, all_testsuites_dict, platforms_list, build_only, slow, harness, platform_type, platform_sim, device_testing, fixture, expected):
    """ Test to check the conditions for build_only and run scenarios
    Scenario 1: Test when different parameters are passed, build_only and run are set correctly
    Scenario 2: Test if build_only is enabled when the OS is Windows"""

    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get('scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1')

    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")
    platform.type = platform_type
    platform.simulation = platform_sim
    testsuite.harness = harness
    testsuite.build_only = build_only
    testsuite.slow = slow

    testinstance = TestInstance(testsuite, platform, class_testplan.outdir)
    run = testinstance.check_runnable(slow, device_testing, fixture)
    _, r = expected
    assert run == r

    monkeypatch.setattr("os.name", "nt")
    run = testinstance.check_runnable()
    assert not run

TESTDATA_2 = [
    (True, True, True, ["demo_board_2"], "native", '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y\nCONFIG_ASAN=y\nCONFIG_UBSAN=y'),
    (True, False, True, ["demo_board_2"], "native", '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y\nCONFIG_ASAN=y'),
    (False, False, True, ["demo_board_2"], 'native', '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y'),
    (True, False, True, ["demo_board_2"], 'mcu', '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y'),
    (False, False, False, ["demo_board_2"], 'native', ''),
    (False, False, True, ['demo_board_1'], 'native', ''),
    (True, False, False, ["demo_board_2"], 'native', '\nCONFIG_ASAN=y'),
    (False, True, False, ["demo_board_2"], 'native', '\nCONFIG_UBSAN=y'),
]

@pytest.mark.parametrize("enable_asan, enable_ubsan, enable_coverage, coverage_platform, platform_type, expected_content", TESTDATA_2)
def test_create_overlay(class_testplan, all_testsuites_dict, platforms_list, enable_asan, enable_ubsan, enable_coverage, coverage_platform, platform_type, expected_content):
    """Test correct content is written to testcase_extra.conf based on if conditions
    TO DO: Add extra_configs to the input list"""
    class_testplan.testsuites = all_testsuites_dict
    testcase = class_testplan.testsuites.get('scripts/tests/twister/test_data/testsuites/samples/test_app/sample_test.app')
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")

    testinstance = TestInstance(testcase, platform, class_testplan.outdir)
    platform.type = platform_type
    assert testinstance.create_overlay(platform, enable_asan, enable_ubsan, enable_coverage, coverage_platform) == expected_content

def test_calculate_sizes(class_testplan, all_testsuites_dict, platforms_list):
    """ Test Calculate sizes method for zephyr elf"""
    class_testplan.testsuites = all_testsuites_dict
    testcase = class_testplan.testsuites.get('scripts/tests/twister/test_data/testsuites/samples/test_app/sample_test.app')
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")
    testinstance = TestInstance(testcase, platform, class_testplan.outdir)

    with pytest.raises(BuildError):
        assert testinstance.calculate_sizes() == "Missing/multiple output ELF binary"

TESTDATA_3 = [
    (ZEPHYR_BASE + '/scripts/tests/twister/test_data/testsuites', ZEPHYR_BASE, '/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1', '/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1'),
    (ZEPHYR_BASE, '.', 'test_a.check_1', 'test_a.check_1'),
    (ZEPHYR_BASE, '/scripts/tests/twister/test_data/testsuites/test_b', 'test_b.check_1', '/scripts/tests/twister/test_data/testsuites/test_b/test_b.check_1'),
    (os.path.join(ZEPHYR_BASE, '/scripts/tests'), '.', 'test_b.check_1', 'test_b.check_1'),
    (os.path.join(ZEPHYR_BASE, '/scripts/tests'), '.', '.', '.'),
    (ZEPHYR_BASE, '.', 'test_a.check_1.check_2', 'test_a.check_1.check_2'),
]
@pytest.mark.parametrize("testcase_root, workdir, name, expected", TESTDATA_3)
def test_get_unique(testcase_root, workdir, name, expected):
    '''Test to check if the unique name is given for each testcase root and workdir'''
    unique = TestSuite(testcase_root, workdir, name)
    assert unique.name == expected

TESTDATA_4 = [
    (ZEPHYR_BASE, '.', 'test_c', 'Tests should reference the category and subsystem with a dot as a separator.'),
    (os.path.join(ZEPHYR_BASE, '/scripts/tests'), '.', '', 'Tests should reference the category and subsystem with a dot as a separator.'),
]
@pytest.mark.parametrize("testcase_root, workdir, name, exception", TESTDATA_4)
def test_get_unique_exception(testcase_root, workdir, name, exception):
    '''Test to check if tests reference the category and subsystem with a dot as a separator'''

    with pytest.raises(TwisterException):
        unique = TestSuite(testcase_root, workdir, name)
        assert unique == exception


TESTDATA_5 = [
    ("testsuites/tests/test_ztest.c",
     ScanPathResult(
         warnings=None,
         matches=['a', 'c', 'unit_a',
                  'newline',
                  'test_test_aa',
                  'user', 'last'],
         has_registered_test_suites=False,
         has_run_registered_test_suites=False,
         has_test_main=False,
         ztest_suite_names = ["test_api"])),
    ("testsuites/tests/test_a/test_ztest_error.c",
     ScanPathResult(
         warnings="Found a test that does not start with test_",
         matches=['1a', '1c', '2a', '2b'],
         has_registered_test_suites=False,
         has_run_registered_test_suites=False,
         has_test_main=True,
         ztest_suite_names = ["feature1", "feature2"])),
    ("testsuites/tests/test_a/test_ztest_error_1.c",
     ScanPathResult(
         warnings="found invalid #ifdef, #endif in ztest_test_suite()",
         matches=['unit_1a', 'unit_1b', 'Unit_1c'],
         has_registered_test_suites=False,
         has_run_registered_test_suites=False,
         has_test_main=False,
         ztest_suite_names = ["feature3"])),
    ("testsuites/tests/test_d/test_ztest_error_register_test_suite.c",
     ScanPathResult(
         warnings=None, matches=['unit_1a', 'unit_1b'],
         has_registered_test_suites=True,
         has_run_registered_test_suites=False,
         has_test_main=False,
         ztest_suite_names = ["feature4"])),
]

@pytest.mark.parametrize("test_file, expected", TESTDATA_5)
def test_scan_file(test_data, test_file, expected: ScanPathResult):
    '''Testing scan_file method with different ztest files for warnings and results'''

    testcase = TestSuite("/scripts/tests/twister/test_data/testsuites/tests", ".",
                         "test_a.check_1")

    result: ScanPathResult = testcase.scan_file(os.path.join(test_data, test_file))
    assert result == expected


TESTDATA_6 = [
    (
        "testsuites/tests",
        ['a', 'c', 'unit_a', 'newline', 'test_test_aa', 'user', 'last'],
        ["test_api"]
    ),
    (
        "testsuites/tests/test_a",
        ['unit_1a', 'unit_1b', 'Unit_1c', '1a', '1c', '2a', '2b'],
        ["feature3", "feature1", "feature2"]
    ),
]

@@ -13,7 +13,12 @@ import pytest
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))

-from twisterlib import TestSuite, TestPlan, TestInstance, Platform
+from twister.testplan import TestPlan, ScanPathResult
+from twister.testinstance import TestInstance
+from twister.error import BuildError, TwisterException
+from twister.testsuite import TestSuite
+from twister.platform import Platform


def test_testplan_add_testsuites(class_testplan):
    """ Testing add_testcase function of Testsuite class in twister """

@@ -41,22 +46,24 @@ def test_testplan_add_testsuites(class_testplan):
    assert all(isinstance(n, TestSuite) for n in class_testplan.testsuites.values())

@pytest.mark.parametrize("board_root_dir", [("board_config_file_not_exist"), ("board_config")])
-def test_add_configurations(test_data, class_testplan, board_root_dir):
+def test_add_configurations(test_data, class_env, board_root_dir):
    """ Testing add_configurations function of TestPlan class in Twister
    Test : Asserting on default platforms list
    """
-    class_testplan.board_roots = os.path.abspath(test_data + board_root_dir)
-    suite = TestPlan(class_testplan.board_roots, class_testplan.roots, class_testplan.outdir)
+    class_env.board_roots = [os.path.abspath(test_data + board_root_dir)]
+    plan = TestPlan(class_env)
    if board_root_dir == "board_config":
-        suite.add_configurations()
-        assert sorted(suite.default_platforms) == sorted(['demo_board_1', 'demo_board_3'])
+        plan.add_configurations()
+        assert sorted(plan.default_platforms) == sorted(['demo_board_1', 'demo_board_3'])
    elif board_root_dir == "board_config_file_not_exist":
-        suite.add_configurations()
-        assert sorted(suite.default_platforms) != sorted(['demo_board_1'])
+        plan.add_configurations()
+        assert sorted(plan.default_platforms) != sorted(['demo_board_1'])

-def test_get_all_testsuites(class_testplan, all_testsuites_dict):
+
+def test_get_all_testsuites(class_env, all_testsuites_dict):
    """ Testing get_all_testsuites function of TestPlan class in Twister """
-    class_testplan.testsuites = all_testsuites_dict
+    plan = TestPlan(class_env)
+    plan.testsuites = all_testsuites_dict
    expected_tests = ['sample_test.app', 'test_a.check_1.1a',
                      'test_a.check_1.1c',
                      'test_a.check_1.2a', 'test_a.check_1.2b',

@@ -68,15 +75,16 @@ def test_get_all_testsuites(class_testplan, all_testsuites_dict):
                      'test_b.check_1', 'test_b.check_2', 'test_c.check_1',
                      'test_c.check_2', 'test_d.check_1.unit_1a',
                      'test_d.check_1.unit_1b']
-    tests = class_testplan.get_all_tests()
+    tests = plan.get_all_tests()
    result = [c.name for c in tests]
-    assert len(class_testplan.get_all_tests()) == len(expected_tests)
+    assert len(plan.get_all_tests()) == len(expected_tests)
    assert sorted(result) == sorted(expected_tests)

-def test_get_platforms(class_testplan, platforms_list):
+def test_get_platforms(class_env, platforms_list):
    """ Testing get_platforms function of TestPlan class in Twister """
-    class_testplan.platforms = platforms_list
-    platform = class_testplan.get_platform("demo_board_1")
+    plan = TestPlan(class_env)
+    plan.platforms = platforms_list
+    platform = plan.get_platform("demo_board_1")
    assert isinstance(platform, Platform)
    assert platform.name == "demo_board_1"

@@ -99,19 +107,20 @@ TESTDATA_PART1 = [

@pytest.mark.parametrize("tc_attribute, tc_value, plat_attribute, plat_value, expected_discards",
                         TESTDATA_PART1)
-def test_apply_filters_part1(class_testplan, all_testsuites_dict, platforms_list,
+def test_apply_filters_part1(class_env, all_testsuites_dict, platforms_list,
                             tc_attribute, tc_value, plat_attribute, plat_value, expected_discards):
    """ Testing apply_filters function of TestPlan class in Twister
    Part 1: Response of apply_filters function have
    appropriate values according to the filters
    """
+    plan = TestPlan(class_env)
    if tc_attribute is None and plat_attribute is None:
-        class_testplan.apply_filters()
+        plan.apply_filters()

-    class_testplan.platforms = platforms_list
-    class_testplan.platform_names = [p.name for p in platforms_list]
-    class_testplan.testsuites = all_testsuites_dict
-    for plat in class_testplan.platforms:
+    plan.platforms = platforms_list
+    plan.platform_names = [p.name for p in platforms_list]
+    plan.testsuites = all_testsuites_dict
+    for plat in plan.platforms:
        if plat_attribute == "ignore_tags":
            plat.ignore_tags = plat_value
        if plat_attribute == "flash":

@@ -123,7 +132,7 @@ def test_apply_filters_part1(class_testplan, all_testsuites_dict, platforms_list
            plat.env_satisfied = False
        if plat_attribute == "supported_toolchains":
            plat.supported_toolchains = plat_value
-        for _, testcase in class_testplan.testsuites.items():
+        for _, testcase in plan.testsuites.items():
            if tc_attribute == "toolchain_allow":
                testcase.toolchain_allow = tc_value
            if tc_attribute == "platform_allow":

@@ -146,20 +155,20 @@ def test_apply_filters_part1(class_testplan, all_testsuites_dict, platforms_list
            testcase.min_ram = tc_value

    if tc_attribute == "build_on_all":
-        for _, testcase in class_testplan.testsuites.items():
+        for _, testcase in plan.testsuites.items():
            testcase.build_on_all = tc_value
-        class_testplan.apply_filters(exclude_platform=['demo_board_1'])
+        plan.apply_filters(exclude_platform=['demo_board_1'])
    elif plat_attribute == "supported_toolchains":
-        class_testplan.apply_filters(force_toolchain=False,
+        plan.apply_filters(force_toolchain=False,
                                     exclude_platform=['demo_board_1'],
                                     platform=['demo_board_2'])
    elif tc_attribute is None and plat_attribute is None:
-        class_testplan.apply_filters()
+        plan.apply_filters()
    else:
-        class_testplan.apply_filters(exclude_platform=['demo_board_1'],
+        plan.apply_filters(exclude_platform=['demo_board_1'],
                                     platform=['demo_board_2'])

-    filtered_instances = list(filter(lambda item: item.status == "filtered", class_testplan.instances.values()))
+    filtered_instances = list(filter(lambda item: item.status == "filtered", plan.instances.values()))
    for d in filtered_instances:
        assert d.reason == expected_discards

@@ -227,22 +236,23 @@ def test_apply_filters_part3(class_testplan, all_testsuites_dict, platforms_list
    filtered_instances = list(filter(lambda item: item.status == "filtered", class_testplan.instances.values()))
    assert not filtered_instances

-def test_add_instances(test_data, class_testplan, all_testsuites_dict, platforms_list):
+def test_add_instances(test_data, class_env, all_testsuites_dict, platforms_list):
    """ Testing add_instances() function of TestPlan class in Twister
    Test 1: instances dictionary keys have expected values (Platform Name + Testcase Name)
    Test 2: Values of 'instances' dictionary in Testsuite class are an
    instance of 'TestInstance' class
    Test 3: Values of 'instances' dictionary have expected values.
    """
-    class_testplan.outdir = test_data
-    class_testplan.platforms = platforms_list
-    platform = class_testplan.get_platform("demo_board_2")
+    class_env.outdir = test_data
+    plan = TestPlan(class_env)
+    plan.platforms = platforms_list
+    platform = plan.get_platform("demo_board_2")
    instance_list = []
    for _, testcase in all_testsuites_dict.items():
-        instance = TestInstance(testcase, platform, class_testplan.outdir)
+        instance = TestInstance(testcase, platform, class_env.outdir)
        instance_list.append(instance)
-    class_testplan.add_instances(instance_list)
-    assert list(class_testplan.instances.keys()) == \
+    plan.add_instances(instance_list)
+    assert list(plan.instances.keys()) == \
        [platform.name + '/' + s for s in list(all_testsuites_dict.keys())]
-    assert all(isinstance(n, TestInstance) for n in list(class_testplan.instances.values()))
-    assert list(class_testplan.instances.values()) == instance_list
+    assert all(isinstance(n, TestInstance) for n in list(plan.instances.values()))
+    assert list(plan.instances.values()) == instance_list
@ -14,7 +14,7 @@ ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
|
|||
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
|
||||
|
||||
import scl
|
||||
from twisterlib import TwisterConfigParser
|
||||
from twister.testplan import TwisterConfigParser
|
||||
|
||||
def test_yamlload():
|
||||
""" Test to check if loading the non-existent files raises the errors """
|
||||
|
|

610  scripts/twister

@@ -165,7 +165,6 @@ Most everyday users will run with no arguments.
"""

import os
-import argparse
import sys
import logging
import time

@@ -174,8 +173,6 @@ import colorama
from colorama import Fore
from pathlib import Path

-import queue
-

ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:

@@ -199,7 +196,7 @@ except ImportError:
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))

from twister.testplan import TestPlan
-from twister.enviornment import TwisterEnv
+from twister.enviornment import TwisterEnv, parse_arguments
from twister.reports import Reporting
from twister.hardwaremap import HardwareMap
from twister.coverage import run_coverage

@@ -208,609 +205,6 @@ from twister.runner import TwisterRunner
logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)
||||
def parse_arguments():
|
||||
parser = argparse.ArgumentParser(
|
||||
description=__doc__,
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter)
|
||||
parser.fromfile_prefix_chars = "+"
|
||||
|
||||
case_select = parser.add_argument_group("Test case selection",
|
||||
"""
|
||||
Artificially long but functional example:
|
||||
$ ./scripts/twister -v \\
|
||||
--testsuite-root tests/ztest/base \\
|
||||
--testsuite-root tests/kernel \\
|
||||
--test tests/ztest/base/testing.ztest.verbose_0 \\
|
||||
--test tests/kernel/fifo/fifo_api/kernel.fifo
|
||||
|
||||
"kernel.fifo.poll" is one of the test section names in
|
||||
__/fifo_api/testcase.yaml
|
||||
""")
|
||||
|
||||
compare_group_option = parser.add_mutually_exclusive_group()
|
||||
|
||||
platform_group_option = parser.add_mutually_exclusive_group()
|
||||
|
||||
run_group_option = parser.add_mutually_exclusive_group()
|
||||
|
||||
serial = parser.add_mutually_exclusive_group(required="--device-testing" in sys.argv)
|
||||
|
||||
test_or_build = parser.add_mutually_exclusive_group()
|
||||
|
||||
test_xor_subtest = case_select.add_mutually_exclusive_group()
|
||||
|
||||
test_xor_generator = case_select.add_mutually_exclusive_group()
|
||||
|
||||
valgrind_asan_group = parser.add_mutually_exclusive_group()
|
||||
|
||||
case_select.add_argument(
|
||||
"-E",
|
||||
"--save-tests",
|
||||
metavar="FILENAME",
|
||||
action="store",
|
||||
help="Append list of tests and platforms to be run to file.")
|
||||
|
||||
case_select.add_argument(
|
||||
"-F",
|
||||
"--load-tests",
|
||||
metavar="FILENAME",
|
||||
action="store",
|
||||
help="Load list of tests and platforms to be run from file.")
|
||||
|
||||
case_select.add_argument(
|
||||
"-T", "--testsuite-root", action="append", default=[],
|
||||
help="Base directory to recursively search for test cases. All "
|
||||
"testcase.yaml files under here will be processed. May be "
|
||||
"called multiple times. Defaults to the 'samples/' and "
|
||||
"'tests/' directories at the base of the Zephyr tree.")
|
||||
|
||||
case_select.add_argument(
|
||||
"-f",
|
||||
"--only-failed",
|
||||
action="store_true",
|
||||
help="Run only those tests that failed the previous twister run "
|
||||
"invocation.")
|
||||
|
||||
case_select.add_argument("--list-tests", action="store_true",
|
||||
help="""List of all sub-test functions recursively found in
|
||||
all --testsuite-root arguments. Note different sub-tests can share
|
||||
the same section name and come from different directories.
|
||||
The output is flattened and reports --sub-test names only,
|
||||
not their directories. For instance net.socket.getaddrinfo_ok
|
||||
and net.socket.fd_set belong to different directories.
|
||||
""")
|
||||
|
||||
case_select.add_argument("--list-test-duplicates", action="store_true",
|
||||
help="""List tests with duplicate identifiers.
|
||||
""")
|
||||
|
||||
case_select.add_argument("--test-tree", action="store_true",
|
||||
help="""Output the test plan in a tree form""")
|
||||
|
||||
compare_group_option.add_argument("--compare-report",
|
||||
help="Use this report file for size comparison")
|
||||
|
||||
compare_group_option.add_argument(
|
||||
"-m", "--last-metrics", action="store_true",
|
||||
help="Compare with the results of the previous twister "
|
||||
"invocation")
|
||||
|
||||
platform_group_option.add_argument(
"-G",
"--integration",
action="store_true",
help="Run integration tests")

platform_group_option.add_argument(
"--emulation-only", action="store_true",
help="Only build and run emulation platforms")

run_group_option.add_argument(
"--device-testing", action="store_true",
help="Test on device directly. Specify the serial device to "
"use with the --device-serial option.")

run_group_option.add_argument("--generate-hardware-map",
help="""Probe serial devices connected to this platform
and create a hardware map file to be used with
--device-testing
""")

serial.add_argument("--device-serial",
help="""Serial device for accessing the board
(e.g., /dev/ttyACM0)
""")

serial.add_argument("--device-serial-pty",
help="""Script for controlling pseudoterminal.
Twister believes that it interacts with a terminal
when it actually interacts with the script.

E.g "twister --device-testing
--device-serial-pty <script>
""")

serial.add_argument("--hardware-map",
help="""Load hardware map from a file. This will be used
for testing on hardware that is listed in the file.
""")

test_or_build.add_argument(
"-b", "--build-only", action="store_true",
help="Only build the code, do not execute any of it in QEMU")

test_or_build.add_argument(
"--test-only", action="store_true",
help="""Only run device tests with current artifacts, do not build
the code""")

test_xor_subtest.add_argument(
"-s", "--test", action="append",
help="Run only the specified testsuite scenario. These are named by "
"<path/relative/to/Zephyr/base/section.name.in.testcase.yaml>")

test_xor_subtest.add_argument(
"--sub-test", action="append",
help="""Recursively find sub-test functions and run the entire
test section where they were found, including all sibling test
functions. Sub-tests are named by:
section.name.in.testcase.yaml.function_name_without_test_prefix
Example: In kernel.fifo.fifo_loop: 'kernel.fifo' is a section name
and 'fifo_loop' is a name of a function found in main.c without test prefix.
""")

valgrind_asan_group.add_argument(
"--enable-valgrind", action="store_true",
help="""Run binary through valgrind and check for several memory access
errors. Valgrind needs to be installed on the host. This option only
works with host binaries such as those generated for the native_posix
configuration and is mutually exclusive with --enable-asan.
""")

valgrind_asan_group.add_argument(
"--enable-asan", action="store_true",
help="""Enable address sanitizer to check for several memory access
errors. Libasan needs to be installed on the host. This option only
works with host binaries such as those generated for the native_posix
configuration and is mutually exclusive with --enable-valgrind.
""")

# Start of individual args place them in alpha-beta order

board_root_list = ["%s/boards" % ZEPHYR_BASE,
"%s/scripts/pylib/twister/boards" % ZEPHYR_BASE]

parser.add_argument(
"-A", "--board-root", action="append", default=board_root_list,
help="""Directory to search for board configuration files. All .yaml
files in the directory will be processed. The directory should have the same
structure as in the main Zephyr tree: boards/<arch>/<board_name>/""")

parser.add_argument(
"-a", "--arch", action="append",
help="Arch filter for testing. Takes precedence over --platform. "
"If unspecified, test all arches. Multiple invocations "
"are treated as a logical 'or' relationship")

parser.add_argument(
"-B", "--subset",
help="Only run a subset of the tests, 1/4 for running the first 25%%, "
"3/5 means run the 3rd fifth of the total. "
"This option is useful when running a large number of tests on "
"different hosts to speed up execution time.")

parser.add_argument("-C", "--coverage", action="store_true",
help="Generate coverage reports. Implies "
"--enable-coverage.")

parser.add_argument(
"-c", "--clobber-output", action="store_true",
help="Cleaning the output directory will simply delete it instead "
"of the default policy of renaming.")

parser.add_argument(
"--cmake-only", action="store_true",
help="Only run cmake, do not build or run.")

parser.add_argument("--coverage-basedir", default=ZEPHYR_BASE,
help="Base source directory for coverage report.")

parser.add_argument("--coverage-platform", action="append", default=[],
help="Platforms to run coverage reports on. "
"This option may be used multiple times. "
"Defaults to what was selected with --platform.")

parser.add_argument("--coverage-tool", choices=['lcov', 'gcovr'], default='lcov',
help="Tool to use to generate coverage report.")

parser.add_argument(
"-D", "--all-deltas", action="store_true",
help="Show all footprint deltas, positive or negative. Implies "
"--footprint-threshold=0")

parser.add_argument(
"--device-serial-baud", action="store", default=None,
help="Serial device baud rate (default 115200)")

parser.add_argument("--disable-asserts", action="store_false",
dest="enable_asserts",
help="deprecated, left for compatibility")

parser.add_argument(
"--disable-unrecognized-section-test", action="store_true",
default=False,
help="Skip the 'unrecognized section' test.")

parser.add_argument(
"--disable-suite-name-check", action="store_true", default=False,
help="Disable extended test suite name verification at the beginning "
"of Ztest test. This option could be useful for tests or "
"platforms, which for some reason cannot print early logs.")

parser.add_argument("-e", "--exclude-tag", action="append",
help="Specify tags of tests that should not run. "
"Default is to run all tests with all tags.")

parser.add_argument("--enable-coverage", action="store_true",
|
||||
help="Enable code coverage using gcov.")
|
||||
|
||||
parser.add_argument(
|
||||
"--enable-lsan", action="store_true",
|
||||
help="""Enable leak sanitizer to check for heap memory leaks.
|
||||
Libasan needs to be installed on the host. This option only
|
||||
works with host binaries such as those generated for the native_posix
|
||||
configuration and when --enable-asan is given.
|
||||
""")
|
||||
|
||||
parser.add_argument(
|
||||
"--enable-ubsan", action="store_true",
|
||||
help="""Enable undefined behavior sanitizer to check for undefined
|
||||
behaviour during program execution. It uses an optional runtime library
|
||||
to provide better error diagnostics. This option only works with host
|
||||
binaries such as those generated for the native_posix configuration.
|
||||
""")
|
||||
|
||||
parser.add_argument("--enable-size-report", action="store_true",
|
||||
help="Enable expensive computation of RAM/ROM segment sizes.")
|
||||
|
||||
parser.add_argument(
|
||||
"--filter", choices=['buildable', 'runnable'],
|
||||
default='buildable',
|
||||
help="""Filter tests to be built and executed. By default everything is
|
||||
built and if a test is runnable (emulation or a connected device), it
|
||||
is run. This option allows for example to only build tests that can
|
||||
actually be run. Runnable is a subset of buildable.""")
|
||||
|
||||
parser.add_argument("--force-color", action="store_true",
|
||||
help="Always output ANSI color escape sequences "
|
||||
"even when the output is redirected (not a tty)")
|
||||
|
||||
parser.add_argument("--force-toolchain", action="store_true",
|
||||
help="Do not filter based on toolchain, use the set "
|
||||
" toolchain unconditionally")
|
||||
|
||||
parser.add_argument("--gcov-tool", default=None,
|
||||
help="Path to the gcov tool to use for code coverage "
|
||||
"reports")
|
||||
|
||||
parser.add_argument(
|
||||
"-H", "--footprint-threshold", type=float, default=5,
|
||||
help="When checking test case footprint sizes, warn the user if "
|
||||
"the new app size is greater then the specified percentage "
|
||||
"from the last release. Default is 5. 0 to warn on any "
|
||||
"increase on app size.")
|
||||
|
||||
parser.add_argument(
|
||||
"-i", "--inline-logs", action="store_true",
|
||||
help="Upon test failure, print relevant log data to stdout "
|
||||
"instead of just a path to it.")
|
||||
|
||||
parser.add_argument(
|
||||
"-j", "--jobs", type=int,
|
||||
help="Number of jobs for building, defaults to number of CPU threads, "
|
||||
"overcommitted by factor 2 when --build-only.")
|
||||
|
||||
parser.add_argument(
"-K", "--force-platform", action="store_true",
help="""Force testing on selected platforms,
even if they are excluded in the test configuration (testcase.yaml)."""
)

parser.add_argument(
"-l", "--all", action="store_true",
help="Build/test on all platforms. Any --platform arguments "
"ignored.")

parser.add_argument("--list-tags", action="store_true",
help="List all tags occurring in selected tests.")

parser.add_argument("--log-file", metavar="FILENAME", action="store",
help="Specify a file where to save logs.")

parser.add_argument(
"-M", "--runtime-artifact-cleanup", action="store_true",
help="Delete artifacts of passing tests.")

test_xor_generator.add_argument(
"-N", "--ninja", action="store_true", default="--make" not in sys.argv,
help="Use the Ninja generator with CMake. (This is the default)",
required="--short-build-path" in sys.argv)

test_xor_generator.add_argument(
"-k", "--make", action="store_true",
help="Use the Unix Makefile generator with CMake.")

parser.add_argument(
"-n", "--no-clean", action="store_true",
help="Re-use the outdir before building. Will result in "
"faster compilation since builds will be incremental.")

# To be removed in favor of --detailed-skipped-report
parser.add_argument(
"--no-skipped-report", action="store_true",
help="""Do not report skipped test cases in junit output. [Experimental]
""")

parser.add_argument(
"--detailed-skipped-report", action="store_true",
help="Generate a detailed report with all skipped test cases "
"including those that are filtered based on testsuite definition."
)

parser.add_argument(
"-O", "--outdir",
default=os.path.join(os.getcwd(), "twister-out"),
help="Output directory for logs and binaries. "
"Default is 'twister-out' in the current directory. "
"This directory will be cleaned unless '--no-clean' is set. "
"The '--clobber-output' option controls what cleaning does.")

parser.add_argument(
"-o", "--report-dir",
help="""Output reports containing results of the test run into the
specified directory.
The output will be both in JSON and JUNIT format
(twister.json and twister.xml).
""")

parser.add_argument("--overflow-as-errors", action="store_true",
|
||||
help="Treat RAM/SRAM overflows as errors.")
|
||||
|
||||
|
||||
parser.add_argument("-P", "--exclude-platform", action="append", default=[],
|
||||
help="""Exclude platforms and do not build or run any tests
|
||||
on those platforms. This option can be called multiple times.
|
||||
"""
|
||||
)
|
||||
|
||||
parser.add_argument("--persistent-hardware-map", action='store_true',
|
||||
help="""With --generate-hardware-map, tries to use
|
||||
persistent names for serial devices on platforms
|
||||
that support this feature (currently only Linux).
|
||||
""")
|
||||
|
||||
parser.add_argument(
|
||||
"-p", "--platform", action="append",
|
||||
help="Platform filter for testing. This option may be used multiple "
|
||||
"times. Test suites will only be built/run on the platforms "
|
||||
"specified. If this option is not used, then platforms marked "
|
||||
"as default in the platform metadata file will be chosen "
|
||||
"to build and test. ")
|
||||
|
||||
parser.add_argument(
|
||||
"--platform-reports", action="store_true",
|
||||
help="""Create individual reports for each platform.
|
||||
""")
|
||||
|
||||
parser.add_argument("--pre-script",
|
||||
help="""specify a pre script. This will be executed
|
||||
before device handler open serial port and invoke runner.
|
||||
""")
|
||||
|
||||
parser.add_argument("-Q", "--error-on-deprecations", action="store_false",
|
||||
help="Error on deprecation warnings.")
|
||||
|
||||
parser.add_argument(
|
||||
"--quarantine-list",
|
||||
metavar="FILENAME",
|
||||
help="Load list of test scenarios under quarantine. The entries in "
|
||||
"the file need to correspond to the test scenarios names as in "
|
||||
"corresponding tests .yaml files. These scenarios "
|
||||
"will be skipped with quarantine as the reason.")
|
||||
|
||||
parser.add_argument(
|
||||
"--quarantine-verify",
|
||||
action="store_true",
|
||||
help="Use the list of test scenarios under quarantine and run them"
|
||||
"to verify their current status.")
|
||||
|
||||
parser.add_argument("-R", "--enable-asserts", action="store_true",
|
||||
default=True,
|
||||
help="deprecated, left for compatibility")
|
||||
|
||||
parser.add_argument("--report-excluded",
|
||||
action="store_true",
|
||||
help="""List all tests that are never run based on current scope and
|
||||
coverage. If you are looking for accurate results, run this with
|
||||
--all, but this will take a while...""")
|
||||
|
||||
parser.add_argument(
|
||||
"--report-name",
|
||||
help="""Create a report with a custom name.
|
||||
""")
|
||||
|
||||
parser.add_argument(
|
||||
"--report-suffix",
|
||||
help="""Add a suffix to all generated file names, for example to add a
|
||||
version or a commit ID.
|
||||
""")
|
||||
|
||||
parser.add_argument(
|
||||
"--retry-failed", type=int, default=0,
|
||||
help="Retry failing tests again, up to the number of times specified.")
|
||||
|
||||
parser.add_argument(
|
||||
"--retry-interval", type=int, default=60,
|
||||
help="Retry failing tests after specified period of time.")
|
||||
|
||||
parser.add_argument(
|
||||
"--retry-build-errors", action="store_true",
|
||||
help="Retry build errors as well.")
|
||||
|
||||
parser.add_argument(
"-S", "--enable-slow", action="store_true",
help="Execute time-consuming test cases that have been marked "
"as 'slow' in testcase.yaml. Normally these are only built.")

parser.add_argument(
"--seed", type=int,
help="Seed for native posix pseudo-random number generator")

parser.add_argument(
"--short-build-path",
action="store_true",
help="Create shorter build directory paths based on symbolic links. "
"The shortened build path will be used by CMake for generating "
"the build system and executing the build. Use this option if "
"you experience build failures related to path length, for "
"example on Windows OS. This option can be used only with "
"'--ninja' argument (to use Ninja build generator).")

parser.add_argument(
"--show-footprint", action="store_true",
help="Show footprint statistics and deltas since last release."
)

parser.add_argument(
"-t", "--tag", action="append",
help="Specify tags to restrict which tests to run by tag value. "
"Default is to not do any tag filtering. Multiple invocations "
"are treated as a logical 'or' relationship.")

parser.add_argument("--timestamps",
|
||||
action="store_true",
|
||||
help="Print all messages with time stamps.")
|
||||
|
||||
parser.add_argument(
|
||||
"-u",
|
||||
"--no-update",
|
||||
action="store_true",
|
||||
help="Do not update the results of the last run of twister.")
|
||||
|
||||
parser.add_argument(
|
||||
"-v",
|
||||
"--verbose",
|
||||
action="count",
|
||||
default=0,
|
||||
help="Emit debugging information, call multiple times to increase "
|
||||
"verbosity.")
|
||||
|
||||
parser.add_argument("-W", "--disable-warnings-as-errors", action="store_true",
|
||||
help="Do not treat warning conditions as errors.")
|
||||
|
||||
parser.add_argument(
|
||||
"--west-flash", nargs='?', const=[],
|
||||
help="""Uses west instead of ninja or make to flash when running with
|
||||
--device-testing. Supports comma-separated argument list.
|
||||
|
||||
E.g "twister --device-testing --device-serial /dev/ttyACM0
|
||||
--west-flash="--board-id=foobar,--erase"
|
||||
will translate to "west flash -- --board-id=foobar --erase"
|
||||
|
||||
NOTE: device-testing must be enabled to use this option.
|
||||
"""
|
||||
)
|
||||
parser.add_argument(
"--west-runner",
help="""Uses the specified west runner instead of default when running
with --west-flash.

E.g "twister --device-testing --device-serial /dev/ttyACM0
--west-flash --west-runner=pyocd"
will translate to "west flash --runner pyocd"

NOTE: west-flash must be enabled to use this option.
"""
)

parser.add_argument(
"-X", "--fixture", action="append", default=[],
help="Specify a fixture that a board might support.")

parser.add_argument(
"-x", "--extra-args", action="append", default=[],
help="""Extra CMake cache entries to define when building test cases.
May be called multiple times. The key-value entries will be
prefixed with -D before being passed to CMake.
E.g
"twister -x=USE_CCACHE=0"
will translate to
"cmake -DUSE_CCACHE=0"
which will ultimately disable ccache.
"""
)
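# Editor's illustrative note (not part of the original script): each
# --extra-args entry is expected to be a KEY=VALUE pair, so conceptually
# ["USE_CCACHE=0"] becomes the CMake argument list ["-DUSE_CCACHE=0"].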

parser.add_argument(
"-y", "--dry-run", action="store_true",
help="""Create the filtered list of test cases, but don't actually
run them. Useful if you're just interested in the test plan
generated for every run and saved in the specified output
directory (testplan.json).
""")

parser.add_argument(
"-z", "--size", action="append",
help="Don't run twister. Instead, produce a report to "
"stdout detailing RAM/ROM sizes on the specified filenames. "
"All other command line arguments ignored.")

options = parser.parse_args()

# Very early error handling
if options.device_serial_pty and os.name == "nt": # OS is Windows
logger.error("--device-serial-pty is not supported on Windows OS")
sys.exit(1)

if options.west_runner and options.west_flash is None:
logger.error("west-runner requires west-flash to be enabled")
sys.exit(1)

if options.west_flash and not options.device_testing:
logger.error("west-flash requires device-testing to be enabled")
sys.exit(1)

if not options.testsuite_root:
options.testsuite_root = [os.path.join(ZEPHYR_BASE, "tests"),
os.path.join(ZEPHYR_BASE, "samples")]

if options.show_footprint or options.compare_report:
options.enable_size_report = True

if options.coverage:
options.enable_coverage = True

if not options.coverage_platform:
options.coverage_platform = options.platform

if options.enable_valgrind and not shutil.which("valgrind"):
logger.error("valgrind enabled but valgrind executable not found")
sys.exit(1)

if options.device_testing and (options.device_serial or options.device_serial_pty) and len(options.platform) > 1:
logger.error("""When --device-testing is used with
--device-serial or --device-serial-pty,
only one platform is allowed""")
sys.exit(1)

if options.size:
from twister.size_calc import SizeCalculator
for fn in options.size:
sc = SizeCalculator(fn, [])
sc.size_report()
sys.exit(1)
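# Editor's illustrative note (assumed example, not from this diff): a call such
# as "twister -z build/zephyr/zephyr.elf" would print a RAM/ROM size report for
# that binary and exit without building or running any tests.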

return options
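# Editor's note (illustrative, not part of the original source): the main()
# hunk below changes the call site to pass the argument list explicitly
# (sys.argv[1:]), which also makes it possible to drive the parser from the
# testsuite with something along the lines of the hypothetical call:
#   options = parse_arguments(["-T", "tests/kernel", "--dry-run"])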

def setup_logging(outdir, log_file, verbose, timestamps):
# create file handler which logs even debug messages
if log_file:
@@ -849,7 +243,7 @@ def init_color(colorama_strip):
def main():
start_time = time.time()

options = parse_arguments()
options = parse_arguments(sys.argv[1:])

# Configure color output
color_strip = False if options.force_color else None