2020-03-24 14:40:28 -04:00
|
|
|
#!/usr/bin/env python3
|
|
|
|
# vim: set syntax=python ts=4 :
|
|
|
|
#
|
2024-11-28 10:05:29 +01:00
|
|
|
# Copyright (c) 2018-2024 Intel Corporation
|
2024-10-16 17:35:10 +01:00
|
|
|
# Copyright (c) 2024 Arm Limited (or its affiliates). All rights reserved.
|
|
|
|
#
|
2020-03-24 14:40:28 -04:00
|
|
|
# SPDX-License-Identifier: Apache-2.0
|
2024-11-27 10:37:51 +00:00
|
|
|
import collections
|
|
|
|
import copy
|
2024-12-05 10:16:03 -05:00
|
|
|
import itertools
|
2024-11-27 10:37:51 +00:00
|
|
|
import json
|
|
|
|
import logging
|
2020-03-24 14:40:28 -04:00
|
|
|
import os
|
2024-11-27 10:37:51 +00:00
|
|
|
import random
|
2020-03-24 14:40:28 -04:00
|
|
|
import re
|
|
|
|
import subprocess
|
2024-11-27 10:37:51 +00:00
|
|
|
import sys
|
|
|
|
from argparse import Namespace
|
2022-06-11 16:46:33 -04:00
|
|
|
from collections import OrderedDict
|
|
|
|
from itertools import islice
|
2023-08-03 10:56:41 +01:00
|
|
|
from pathlib import Path
|
2024-11-27 10:37:51 +00:00
|
|
|
|
|
|
|
import snippets
|
2022-06-23 16:45:38 -04:00
|
|
|
|
2022-06-11 16:46:33 -04:00
|
|
|
try:
|
2024-11-27 10:37:51 +00:00
|
|
|
from anytree import Node, RenderTree, find
|
2022-06-11 16:46:33 -04:00
|
|
|
except ImportError:
|
|
|
|
print("Install the anytree module to use the --test-tree option")
|
2020-09-23 06:28:50 -07:00
|
|
|
|
2024-11-27 10:37:51 +00:00
|
|
|
import scl
|
|
|
|
from twisterlib.config_parser import TwisterConfigParser
|
2022-06-23 17:40:57 -04:00
|
|
|
from twisterlib.error import TwisterRuntimeError
|
twister: Account for board & SoC extensions
Problem
-------
Board & SoC extensions are used to define out-of-tree board variants or
SoC qualifiers. When a board is extended, it has multiple directories
associated with it (each with its own `board.yml`), where twister should
be able to find additional platform files to support these qualifiers.
Currently, this doesn't work, because twister only traverses the primary
BOARD_DIR and ignores the rest.
The fix would've been trivial in the case of "legacy" platform files,
i.e. those of the form `<normalized_board_target>.yaml`, but it's less
straightforward for the newly introduced `twister.yaml` format.
A `twister.yaml` file contains platform configuration that can be shared
by multiple board targets and tweaked for specific targets by using the
top-level `variants` key. Normally, there is at most one `twister.yaml`
per board, but the file isn't necessarily unique to one board. Instead,
it's unique to one directory, which may define multiple boards (as is
the case with e.g. `boards/qemu/x86/`).
With extensions in the picture, the goal is to initialize platforms when
given multiple `twister.yaml` per board. The OOT files are expected to
only provide information about OOT board targets, without being able to
override in-tree targets (same principle as in the Zephyr build system).
Solution
--------
The `twister.yaml` handling is broken up into multiple passes - first
loading all the files, then splitting the `variants` keys apart from the
shared configuration, before constructing the Platform instances.
The purpose of the split is to treat the variant information as global,
instead of making unnecessary or faulty assumptions about locality.
Remember that the build system can derive board target names not only
from `board.yml`, but from `soc.yml` too. Considering that any board may
end up using an OOT-extended SoC (and hence multiple `soc.yml` files),
not every board target can be said to belong to some board dir.
Unlike the variant data, the remaining top-level config is still rooted
to the primary BOARD_DIR and inherited by the extension dirs from there.
This is quite intuitive in most imagined cases, but there is a caveat:
if a `twister.yaml` resides in an extension dir, then it is allowed to
have a top-level config of its own, but it will be silently ignored.
This is to support corner cases where, much like how a single board dir
can define multiple boards, a single board dir can also extend multiple
boards, or even do both. In those cases, the primary BOARD_DIR rule
should make it unambiguous which config belongs to which board, even if
it may seem counter-intuitive at first.
For concrete examples of what this means, please see the newly added
platform unit tests.
As part of these functional changes, a good chunk of logic is moved out
of `TestPlan.add_configurations()` into a new function in `platform.py`.
This is because recombining the top-level and variant configs requires
direct manipulation of the loaded YAML contents, which would be improper
to do outside of the module responsible for encapsulating this data.
Signed-off-by: Grzegorz Swiderski <grzegorz.swiderski@nordicsemi.no>
2025-01-31 13:03:58 +01:00
|
|
|
from twisterlib.platform import Platform, generate_platforms
|
2024-11-27 10:37:51 +00:00
|
|
|
from twisterlib.quarantine import Quarantine
|
2024-05-28 12:31:53 +00:00
|
|
|
from twisterlib.statuses import TwisterStatus
|
2022-06-23 17:40:57 -04:00
|
|
|
from twisterlib.testinstance import TestInstance
|
2024-11-27 10:37:51 +00:00
|
|
|
from twisterlib.testsuite import TestSuite, scan_testsuite_path
|
2022-08-09 11:13:54 +02:00
|
|
|
from zephyr_module import parse_modules
|
2022-06-09 09:38:39 -04:00
|
|
|
|
2024-11-27 10:27:24 +00:00
|
|
|
logger = logging.getLogger('twister')
|
|
|
|
|
2020-03-24 14:40:28 -04:00
|
|
|
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
|
|
|
|
if not ZEPHYR_BASE:
|
|
|
|
sys.exit("$ZEPHYR_BASE environment variable undefined")
|
|
|
|
|
2020-07-08 14:43:07 -07:00
|
|
|
# This is needed to load edt.pickle files.
|
2021-03-26 16:18:58 -07:00
|
|
|
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts",
|
|
|
|
"python-devicetree", "src"))
|
|
|
|
from devicetree import edtlib # pylint: disable=unused-import
|
2020-03-24 14:40:28 -04:00
|
|
|
|
|
|
|
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
|
|
|
|
|
2022-05-14 09:56:47 -04:00
|
|
|
class Filters:
    """Labels recording why a test instance was filtered out of the plan.

    Each constant is a short human-readable string attached to a filtered
    test instance so reports can explain the reason for its exclusion.
    """
    # platform keys
    PLATFORM_KEY = 'platform key filter'
    # filters provided on command line by the user/tester
    CMD_LINE = 'command line filter'
    # filters in the testsuite yaml definition
    TESTSUITE = 'testsuite filter'
    # filters in the testplan yaml definition
    TESTPLAN = 'testplan filter'
    # filters related to platform definition
    PLATFORM = 'Platform related filter'
    # in case a test suite was quarantined.
    QUARANTINE = 'Quarantine filter'
    # in case a test suite is skipped intentionally.
    SKIP = 'Skip filter'
    # in case of incompatibility between selected and allowed toolchains.
    TOOLCHAIN = 'Toolchain filter'
    # in case where an optional module is not available
    MODULE = 'Module filter'
    # in case of missing env. variable required for a platform
    ENVIRONMENT = 'Environment filter'
|
2022-05-14 09:56:47 -04:00
|
|
|
|
|
|
|
|
2022-11-21 15:17:21 -05:00
|
|
|
class TestLevel:
    """A named test level: a set of scenarios, optionally inherited.

    Levels come from the 'levels' section of the test configuration file.
    Each level selects scenarios directly (via 'adds' patterns) and may
    inherit the scenarios of other levels (via 'inherits').

    Note: the original version used mutable class attributes (``levels``
    and ``scenarios`` as shared lists); state is now per-instance to avoid
    accidental sharing between levels.
    """

    def __init__(self, name=None, levels=None, scenarios=None):
        # Level name as given in the test configuration.
        self.name = name
        # Names of the levels this level inherits scenarios from.
        self.levels = [] if levels is None else levels
        # Resolved scenario ids selected by this level.
        self.scenarios = [] if scenarios is None else scenarios
|
|
|
|
|
2024-08-09 13:25:16 +02:00
|
|
|
|
2022-06-09 09:30:27 -04:00
|
|
|
class TestPlan:
|
2024-08-09 13:25:16 +02:00
|
|
|
    __test__ = False  # for pytest to skip this class when it collects tests

    # Matches CONFIG_<NAME>=<value> lines, with optionally quoted values.
    config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
    # Matches generic <NAME>=<value> lines (dt_ suggests devicetree
    # output — TODO confirm against callers).
    dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')

    # Schema used to validate testsuite yaml files.
    suite_schema = scl.yaml_load(
        os.path.join(ZEPHYR_BASE,
                     "scripts", "schemas", "twister", "testsuite-schema.yaml"))
    # Schema used to validate quarantine yaml files.
    quarantine_schema = scl.yaml_load(
        os.path.join(ZEPHYR_BASE,
                     "scripts", "schemas", "twister", "quarantine-schema.yaml"))

    # Path of the schema for the global test configuration file
    # (loaded lazily in parse_configuration()).
    tc_schema_path = os.path.join(
        ZEPHYR_BASE,
        "scripts",
        "schemas",
        "twister",
        "test-config-schema.yaml"
    )

    # Filenames that mark a directory as containing a sample / testsuite.
    SAMPLE_FILENAME = 'sample.yaml'
    TESTSUITE_FILENAME = 'testcase.yaml'
|
2020-05-13 13:34:00 -07:00
|
|
|
|
2024-10-16 17:35:10 +01:00
|
|
|
def __init__(self, env: Namespace):
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2022-06-09 14:48:11 -04:00
|
|
|
self.options = env.options
|
2022-06-10 06:51:25 -04:00
|
|
|
self.env = env
|
|
|
|
|
2020-03-24 14:40:28 -04:00
|
|
|
# Keep track of which test cases we've filtered out and why
|
2022-03-23 14:07:54 -04:00
|
|
|
self.testsuites = {}
|
2022-11-22 07:28:29 -08:00
|
|
|
self.quarantine = None
|
2020-03-24 14:40:28 -04:00
|
|
|
self.platforms = []
|
2021-12-10 18:18:50 +01:00
|
|
|
self.platform_names = []
|
2020-03-24 14:40:28 -04:00
|
|
|
self.selected_platforms = []
|
|
|
|
self.default_platforms = []
|
|
|
|
self.load_errors = 0
|
|
|
|
self.instances = dict()
|
2024-05-06 15:17:54 +02:00
|
|
|
self.instance_fail_count = 0
|
2020-03-24 14:40:28 -04:00
|
|
|
self.warnings = 0
|
|
|
|
|
2022-11-21 15:17:21 -05:00
|
|
|
self.scenarios = []
|
|
|
|
|
2022-06-11 17:24:28 -04:00
|
|
|
self.hwm = env.hwm
|
2022-01-14 16:36:01 +01:00
|
|
|
# used during creating shorter build paths
|
|
|
|
self.link_dir_counter = 0
|
2022-04-18 22:34:39 -04:00
|
|
|
self.modules = []
|
|
|
|
|
2022-06-11 16:46:33 -04:00
|
|
|
self.run_individual_testsuite = []
|
2022-11-21 15:17:21 -05:00
|
|
|
self.levels = []
|
|
|
|
self.test_config = {}
|
|
|
|
|
2024-10-16 17:35:10 +01:00
|
|
|
self.name = "unnamed"
|
2022-11-21 15:17:21 -05:00
|
|
|
|
|
|
|
def get_level(self, name):
|
2024-11-27 11:49:19 +00:00
|
|
|
level = next((lvl for lvl in self.levels if lvl.name == name), None)
|
2022-11-21 15:17:21 -05:00
|
|
|
return level
|
|
|
|
|
|
|
|
def parse_configuration(self, config_file):
|
|
|
|
if os.path.exists(config_file):
|
|
|
|
tc_schema = scl.yaml_load(self.tc_schema_path)
|
|
|
|
self.test_config = scl.yaml_load_verify(config_file, tc_schema)
|
|
|
|
else:
|
|
|
|
raise TwisterRuntimeError(f"File {config_file} not found.")
|
|
|
|
|
|
|
|
levels = self.test_config.get('levels', [])
|
|
|
|
|
|
|
|
# Do first pass on levels to get initial data.
|
|
|
|
for level in levels:
|
|
|
|
adds = []
|
|
|
|
for s in level.get('adds', []):
|
|
|
|
r = re.compile(s)
|
|
|
|
adds.extend(list(filter(r.fullmatch, self.scenarios)))
|
|
|
|
|
|
|
|
tl = TestLevel()
|
|
|
|
tl.name = level['name']
|
|
|
|
tl.scenarios = adds
|
|
|
|
tl.levels = level.get('inherits', [])
|
|
|
|
self.levels.append(tl)
|
|
|
|
|
|
|
|
# Go over levels again to resolve inheritance.
|
|
|
|
for level in levels:
|
|
|
|
inherit = level.get('inherits', [])
|
|
|
|
_level = self.get_level(level['name'])
|
|
|
|
if inherit:
|
|
|
|
for inherted_level in inherit:
|
|
|
|
_inherited = self.get_level(inherted_level)
|
2024-10-16 17:35:10 +01:00
|
|
|
assert _inherited, "Unknown inherited level {inherted_level}"
|
2022-11-21 15:17:21 -05:00
|
|
|
_inherited_scenarios = _inherited.scenarios
|
2024-10-16 17:35:10 +01:00
|
|
|
level_scenarios = _level.scenarios if _level else []
|
2022-11-21 15:17:21 -05:00
|
|
|
level_scenarios.extend(_inherited_scenarios)
|
2022-06-11 16:46:33 -04:00
|
|
|
|
|
|
|
def find_subtests(self):
|
|
|
|
sub_tests = self.options.sub_test
|
|
|
|
if sub_tests:
|
|
|
|
for subtest in sub_tests:
|
2024-11-24 15:04:45 +01:00
|
|
|
_subtests = self.get_testcase(subtest)
|
2022-06-11 16:46:33 -04:00
|
|
|
for _subtest in _subtests:
|
|
|
|
self.run_individual_testsuite.append(_subtest.name)
|
|
|
|
|
|
|
|
if self.run_individual_testsuite:
|
|
|
|
logger.info("Running the following tests:")
|
|
|
|
for test in self.run_individual_testsuite:
|
2024-11-27 16:57:32 +00:00
|
|
|
print(f" - {test}")
|
2022-06-11 16:46:33 -04:00
|
|
|
else:
|
|
|
|
raise TwisterRuntimeError("Tests not found")
|
|
|
|
|
|
|
|
    def discover(self):
        """Discover platforms and testsuites and validate the test plan.

        Collects west modules, loads platform configurations, gathers
        testsuites (optionally restricted via --test), resolves sub-tests,
        checks for duplicate scenario ids, parses the global test
        configuration, and loads/validates any quarantine lists.

        Raises:
            TwisterRuntimeError: if no testsuites are found, any test
                configuration failed to load, or --quarantine-verify is
                given without a quarantine list.
        """
        self.handle_modules()
        if self.options.test:
            self.run_individual_testsuite = self.options.test

        self.add_configurations()
        num = self.add_testsuites(testsuite_filter=self.run_individual_testsuite)
        if num == 0:
            raise TwisterRuntimeError("No testsuites found at the specified location...")
        if self.load_errors:
            raise TwisterRuntimeError(
                f"Found {self.load_errors} errors loading {num} test configurations."
            )

        self.find_subtests()
        # get list of scenarios we have parsed into one list
        for _, ts in self.testsuites.items():
            self.scenarios.append(ts.id)

        # Duplicate scenario ids are a hard error.
        self.report_duplicates()
        self.parse_configuration(config_file=self.env.test_config)

        # handle quarantine
        ql = self.options.quarantine_list
        qv = self.options.quarantine_verify
        if qv and not ql:
            logger.error("No quarantine list given to be verified")
            raise TwisterRuntimeError("No quarantine list given to be verified")
        if ql:
            for quarantine_file in ql:
                try:
                    # validate quarantine yaml file against the provided schema
                    scl.yaml_load_verify(quarantine_file, self.quarantine_schema)
                except scl.EmptyYamlFileException:
                    # An empty quarantine file is legal; nothing to verify.
                    logger.debug(f'Quarantine file {quarantine_file} is empty')
            self.quarantine = Quarantine(ql)
|
2022-06-11 16:46:33 -04:00
|
|
|
|
|
|
|
    def load(self):
        """Populate self.instances from a previous run's results file or by
        applying filters, then optionally reduce the plan to a --subset.

        Raises:
            TwisterRuntimeError: on a malformed or out-of-range --subset
                value.
        """
        # Previous results are read from twister.json (or the suffixed
        # variant when --report-suffix was given).
        if self.options.report_suffix:
            last_run = os.path.join(
                self.options.outdir,
                f"twister_{self.options.report_suffix}.json"
            )
        else:
            last_run = os.path.join(self.options.outdir, "twister.json")

        if self.options.only_failed or self.options.report_summary is not None:
            self.load_from_file(last_run)
            self.selected_platforms = set(p.platform.name for p in self.instances.values())
        elif self.options.load_tests:
            self.load_from_file(self.options.load_tests)
            self.selected_platforms = set(p.platform.name for p in self.instances.values())
        elif self.options.test_only:
            # Get list of connected hardware and filter tests to only be run on connected hardware.
            # If the platform does not exist in the hardware map or was not specified by --platform,
            # just skip it.

            connected_list = []
            excluded_list = []
            for _cp in self.options.platform:
                if _cp in self.platform_names:
                    connected_list.append(self.get_platform(_cp).name)

            if self.options.exclude_platform:
                for _p in self.options.exclude_platform:
                    if _p in self.platform_names:
                        excluded_list.append(self.get_platform(_p).name)
                # Exclusions win over --platform selections.
                for excluded in excluded_list:
                    if excluded in connected_list:
                        connected_list.remove(excluded)

            self.load_from_file(last_run, filter_platform=connected_list)
            self.selected_platforms = set(p.platform.name for p in self.instances.values())
        else:
            # Normal discovery path: build instances by filtering.
            self.apply_filters()

        if self.options.subset:
            # --subset has the form "<subset>/<sets>", 1-based.
            s = self.options.subset
            try:
                subset, sets = (int(x) for x in s.split("/"))
            except ValueError as err:
                raise TwisterRuntimeError("Bad subset value.") from err

            if subset > sets:
                raise TwisterRuntimeError("subset should not exceed the total number of sets")

            if int(subset) > 0 and int(sets) >= int(subset):
                logger.info(f"Running only a subset: {subset}/{sets}")
            else:
                raise TwisterRuntimeError(
                    f"You have provided a wrong subset value: {self.options.subset}."
                )

            self.generate_subset(subset, int(sets))
|
|
|
|
|
2022-06-11 16:46:33 -04:00
|
|
|
def generate_subset(self, subset, sets):
|
|
|
|
# Test instances are sorted depending on the context. For CI runs
|
|
|
|
# the execution order is: "plat1-testA, plat1-testB, ...,
|
|
|
|
# plat1-testZ, plat2-testA, ...". For hardware tests
|
|
|
|
# (device_testing), were multiple physical platforms can run the tests
|
|
|
|
# in parallel, it is more efficient to run in the order:
|
|
|
|
# "plat1-testA, plat2-testA, ..., plat1-testB, plat2-testB, ..."
|
|
|
|
if self.options.device_testing:
|
|
|
|
self.instances = OrderedDict(sorted(self.instances.items(),
|
|
|
|
key=lambda x: x[0][x[0].find("/") + 1:]))
|
|
|
|
else:
|
|
|
|
self.instances = OrderedDict(sorted(self.instances.items()))
|
|
|
|
|
2023-04-21 14:09:10 +02:00
|
|
|
if self.options.shuffle_tests:
|
|
|
|
seed_value = int.from_bytes(os.urandom(8), byteorder="big")
|
|
|
|
if self.options.shuffle_tests_seed is not None:
|
|
|
|
seed_value = self.options.shuffle_tests_seed
|
|
|
|
|
|
|
|
logger.info(f"Shuffle tests with seed: {seed_value}")
|
|
|
|
random.seed(seed_value)
|
|
|
|
temp_list = list(self.instances.items())
|
|
|
|
random.shuffle(temp_list)
|
|
|
|
self.instances = OrderedDict(temp_list)
|
|
|
|
|
2022-06-11 16:46:33 -04:00
|
|
|
# Do calculation based on what is actually going to be run and evaluated
|
|
|
|
# at runtime, ignore the cases we already know going to be skipped.
|
|
|
|
# This fixes an issue where some sets would get majority of skips and
|
|
|
|
# basically run nothing beside filtering.
|
2024-05-28 12:31:53 +00:00
|
|
|
to_run = {k : v for k,v in self.instances.items() if v.status == TwisterStatus.NONE}
|
2022-06-11 16:46:33 -04:00
|
|
|
total = len(to_run)
|
|
|
|
per_set = int(total / sets)
|
|
|
|
num_extra_sets = total - (per_set * sets)
|
|
|
|
|
|
|
|
# Try and be more fair for rounding error with integer division
|
|
|
|
# so the last subset doesn't get overloaded, we add 1 extra to
|
|
|
|
# subsets 1..num_extra_sets.
|
|
|
|
if subset <= num_extra_sets:
|
|
|
|
start = (subset - 1) * (per_set + 1)
|
|
|
|
end = start + per_set + 1
|
|
|
|
else:
|
|
|
|
base = num_extra_sets * (per_set + 1)
|
|
|
|
start = ((subset - num_extra_sets - 1) * per_set) + base
|
|
|
|
end = start + per_set
|
|
|
|
|
|
|
|
sliced_instances = islice(to_run.items(), start, end)
|
2024-05-28 12:31:53 +00:00
|
|
|
skipped = {k : v for k,v in self.instances.items() if v.status == TwisterStatus.SKIP}
|
|
|
|
errors = {k : v for k,v in self.instances.items() if v.status == TwisterStatus.ERROR}
|
2022-06-11 16:46:33 -04:00
|
|
|
self.instances = OrderedDict(sliced_instances)
|
|
|
|
if subset == 1:
|
|
|
|
# add all pre-filtered tests that are skipped or got error status
|
|
|
|
# to the first set to allow for better distribution among all sets.
|
|
|
|
self.instances.update(skipped)
|
|
|
|
self.instances.update(errors)
|
|
|
|
|
|
|
|
|
|
|
|
def handle_modules(self):
|
|
|
|
# get all enabled west projects
|
2022-08-09 11:13:54 +02:00
|
|
|
modules_meta = parse_modules(ZEPHYR_BASE)
|
2022-06-11 16:46:33 -04:00
|
|
|
self.modules = [module.meta.get('name') for module in modules_meta]
|
|
|
|
|
|
|
|
|
|
|
|
def report(self):
|
2023-05-08 14:34:44 +02:00
|
|
|
if self.options.test_tree:
|
2024-11-28 10:05:29 +01:00
|
|
|
if not self.options.detailed_test_id:
|
|
|
|
logger.info("Test tree is always shown with detailed test-id.")
|
2022-06-11 16:46:33 -04:00
|
|
|
self.report_test_tree()
|
|
|
|
return 0
|
|
|
|
elif self.options.list_tests:
|
2024-11-28 10:05:29 +01:00
|
|
|
if not self.options.detailed_test_id:
|
|
|
|
logger.info("Test list is always shown with detailed test-id.")
|
2022-06-11 16:46:33 -04:00
|
|
|
self.report_test_list()
|
|
|
|
return 0
|
|
|
|
elif self.options.list_tags:
|
|
|
|
self.report_tag_list()
|
|
|
|
return 0
|
|
|
|
|
|
|
|
return 1
|
|
|
|
|
|
|
|
def report_duplicates(self):
|
2022-11-21 15:17:21 -05:00
|
|
|
dupes = [item for item, count in collections.Counter(self.scenarios).items() if count > 1]
|
2022-06-11 16:46:33 -04:00
|
|
|
if dupes:
|
2023-05-08 14:34:44 +02:00
|
|
|
msg = "Duplicated test scenarios found:\n"
|
2022-06-11 16:46:33 -04:00
|
|
|
for dupe in dupes:
|
2024-11-27 16:57:32 +00:00
|
|
|
msg += (f"- {dupe} found in:\n")
|
2022-06-11 16:46:33 -04:00
|
|
|
for dc in self.get_testsuite(dupe):
|
2024-11-27 16:57:32 +00:00
|
|
|
msg += (f" - {dc.yamlfile}\n")
|
2023-05-08 14:34:44 +02:00
|
|
|
raise TwisterRuntimeError(msg)
|
2022-06-11 16:46:33 -04:00
|
|
|
else:
|
2023-05-08 14:34:44 +02:00
|
|
|
logger.debug("No duplicates found.")
|
2022-06-11 16:46:33 -04:00
|
|
|
|
|
|
|
def report_tag_list(self):
|
|
|
|
tags = set()
|
|
|
|
for _, tc in self.testsuites.items():
|
|
|
|
tags = tags.union(tc.tags)
|
|
|
|
|
|
|
|
for t in tags:
|
2024-11-27 16:57:32 +00:00
|
|
|
print(f"- {t}")
|
2022-06-11 16:46:33 -04:00
|
|
|
|
|
|
|
    def report_test_tree(self):
        """Print all tests as a tree grouped by area (and sub-area).

        Tests whose id starts with "sample." are grouped under "Samples"
        by their second id component; everything else goes under "Tests"
        by the first component, with a second-level grouping when the id
        has more than two dot-separated components.
        """
        tests_list = self.get_tests_list()

        testsuite = Node("Testsuite")
        samples = Node("Samples", parent=testsuite)
        tests = Node("Tests", parent=testsuite)

        for test in sorted(tests_list):
            if test.startswith("sample."):
                sec = test.split(".")
                # Reuse an existing area node directly under Samples.
                # sname is bound as a lambda default to avoid the classic
                # late-binding-closure pitfall inside the loop.
                area = find(
                    samples,
                    lambda node, sname=sec[1]: node.name == sname and node.parent == samples
                )
                if not area:
                    area = Node(sec[1], parent=samples)

                Node(test, parent=area)
            else:
                sec = test.split(".")
                area = find(
                    tests,
                    lambda node, sname=sec[0]: node.name == sname and node.parent == tests
                )
                if not area:
                    area = Node(sec[0], parent=tests)

                # Only ids with more than two components get a sub-area.
                if area and len(sec) > 2:
                    subarea = find(
                        area, lambda node, sname=sec[1], sparent=area: node.name == sname
                        and node.parent == sparent
                    )
                    if not subarea:
                        subarea = Node(sec[1], parent=area)
                    Node(test, parent=subarea)

        for pre, _, node in RenderTree(testsuite):
            print(f"{pre}{node.name}")
|
2022-06-11 16:46:33 -04:00
|
|
|
|
|
|
|
def report_test_list(self):
|
2024-04-17 17:10:37 -05:00
|
|
|
tests_list = self.get_tests_list()
|
2022-06-11 16:46:33 -04:00
|
|
|
|
2024-04-17 17:10:37 -05:00
|
|
|
cnt = 0
|
|
|
|
for test in sorted(tests_list):
|
2022-06-11 16:46:33 -04:00
|
|
|
cnt = cnt + 1
|
2024-11-27 16:57:32 +00:00
|
|
|
print(f" - {test}")
|
|
|
|
print(f"{cnt} total.")
|
2022-06-11 16:46:33 -04:00
|
|
|
|
2020-03-24 14:40:28 -04:00
|
|
|
|
|
|
|
# Debug Functions
|
|
|
|
@staticmethod
|
|
|
|
def info(what):
|
|
|
|
sys.stdout.write(what + "\n")
|
|
|
|
sys.stdout.flush()
|
|
|
|
|
|
|
|
def add_configurations(self):
|
2022-09-14 22:23:15 +02:00
|
|
|
# Create a list of board roots as defined by the build system in general
|
|
|
|
# Note, internally in twister a board root includes the `boards` folder
|
|
|
|
# but in Zephyr build system, the board root is without the `boards` in folder path.
|
|
|
|
board_roots = [Path(os.path.dirname(root)) for root in self.env.board_roots]
|
twister: Account for board & SoC extensions
Problem
-------
Board & SoC extensions are used to define out-of-tree board variants or
SoC qualifiers. When a board is extended, it has multiple directories
associated with it (each with its own `board.yml`), where twister should
be able to find additional platform files to support these qualifiers.
Currently, this doesn't work, because twister only traverses the primary
BOARD_DIR and ignores the rest.
The fix would've been trivial in the case of "legacy" platform files,
i.e. those of the form `<normalized_board_target>.yaml`, but it's less
straightforward for the newly introduced `twister.yaml` format.
A `twister.yaml` file contains platform configuration that can be shared
by multiple board targets and tweaked for specific targets by using the
top-level `variants` key. Normally, there is at most one `twister.yaml`
per board, but the file isn't necessarily unique to one board. Instead,
it's unique to one directory, which may define multiple boards (as is
the case with e.g. `boards/qemu/x86/`).
With extensions in the picture, the goal is to initialize platforms when
given multiple `twister.yaml` per board. The OOT files are expected to
only provide information about OOT board targets, without being able to
override in-tree targets (same principle as in the Zephyr build system).
Solution
--------
The `twister.yaml` handling is broken up into multiple passes - first
loading all the files, then splitting the `variants` keys apart from the
shared configuration, before constructing the Platform instances.
The purpose of the split is to treat the variant information as global,
instead of making unnecessary or faulty assumptions about locality.
Remember that the build system can derive board target names not only
from `board.yml`, but from `soc.yml` too. Considering that any board may
end up using an OOT-extended SoC (and hence multiple `soc.yml` files),
not every board target can be said to belong to some board dir.
Unlike the variant data, the remaining top-level config is still rooted
to the primary BOARD_DIR and inherited by the extension dirs from there.
This is quite intuitive in most imagined cases, but there is a caveat:
if a `twister.yaml` resides in an extension dir, then it is allowed to
have a top-level config of its own, but it will be silently ignored.
This is to support corner cases where, much like how a single board dir
can define multiple boards, a single board dir can also extend multiple
boards, or even do both. In those cases, the primary BOARD_DIR rule
should make it unambiguous which config belongs to which board, even if
it may seem counter-intuitive at first.
For concrete examples of what this means, please see the newly added
platform unit tests.
As part of these functional changes, a good chunk of logic is moved out
of `TestPlan.add_configurations()` into a new function in `platform.py`.
This is because recombining the top-level and variant configs requires
direct manipulation of the loaded YAML contents, which would be improper
to do outside of the module responsible for encapsulating this data.
Signed-off-by: Grzegorz Swiderski <grzegorz.swiderski@nordicsemi.no>
2025-01-31 13:03:58 +01:00
|
|
|
soc_roots = self.env.soc_roots
|
|
|
|
arch_roots = self.env.arch_roots
|
2024-08-16 07:09:57 -04:00
|
|
|
|
2022-09-14 22:23:15 +02:00
|
|
|
platform_config = self.test_config.get('platforms', {})
|
2022-11-21 15:17:21 -05:00
|
|
|
|
twister: Account for board & SoC extensions
Problem
-------
Board & SoC extensions are used to define out-of-tree board variants or
SoC qualifiers. When a board is extended, it has multiple directories
associated with it (each with its own `board.yml`), where twister should
be able to find additional platform files to support these qualifiers.
Currently, this doesn't work, because twister only traverses the primary
BOARD_DIR and ignores the rest.
The fix would've been trivial in the case of "legacy" platform files,
i.e. those of the form `<normalized_board_target>.yaml`, but it's less
straightforward for the newly introduced `twister.yaml` format.
A `twister.yaml` file contains platform configuration that can be shared
by multiple board targets and tweaked for specific targets by using the
top-level `variants` key. Normally, there is at most one `twister.yaml`
per board, but the file isn't necessarily unique to one board. Instead,
it's unique to one directory, which may define multiple boards (as is
the case with e.g. `boards/qemu/x86/`).
With extensions in the picture, the goal is to initialize platforms when
given multiple `twister.yaml` per board. The OOT files are expected to
only provide information about OOT board targets, without being able to
override in-tree targets (same principle as in the Zephyr build system).
Solution
--------
The `twister.yaml` handling is broken up into multiple passes - first
loading all the files, then splitting the `variants` keys apart from the
shared configuration, before constructing the Platform instances.
The purpose of the split is to treat the variant information as global,
instead of making unnecessary or faulty assumptions about locality.
Remember that the build system can derive board target names not only
from `board.yml`, but from `soc.yml` too. Considering that any board may
end up using an OOT-extended SoC (and hence multiple `soc.yml` files),
not every board target can be said to belong to some board dir.
Unlike the variant data, the remaining top-level config is still rooted
to the primary BOARD_DIR and inherited by the extension dirs from there.
This is quite intuitive in most imagined cases, but there is a caveat:
if a `twister.yaml` resides in an extension dir, then it is allowed to
have a top-level config of its own, but it will be silently ignored.
This is to support corner cases where, much like how a single board dir
can define multiple boards, a single board dir can also extend multiple
boards, or even do both. In those cases, the primary BOARD_DIR rule
should make it unambiguous which config belongs to which board, even if
it may seem counter-intuitive at first.
For concrete examples of what this means, please see the newly added
platform unit tests.
As part of these functional changes, a good chunk of logic is moved out
of `TestPlan.add_configurations()` into a new function in `platform.py`.
This is because recombining the top-level and variant configs requires
direct manipulation of the loaded YAML contents, which would be improper
to do outside of the module responsible for encapsulating this data.
Signed-off-by: Grzegorz Swiderski <grzegorz.swiderski@nordicsemi.no>
2025-01-31 13:03:58 +01:00
|
|
|
for platform in generate_platforms(board_roots, soc_roots, arch_roots):
|
2024-08-09 17:17:40 -04:00
|
|
|
if not platform.twister:
|
twister: Account for board & SoC extensions
Problem
-------
Board & SoC extensions are used to define out-of-tree board variants or
SoC qualifiers. When a board is extended, it has multiple directories
associated with it (each with its own `board.yml`), where twister should
be able to find additional platform files to support these qualifiers.
Currently, this doesn't work, because twister only traverses the primary
BOARD_DIR and ignores the rest.
The fix would've been trivial in the case of "legacy" platform files,
i.e. those of the form `<normalized_board_target>.yaml`, but it's less
straightforward for the newly introduced `twister.yaml` format.
A `twister.yaml` file contains platform configuration that can be shared
by multiple board targets and tweaked for specific targets by using the
top-level `variants` key. Normally, there is at most one `twister.yaml`
per board, but the file isn't necessarily unique to one board. Instead,
it's unique to one directory, which may define multiple boards (as is
the case with e.g. `boards/qemu/x86/`).
With extensions in the picture, the goal is to initialize platforms when
given multiple `twister.yaml` per board. The OOT files are expected to
only provide information about OOT board targets, without being able to
override in-tree targets (same principle as in the Zephyr build system).
Solution
--------
The `twister.yaml` handling is broken up into multiple passes - first
loading all the files, then splitting the `variants` keys apart from the
shared configuration, before constructing the Platform instances.
The purpose of the split is to treat the variant information as global,
instead of making unnecessary or faulty assumptions about locality.
Remember that the build system can derive board target names not only
from `board.yml`, but from `soc.yml` too. Considering that any board may
end up using an OOT-extended SoC (and hence multiple `soc.yml` files),
not every board target can be said to belong to some board dir.
Unlike the variant data, the remaining top-level config is still rooted
to the primary BOARD_DIR and inherited by the extension dirs from there.
This is quite intuitive in most imagined cases, but there is a caveat:
if a `twister.yaml` resides in an extension dir, then it is allowed to
have a top-level config of its own, but it will be silently ignored.
This is to support corner cases where, much like how a single board dir
can define multiple boards, a single board dir can also extend multiple
boards, or even do both. In those cases, the primary BOARD_DIR rule
should make it unambiguous which config belongs to which board, even if
it may seem counter-intuitive at first.
For concrete examples of what this means, please see the newly added
platform unit tests.
As part of these functional changes, a good chunk of logic is moved out
of `TestPlan.add_configurations()` into a new function in `platform.py`.
This is because recombining the top-level and variant configs requires
direct manipulation of the loaded YAML contents, which would be improper
to do outside of the module responsible for encapsulating this data.
Signed-off-by: Grzegorz Swiderski <grzegorz.swiderski@nordicsemi.no>
2025-01-31 13:03:58 +01:00
|
|
|
continue
|
2024-08-09 17:17:40 -04:00
|
|
|
self.platforms.append(platform)
|
|
|
|
|
|
|
|
if not platform_config.get('override_default_platforms', False):
|
|
|
|
if platform.default:
|
|
|
|
self.default_platforms.append(platform.name)
|
|
|
|
#logger.debug(f"adding {platform.name} to default platforms")
|
|
|
|
continue
|
|
|
|
for pp in platform_config.get('default_platforms', []):
|
|
|
|
if pp in platform.aliases:
|
|
|
|
logger.debug(f"adding {platform.name} to default platforms (override mode)")
|
|
|
|
self.default_platforms.append(platform.name)
|
|
|
|
|
|
|
|
self.platform_names = [a for p in self.platforms for a in p.aliases]
|
2022-06-09 07:53:14 -04:00
|
|
|
|
|
|
|
def get_all_tests(self):
|
|
|
|
testcases = []
|
|
|
|
for _, ts in self.testsuites.items():
|
|
|
|
for case in ts.testcases:
|
2022-08-12 06:21:28 -04:00
|
|
|
testcases.append(case.name)
|
2022-06-09 07:53:14 -04:00
|
|
|
|
|
|
|
return testcases
|
|
|
|
|
2024-04-17 17:10:37 -05:00
|
|
|
def get_tests_list(self):
|
|
|
|
testcases = []
|
|
|
|
if tag_filter := self.options.tag:
|
|
|
|
for _, ts in self.testsuites.items():
|
|
|
|
if ts.tags.intersection(tag_filter):
|
|
|
|
for case in ts.testcases:
|
2024-11-28 10:05:29 +01:00
|
|
|
testcases.append(case.detailed_name)
|
2024-04-17 17:10:37 -05:00
|
|
|
else:
|
|
|
|
for _, ts in self.testsuites.items():
|
|
|
|
for case in ts.testcases:
|
2024-11-28 10:05:29 +01:00
|
|
|
testcases.append(case.detailed_name)
|
2024-04-17 17:10:37 -05:00
|
|
|
|
|
|
|
if exclude_tag := self.options.exclude_tag:
|
|
|
|
for _, ts in self.testsuites.items():
|
|
|
|
if ts.tags.intersection(exclude_tag):
|
|
|
|
for case in ts.testcases:
|
2024-11-28 10:05:29 +01:00
|
|
|
if case.detailed_name in testcases:
|
|
|
|
testcases.remove(case.detailed_name)
|
2024-04-17 17:10:37 -05:00
|
|
|
return testcases
|
|
|
|
|
2024-11-27 13:30:20 +00:00
|
|
|
    def add_testsuites(self, testsuite_filter=None):
        """Discover and register test suites under the configured test roots.

        Walks every directory in ``self.env.test_roots`` looking for suite
        configuration files (``SAMPLE_FILENAME`` takes precedence over
        ``TESTSUITE_FILENAME``), optionally substituting an alternative
        configuration found under ``self.env.alt_config_root``, then parses
        each file and creates one TestSuite per scenario. Parse/load failures
        are logged and counted in ``self.load_errors`` instead of aborting.

        :param testsuite_filter: optional list of suite names; when non-empty,
            only suites whose full name or basename scenario appears in the
            list are registered.
        :return: total number of registered test suites.
        """
        if testsuite_filter is None:
            testsuite_filter = []
        for root in self.env.test_roots:
            root = os.path.abspath(root)

            logger.debug(f"Reading testsuite configuration files under {root}...")

            for dirpath, _, filenames in os.walk(root, topdown=True):
                # A directory is a candidate only if it carries one of the two
                # recognized configuration files; sample config wins over testcase.
                if self.SAMPLE_FILENAME in filenames:
                    filename = self.SAMPLE_FILENAME
                elif self.TESTSUITE_FILENAME in filenames:
                    filename = self.TESTSUITE_FILENAME
                else:
                    continue

                logger.debug("Found possible testsuite in " + dirpath)

                suite_yaml_path = os.path.join(dirpath, filename)
                suite_path = os.path.dirname(suite_yaml_path)

                # An alt config root may shadow the in-tree YAML: the first
                # matching file at the same relative path wins.
                for alt_config_root in self.env.alt_config_root:
                    alt_config = os.path.join(os.path.abspath(alt_config_root),
                                              os.path.relpath(suite_path, root),
                                              filename)
                    if os.path.exists(alt_config):
                        logger.info(
                            f"Using alternative configuration from {os.path.normpath(alt_config)}"
                        )
                        suite_yaml_path = alt_config
                        break

                try:
                    parsed_data = TwisterConfigParser(suite_yaml_path, self.suite_schema)
                    parsed_data.load()
                    # Lazily-populated scan results, shared by all scenarios of
                    # this YAML file (scan happens at most once per directory).
                    subcases = None
                    ztest_suite_names = None

                    for name in parsed_data.scenarios:
                        suite_dict = parsed_data.get_scenario(name)
                        suite = TestSuite(
                            root,
                            suite_path,
                            name,
                            data=suite_dict,
                            detailed_test_id=self.options.detailed_test_id
                        )

                        # convert to fully qualified names
                        suite.integration_platforms = self.verify_platforms_existence(
                            suite.integration_platforms,
                            f"integration_platforms in {suite.name}")
                        suite.platform_exclude = self.verify_platforms_existence(
                            suite.platform_exclude,
                            f"platform_exclude in {suite.name}")
                        suite.platform_allow = self.verify_platforms_existence(
                            suite.platform_allow,
                            f"platform_allow in {suite.name}")

                        if suite.harness in ['ztest', 'test']:
                            if subcases is None:
                                # scan it only once per testsuite
                                subcases, ztest_suite_names = scan_testsuite_path(suite_path)
                            suite.add_subcases(suite_dict, subcases, ztest_suite_names)
                        else:
                            suite.add_subcases(suite_dict)

                        if testsuite_filter:
                            # Filter entries may be either full suite names or
                            # bare scenario basenames.
                            scenario = os.path.basename(suite.name)
                            if (
                                suite.name
                                and (suite.name in testsuite_filter or scenario in testsuite_filter)
                            ):
                                self.testsuites[suite.name] = suite
                        elif suite.name in self.testsuites:
                            msg = (
                                f"test suite '{suite.name}' in '{suite.yamlfile}' is already added"
                            )
                            # Re-encountering the same YAML file is harmless;
                            # the same suite name from a different file is an error.
                            if suite.yamlfile == self.testsuites[suite.name].yamlfile:
                                logger.debug(f"Skip - {msg}")
                            else:
                                msg = (
                                    f"Duplicate {msg} from '{self.testsuites[suite.name].yamlfile}'"
                                )
                                raise TwisterRuntimeError(msg)
                        else:
                            self.testsuites[suite.name] = suite

                except Exception as e:
                    # Keep going on malformed suites; callers can inspect
                    # self.load_errors afterwards.
                    logger.error(f"{suite_path}: can't load (skipping): {e!r}")
                    self.load_errors += 1

        return len(self.testsuites)
|
|
|
|
|
|
|
|
    def __str__(self):
        """Return this object's ``name`` attribute as its string form."""
        return self.name
|
2020-03-24 14:40:28 -04:00
|
|
|
|
|
|
|
def get_platform(self, name):
|
|
|
|
selected_platform = None
|
|
|
|
for platform in self.platforms:
|
2024-08-09 17:17:40 -04:00
|
|
|
if name in platform.aliases:
|
2020-03-24 14:40:28 -04:00
|
|
|
selected_platform = platform
|
|
|
|
break
|
|
|
|
return selected_platform
|
|
|
|
|
2024-03-29 19:51:14 +08:00
|
|
|
def handle_quarantined_tests(self, instance: TestInstance, plat: Platform):
|
|
|
|
if self.quarantine:
|
2025-03-20 15:42:22 +01:00
|
|
|
sim_name = plat.simulation
|
|
|
|
if sim_name != "na" and (simulator := plat.simulator_by_name(self.options.sim_name)):
|
|
|
|
sim_name = simulator.name
|
2024-03-29 19:51:14 +08:00
|
|
|
matched_quarantine = self.quarantine.get_matched_quarantine(
|
2024-11-27 17:50:07 +00:00
|
|
|
instance.testsuite.id,
|
|
|
|
plat.name,
|
|
|
|
plat.arch,
|
2025-03-20 15:42:22 +01:00
|
|
|
sim_name
|
2024-03-29 19:51:14 +08:00
|
|
|
)
|
|
|
|
if matched_quarantine and not self.options.quarantine_verify:
|
2025-05-30 15:36:42 +02:00
|
|
|
instance.status = TwisterStatus.SKIP
|
|
|
|
instance.reason = "Quarantine: " + matched_quarantine
|
2024-03-29 19:51:14 +08:00
|
|
|
return
|
|
|
|
if not matched_quarantine and self.options.quarantine_verify:
|
2025-05-30 15:36:42 +02:00
|
|
|
instance.status = TwisterStatus.SKIP
|
|
|
|
instance.reason = "Not under quarantine"
|
2024-03-29 19:51:14 +08:00
|
|
|
|
2024-11-27 13:30:20 +00:00
|
|
|
    def load_from_file(self, file, filter_platform=None):
        """Rebuild test instances from a previously saved JSON test plan.

        Reads the ``testsuites`` array from *file*, creates a TestInstance for
        each entry (optionally restricted to the platforms in
        *filter_platform*), restores its metrics and per-testcase statuses,
        re-applies quarantine handling and overlays, and registers the
        resulting instances via ``self.add_instances``.

        :param file: path to the JSON test plan to load.
        :param filter_platform: optional list of platform names to keep.
        :return: 1 when the file is missing; None otherwise.
        """
        if filter_platform is None:
            filter_platform = []
        try:
            with open(file) as json_test_plan:
                jtp = json.load(json_test_plan)
                instance_list = []
                for ts in jtp.get("testsuites", []):
                    logger.debug(f"loading {ts['name']}...")
                    testsuite = ts["name"]
                    toolchain = ts["toolchain"]

                    platform = self.get_platform(ts["platform"])
                    if filter_platform and platform.name not in filter_platform:
                        continue
                    instance = TestInstance(
                        self.testsuites[testsuite], platform, toolchain, self.env.outdir
                    )
                    if ts.get("run_id"):
                        instance.run_id = ts.get("run_id")

                    instance.run = instance.check_runnable(
                        self.options,
                        self.hwm
                    )

                    # With --test-only, instances that cannot actually run on
                    # the current setup are dropped entirely.
                    if self.options.test_only and not instance.run:
                        continue

                    instance.metrics['handler_time'] = ts.get('execution_time', 0)
                    instance.metrics['used_ram'] = ts.get("used_ram", 0)
                    instance.metrics['used_rom'] = ts.get("used_rom",0)
                    instance.metrics['available_ram'] = ts.get('available_ram', 0)
                    instance.metrics['available_rom'] = ts.get('available_rom', 0)

                    status = TwisterStatus(ts.get('status'))
                    reason = ts.get("reason", "Unknown")
                    if status in [TwisterStatus.ERROR, TwisterStatus.FAIL]:
                        if self.options.report_summary is not None:
                            # Summary reporting keeps the recorded failure.
                            instance.status = status
                            instance.reason = reason
                            self.instance_fail_count += 1
                        else:
                            # Otherwise clear the status so the instance is
                            # retried and fresh results are captured.
                            instance.status = TwisterStatus.NONE
                            instance.reason = None
                            instance.retries += 1
                    # test marked as built only can run when --test-only is used.
                    # Reset status to capture new results.
                    elif status == TwisterStatus.NOTRUN and instance.run and self.options.test_only:
                        instance.status = TwisterStatus.NONE
                        instance.reason = None
                    else:
                        instance.status = status
                        instance.reason = reason

                    self.handle_quarantined_tests(instance, platform)

                    for tc in ts.get('testcases', []):
                        identifier = tc['identifier']
                        tc_status = TwisterStatus(tc.get('status'))
                        tc_reason = None
                        # we set reason only if status is valid, it might have been
                        # reset above...
                        if instance.status != TwisterStatus.NONE:
                            tc_reason = tc.get('reason')
                        if tc_status != TwisterStatus.NONE:
                            case = instance.set_case_status_by_name(
                                identifier,
                                tc_status,
                                tc_reason
                            )
                            case.duration = tc.get('execution_time', 0)
                            if tc.get('log'):
                                case.output = tc.get('log')

                    instance.create_overlay(platform,
                                            self.options.enable_asan,
                                            self.options.enable_ubsan,
                                            self.options.enable_coverage,
                                            self.options.coverage_platform
                                            )
                    instance_list.append(instance)
                self.add_instances(instance_list)
        except FileNotFoundError as e:
            logger.error(f"{e}")
            return 1
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2024-08-09 17:17:40 -04:00
|
|
|
def check_platform(self, platform, platform_list):
|
2024-11-27 11:44:21 +00:00
|
|
|
return any(p in platform.aliases for p in platform_list)
|
2024-08-09 17:17:40 -04:00
|
|
|
|
2020-03-24 14:40:28 -04:00
|
|
|
def apply_filters(self, **kwargs):
|
|
|
|
|
2022-06-11 17:24:28 -04:00
|
|
|
platform_filter = self.options.platform
|
2023-06-10 00:55:21 +00:00
|
|
|
vendor_filter = self.options.vendor
|
2022-10-10 10:52:58 -04:00
|
|
|
exclude_platform = self.options.exclude_platform
|
2022-06-11 16:46:33 -04:00
|
|
|
testsuite_filter = self.run_individual_testsuite
|
2022-06-11 17:24:28 -04:00
|
|
|
arch_filter = self.options.arch
|
|
|
|
tag_filter = self.options.tag
|
|
|
|
exclude_tag = self.options.exclude_tag
|
|
|
|
all_filter = self.options.all
|
|
|
|
runnable = (self.options.device_testing or self.options.filter == 'runnable')
|
|
|
|
force_toolchain = self.options.force_toolchain
|
|
|
|
force_platform = self.options.force_platform
|
2023-06-22 13:58:25 +00:00
|
|
|
slow_only = self.options.enable_slow_only
|
2022-11-15 11:49:06 -06:00
|
|
|
ignore_platform_key = self.options.ignore_platform_key
|
2022-06-11 17:24:28 -04:00
|
|
|
emu_filter = self.options.emulation_only
|
2020-03-24 14:40:28 -04:00
|
|
|
|
|
|
|
logger.debug("platform filter: " + str(platform_filter))
|
2023-06-10 00:55:21 +00:00
|
|
|
logger.debug(" vendor filter: " + str(vendor_filter))
|
2020-03-24 14:40:28 -04:00
|
|
|
logger.debug(" arch_filter: " + str(arch_filter))
|
|
|
|
logger.debug(" tag_filter: " + str(tag_filter))
|
|
|
|
logger.debug(" exclude_tag: " + str(exclude_tag))
|
|
|
|
|
|
|
|
default_platforms = False
|
2023-06-10 00:55:21 +00:00
|
|
|
vendor_platforms = False
|
2020-08-26 15:47:25 -04:00
|
|
|
emulation_platforms = False
|
2020-03-24 14:40:28 -04:00
|
|
|
|
|
|
|
if all_filter:
|
2024-12-11 07:18:00 -05:00
|
|
|
logger.info("Selecting all possible platforms per testsuite scenario")
|
2020-03-24 14:40:28 -04:00
|
|
|
# When --all used, any --platform arguments ignored
|
|
|
|
platform_filter = []
|
2023-06-10 00:55:21 +00:00
|
|
|
elif not platform_filter and not emu_filter and not vendor_filter:
|
2024-12-11 07:18:00 -05:00
|
|
|
logger.info("Selecting default platforms per testsuite scenario")
|
2020-03-24 14:40:28 -04:00
|
|
|
default_platforms = True
|
2020-08-26 15:47:25 -04:00
|
|
|
elif emu_filter:
|
2025-03-23 05:08:55 -04:00
|
|
|
logger.info("Selecting emulation platforms per testsuite scenario")
|
2020-08-26 15:47:25 -04:00
|
|
|
emulation_platforms = True
|
2023-06-10 00:55:21 +00:00
|
|
|
elif vendor_filter:
|
|
|
|
vendor_platforms = True
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2024-08-09 17:17:40 -04:00
|
|
|
_platforms = []
|
2020-11-24 13:21:27 -05:00
|
|
|
if platform_filter:
|
2024-08-09 17:17:40 -04:00
|
|
|
logger.debug(f"Checking platform filter: {platform_filter}")
|
|
|
|
# find in aliases and rename
|
2024-12-05 06:34:46 -05:00
|
|
|
platform_filter = self.verify_platforms_existence(platform_filter, "platform_filter")
|
2020-11-24 13:21:27 -05:00
|
|
|
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
|
|
|
|
elif emu_filter:
|
2024-11-27 17:50:07 +00:00
|
|
|
platforms = list(
|
|
|
|
filter(lambda p: bool(p.simulator_by_name(self.options.sim_name)), self.platforms)
|
|
|
|
)
|
2023-06-10 00:55:21 +00:00
|
|
|
elif vendor_filter:
|
|
|
|
platforms = list(filter(lambda p: p.vendor in vendor_filter, self.platforms))
|
2023-06-22 18:47:45 +00:00
|
|
|
logger.info(f"Selecting platforms by vendors: {','.join(vendor_filter)}")
|
2020-11-24 13:21:27 -05:00
|
|
|
elif arch_filter:
|
|
|
|
platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms))
|
|
|
|
elif default_platforms:
|
2022-11-21 15:17:21 -05:00
|
|
|
_platforms = list(filter(lambda p: p.name in self.default_platforms, self.platforms))
|
2022-11-21 11:59:01 -05:00
|
|
|
platforms = []
|
|
|
|
# default platforms that can't be run are dropped from the list of
|
|
|
|
# the default platforms list. Default platforms should always be
|
|
|
|
# runnable.
|
|
|
|
for p in _platforms:
|
2024-10-16 17:35:10 +01:00
|
|
|
sim = p.simulator_by_name(self.options.sim_name)
|
|
|
|
if (not sim) or sim.is_runnable():
|
2022-11-21 11:59:01 -05:00
|
|
|
platforms.append(p)
|
2020-11-24 13:21:27 -05:00
|
|
|
else:
|
|
|
|
platforms = self.platforms
|
|
|
|
|
2022-11-21 15:17:21 -05:00
|
|
|
platform_config = self.test_config.get('platforms', {})
|
2025-03-23 05:08:55 -04:00
|
|
|
# test configuration options
|
|
|
|
test_config_options = self.test_config.get('options', {})
|
|
|
|
integration_mode_list = test_config_options.get('integration_mode', [])
|
|
|
|
|
2022-03-23 14:07:54 -04:00
|
|
|
logger.info("Building initial testsuite list...")
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2022-11-15 11:49:06 -06:00
|
|
|
keyed_tests = {}
|
2024-12-05 06:34:46 -05:00
|
|
|
for _, ts in self.testsuites.items():
|
2025-03-23 05:08:55 -04:00
|
|
|
if ts.integration_platforms:
|
|
|
|
_integration_platforms = list(
|
2024-11-27 17:50:07 +00:00
|
|
|
filter(lambda item: item.name in ts.integration_platforms, self.platforms)
|
|
|
|
)
|
2025-03-23 05:08:55 -04:00
|
|
|
else:
|
|
|
|
_integration_platforms = []
|
|
|
|
|
|
|
|
if (ts.build_on_all and not platform_filter and
|
|
|
|
platform_config.get('increased_platform_scope', True)):
|
|
|
|
# if build_on_all is set, we build on all platforms
|
|
|
|
platform_scope = self.platforms
|
|
|
|
elif ts.integration_platforms and self.options.integration:
|
|
|
|
# if integration is set, we build on integration platforms
|
|
|
|
platform_scope = _integration_platforms
|
|
|
|
elif ts.integration_platforms and not platform_filter:
|
|
|
|
# if integration platforms are set, we build on those and integration mode is set
|
|
|
|
# for this test suite, we build on integration platforms
|
|
|
|
if any(ts.id.startswith(i) for i in integration_mode_list):
|
|
|
|
platform_scope = _integration_platforms
|
2024-02-06 12:49:31 -05:00
|
|
|
else:
|
2025-03-23 05:08:55 -04:00
|
|
|
platform_scope = platforms + _integration_platforms
|
2021-01-14 09:34:29 -05:00
|
|
|
else:
|
|
|
|
platform_scope = platforms
|
|
|
|
|
2022-06-09 14:48:11 -04:00
|
|
|
integration = self.options.integration and ts.integration_platforms
|
2021-04-02 10:18:01 -05:00
|
|
|
|
|
|
|
# If there isn't any overlap between the platform_allow list and the platform_scope
|
|
|
|
# we set the scope to the platform_allow list
|
2024-11-27 17:50:07 +00:00
|
|
|
if (
|
|
|
|
ts.platform_allow
|
|
|
|
and not platform_filter
|
|
|
|
and not integration
|
|
|
|
and platform_config.get('increased_platform_scope', True)
|
|
|
|
):
|
2021-04-02 10:18:01 -05:00
|
|
|
a = set(platform_scope)
|
2022-03-23 14:55:41 -04:00
|
|
|
b = set(filter(lambda item: item.name in ts.platform_allow, self.platforms))
|
2021-04-02 10:18:01 -05:00
|
|
|
c = a.intersection(b)
|
|
|
|
if not c:
|
2024-11-27 17:50:07 +00:00
|
|
|
platform_scope = list(
|
|
|
|
filter(lambda item: item.name in ts.platform_allow, self.platforms)
|
|
|
|
)
|
2022-03-23 14:07:54 -04:00
|
|
|
# list of instances per testsuite, aka configurations.
|
2020-03-24 14:40:28 -04:00
|
|
|
instance_list = []
|
2024-12-05 10:16:03 -05:00
|
|
|
for itoolchain, plat in itertools.product(
|
|
|
|
ts.integration_toolchains or [None], platform_scope
|
|
|
|
):
|
|
|
|
if itoolchain:
|
|
|
|
toolchain = itoolchain
|
2025-01-09 21:37:42 -05:00
|
|
|
elif plat.arch in ['posix', 'unit']:
|
|
|
|
# workaround until toolchain variant in zephyr is overhauled and improved.
|
|
|
|
if self.env.toolchain in ['llvm']:
|
|
|
|
toolchain = 'llvm'
|
|
|
|
else:
|
|
|
|
toolchain = 'host'
|
2024-12-05 10:16:03 -05:00
|
|
|
else:
|
2025-01-09 21:37:42 -05:00
|
|
|
toolchain = "zephyr" if not self.env.toolchain else self.env.toolchain
|
2024-12-05 10:16:03 -05:00
|
|
|
|
|
|
|
instance = TestInstance(ts, plat, toolchain, self.env.outdir)
|
2020-07-27 12:27:13 -04:00
|
|
|
instance.run = instance.check_runnable(
|
2024-08-09 17:17:40 -04:00
|
|
|
self.options,
|
2023-09-21 11:39:30 +02:00
|
|
|
self.hwm
|
2020-03-24 14:40:28 -04:00
|
|
|
)
|
2020-05-21 10:35:33 -04:00
|
|
|
|
2024-08-09 17:17:40 -04:00
|
|
|
if not force_platform and self.check_platform(plat,exclude_platform):
|
2022-05-14 09:56:47 -04:00
|
|
|
instance.add_filter("Platform is excluded on command line.", Filters.CMD_LINE)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2022-03-23 14:55:41 -04:00
|
|
|
if (plat.arch == "unit") != (ts.type == "unit"):
|
2020-03-24 14:40:28 -04:00
|
|
|
# Discard silently
|
|
|
|
continue
|
|
|
|
|
2024-11-27 12:43:39 +00:00
|
|
|
if ts.modules and self.modules and not set(ts.modules).issubset(set(self.modules)):
|
2024-11-27 17:50:07 +00:00
|
|
|
instance.add_filter(
|
|
|
|
f"one or more required modules not available: {','.join(ts.modules)}",
|
|
|
|
Filters.MODULE
|
|
|
|
)
|
2022-04-18 22:34:39 -04:00
|
|
|
|
2022-11-21 15:17:21 -05:00
|
|
|
if self.options.level:
|
|
|
|
tl = self.get_level(self.options.level)
|
2024-04-19 11:51:15 +02:00
|
|
|
if tl is None:
|
2024-11-27 17:50:07 +00:00
|
|
|
instance.add_filter(
|
|
|
|
f"Unknown test level '{self.options.level}'",
|
|
|
|
Filters.TESTPLAN
|
|
|
|
)
|
2024-04-19 11:51:15 +02:00
|
|
|
else:
|
|
|
|
planned_scenarios = tl.scenarios
|
2024-11-27 17:50:07 +00:00
|
|
|
if (
|
|
|
|
ts.id not in planned_scenarios
|
|
|
|
and not set(ts.levels).intersection(set(tl.levels))
|
|
|
|
):
|
2024-04-19 11:51:15 +02:00
|
|
|
instance.add_filter("Not part of requested test plan", Filters.TESTPLAN)
|
2022-11-21 15:17:21 -05:00
|
|
|
|
2020-07-27 12:27:13 -04:00
|
|
|
if runnable and not instance.run:
|
2023-10-12 13:15:00 +02:00
|
|
|
instance.add_filter("Not runnable on device", Filters.CMD_LINE)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2024-11-27 17:50:07 +00:00
|
|
|
if (
|
|
|
|
self.options.integration
|
|
|
|
and ts.integration_platforms
|
|
|
|
and plat.name not in ts.integration_platforms
|
|
|
|
):
|
2022-05-14 09:56:47 -04:00
|
|
|
instance.add_filter("Not part of integration platforms", Filters.TESTSUITE)
|
2020-05-28 08:02:54 -04:00
|
|
|
|
2022-03-23 14:55:41 -04:00
|
|
|
if ts.skip:
|
2022-10-12 20:19:43 -04:00
|
|
|
instance.add_filter("Skip filter", Filters.SKIP)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2022-03-23 14:55:41 -04:00
|
|
|
if tag_filter and not ts.tags.intersection(tag_filter):
|
2022-05-14 09:56:47 -04:00
|
|
|
instance.add_filter("Command line testsuite tag filter", Filters.CMD_LINE)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2023-06-22 13:58:25 +00:00
|
|
|
if slow_only and not ts.slow:
|
|
|
|
instance.add_filter("Not a slow test", Filters.CMD_LINE)
|
|
|
|
|
2022-03-23 14:55:41 -04:00
|
|
|
if exclude_tag and ts.tags.intersection(exclude_tag):
|
2022-05-14 09:56:47 -04:00
|
|
|
instance.add_filter("Command line testsuite exclude filter", Filters.CMD_LINE)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2024-01-30 15:54:22 -05:00
|
|
|
if testsuite_filter:
|
|
|
|
normalized_f = [os.path.basename(_ts) for _ts in testsuite_filter]
|
|
|
|
if ts.id not in normalized_f:
|
|
|
|
instance.add_filter("Testsuite name filter", Filters.CMD_LINE)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
|
|
|
if arch_filter and plat.arch not in arch_filter:
|
2022-05-14 09:56:47 -04:00
|
|
|
instance.add_filter("Command line testsuite arch filter", Filters.CMD_LINE)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2020-05-01 14:57:00 -04:00
|
|
|
if not force_platform:
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2022-03-23 14:55:41 -04:00
|
|
|
if ts.arch_allow and plat.arch not in ts.arch_allow:
|
2024-12-11 07:18:00 -05:00
|
|
|
instance.add_filter("Not in testsuite arch allow list", Filters.TESTSUITE)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2022-03-23 14:55:41 -04:00
|
|
|
if ts.arch_exclude and plat.arch in ts.arch_exclude:
|
2024-12-11 07:18:00 -05:00
|
|
|
instance.add_filter("In testsuite arch exclude", Filters.TESTSUITE)
|
2020-05-01 14:57:00 -04:00
|
|
|
|
2024-11-21 10:31:46 -05:00
|
|
|
if ts.vendor_allow and plat.vendor not in ts.vendor_allow:
|
2024-11-27 17:50:07 +00:00
|
|
|
instance.add_filter(
|
2024-12-11 07:18:00 -05:00
|
|
|
"Not in testsuite vendor allow list",
|
2024-11-27 17:50:07 +00:00
|
|
|
Filters.TESTSUITE
|
|
|
|
)
|
2024-11-21 10:31:46 -05:00
|
|
|
|
|
|
|
if ts.vendor_exclude and plat.vendor in ts.vendor_exclude:
|
2024-12-11 07:18:00 -05:00
|
|
|
instance.add_filter("In testsuite vendor exclude", Filters.TESTSUITE)
|
2024-11-21 10:31:46 -05:00
|
|
|
|
2022-03-23 14:55:41 -04:00
|
|
|
if ts.platform_exclude and plat.name in ts.platform_exclude:
|
2024-12-11 07:18:00 -05:00
|
|
|
instance.add_filter("In testsuite platform exclude", Filters.TESTSUITE)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2022-03-23 14:55:41 -04:00
|
|
|
if ts.toolchain_exclude and toolchain in ts.toolchain_exclude:
|
2024-12-11 07:18:00 -05:00
|
|
|
instance.add_filter("In testsuite toolchain exclude", Filters.TOOLCHAIN)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
|
|
|
if platform_filter and plat.name not in platform_filter:
|
2022-05-14 09:56:47 -04:00
|
|
|
instance.add_filter("Command line platform filter", Filters.CMD_LINE)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2023-03-08 15:54:02 +01:00
|
|
|
if ts.platform_allow \
|
|
|
|
and plat.name not in ts.platform_allow \
|
|
|
|
and not (platform_filter and force_platform):
|
2022-05-14 09:56:47 -04:00
|
|
|
instance.add_filter("Not in testsuite platform allow list", Filters.TESTSUITE)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2022-05-14 09:00:04 -04:00
|
|
|
if ts.platform_type and plat.type not in ts.platform_type:
|
2022-05-14 09:56:47 -04:00
|
|
|
instance.add_filter("Not in testsuite platform type list", Filters.TESTSUITE)
|
2022-05-14 09:00:04 -04:00
|
|
|
|
2022-03-23 14:55:41 -04:00
|
|
|
if ts.toolchain_allow and toolchain not in ts.toolchain_allow:
|
2023-09-20 13:21:34 +02:00
|
|
|
instance.add_filter("Not in testsuite toolchain allow list", Filters.TOOLCHAIN)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
|
|
|
if not plat.env_satisfied:
|
2024-11-27 17:50:07 +00:00
|
|
|
instance.add_filter(
|
|
|
|
"Environment ({}) not satisfied".format(", ".join(plat.env)),
|
2024-11-30 09:41:12 -05:00
|
|
|
Filters.ENVIRONMENT
|
2024-11-27 17:50:07 +00:00
|
|
|
)
|
2025-02-23 08:38:11 -05:00
|
|
|
if plat.type == 'native' and sys.platform != 'linux':
|
|
|
|
instance.add_filter("Native platform requires Linux", Filters.ENVIRONMENT)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
|
|
|
if not force_toolchain \
|
2024-12-05 10:16:03 -05:00
|
|
|
and toolchain and (toolchain not in plat.supported_toolchains):
|
2024-12-11 07:18:00 -05:00
|
|
|
instance.add_filter(
|
|
|
|
f"Not supported by the toolchain: {toolchain}",
|
|
|
|
Filters.PLATFORM
|
|
|
|
)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2022-03-23 14:55:41 -04:00
|
|
|
if plat.ram < ts.min_ram:
|
2022-05-14 09:56:47 -04:00
|
|
|
instance.add_filter("Not enough RAM", Filters.PLATFORM)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2023-04-05 15:21:44 +02:00
|
|
|
if ts.harness:
|
2024-10-16 17:35:10 +01:00
|
|
|
sim = plat.simulator_by_name(self.options.sim_name)
|
2024-11-26 13:37:02 +00:00
|
|
|
if ts.harness == 'robot' and not (sim and sim.name == 'renode'):
|
2024-11-27 17:50:07 +00:00
|
|
|
instance.add_filter(
|
|
|
|
"No robot support for the selected platform",
|
|
|
|
Filters.SKIP
|
|
|
|
)
|
2023-04-05 15:21:44 +02:00
|
|
|
|
2022-03-23 14:55:41 -04:00
|
|
|
if ts.depends_on:
|
|
|
|
dep_intersection = ts.depends_on.intersection(set(plat.supported))
|
|
|
|
if dep_intersection != set(ts.depends_on):
|
2025-01-21 13:07:33 -06:00
|
|
|
instance.add_filter(
|
|
|
|
f"No hardware support for {set(ts.depends_on)-dep_intersection}",
|
|
|
|
Filters.PLATFORM
|
|
|
|
)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2022-03-23 14:55:41 -04:00
|
|
|
if plat.flash < ts.min_flash:
|
2022-05-14 09:56:47 -04:00
|
|
|
instance.add_filter("Not enough FLASH", Filters.PLATFORM)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2022-03-23 14:55:41 -04:00
|
|
|
if set(plat.ignore_tags) & ts.tags:
|
2024-11-27 17:50:07 +00:00
|
|
|
instance.add_filter(
|
|
|
|
"Excluded tags per platform (exclude_tags)",
|
|
|
|
Filters.PLATFORM
|
|
|
|
)
|
2020-07-16 16:27:04 -04:00
|
|
|
|
2022-03-23 14:55:41 -04:00
|
|
|
if plat.only_tags and not set(plat.only_tags) & ts.tags:
|
2022-05-14 09:56:47 -04:00
|
|
|
instance.add_filter("Excluded tags per platform (only_tags)", Filters.PLATFORM)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
2023-08-03 10:56:41 +01:00
|
|
|
if ts.required_snippets:
|
|
|
|
missing_snippet = False
|
|
|
|
snippet_args = {"snippets": ts.required_snippets}
|
2024-11-27 17:50:07 +00:00
|
|
|
found_snippets = snippets.find_snippets_in_roots(
|
|
|
|
snippet_args,
|
|
|
|
[*self.env.snippet_roots, Path(ts.source_dir)]
|
|
|
|
)
|
2023-08-03 10:56:41 +01:00
|
|
|
|
|
|
|
# Search and check that all required snippet files are found
|
|
|
|
for this_snippet in snippet_args['snippets']:
|
|
|
|
if this_snippet not in found_snippets:
|
2024-11-27 16:57:32 +00:00
|
|
|
logger.error(
|
|
|
|
f"Can't find snippet '{this_snippet}' for test '{ts.name}'"
|
|
|
|
)
|
2024-05-28 12:31:53 +00:00
|
|
|
instance.status = TwisterStatus.ERROR
|
2023-08-03 10:56:41 +01:00
|
|
|
instance.reason = f"Snippet {this_snippet} not found"
|
|
|
|
missing_snippet = True
|
|
|
|
break
|
|
|
|
|
|
|
|
if not missing_snippet:
|
|
|
|
# Look for required snippets and check that they are applicable for these
|
|
|
|
# platforms/boards
|
2023-11-11 11:41:45 +10:00
|
|
|
for this_snippet in snippet_args['snippets']:
|
2023-08-03 10:56:41 +01:00
|
|
|
matched_snippet_board = False
|
|
|
|
|
|
|
|
# If the "appends" key is present with at least one entry then this
|
|
|
|
# snippet applies to all boards and further platform-specific checks
|
|
|
|
# are not required
|
|
|
|
if found_snippets[this_snippet].appends:
|
|
|
|
continue
|
|
|
|
|
|
|
|
for this_board in found_snippets[this_snippet].board2appends:
|
|
|
|
if this_board.startswith('/'):
|
|
|
|
match = re.search(this_board[1:-1], plat.name)
|
|
|
|
if match is not None:
|
|
|
|
matched_snippet_board = True
|
|
|
|
break
|
|
|
|
elif this_board == plat.name:
|
|
|
|
matched_snippet_board = True
|
|
|
|
break
|
|
|
|
|
|
|
|
if matched_snippet_board is False:
|
|
|
|
instance.add_filter("Snippet not supported", Filters.PLATFORM)
|
|
|
|
break
|
|
|
|
|
2023-11-27 22:13:55 +00:00
|
|
|
# handle quarantined tests
|
2024-03-29 19:51:14 +08:00
|
|
|
self.handle_quarantined_tests(instance, plat)
|
2023-11-27 22:13:55 +00:00
|
|
|
|
2024-11-27 17:50:07 +00:00
|
|
|
# platform_key is a list of unique platform attributes that form a unique key
|
|
|
|
# a test will match against to determine if it should be scheduled to run.
|
|
|
|
# A key containing a field name that the platform does not have
|
|
|
|
# will filter the platform.
|
2022-11-15 11:49:06 -06:00
|
|
|
#
|
2023-11-20 15:36:23 +01:00
|
|
|
# A simple example is keying on arch and simulation
|
|
|
|
# to run a test once per unique (arch, simulation) platform.
|
2024-11-27 17:50:07 +00:00
|
|
|
if (
|
|
|
|
not ignore_platform_key
|
|
|
|
and hasattr(ts, 'platform_key')
|
|
|
|
and len(ts.platform_key) > 0
|
|
|
|
):
|
2022-11-15 11:49:06 -06:00
|
|
|
key_fields = sorted(set(ts.platform_key))
|
2024-10-16 17:35:10 +01:00
|
|
|
keys = [getattr(plat, key_field, None) for key_field in key_fields]
|
2023-11-20 15:36:23 +01:00
|
|
|
for key in keys:
|
|
|
|
if key is None or key == 'na':
|
|
|
|
instance.add_filter(
|
2024-11-27 17:50:07 +00:00
|
|
|
"Excluded platform missing key fields"
|
|
|
|
f" demanded by test {key_fields}",
|
2023-11-20 15:36:23 +01:00
|
|
|
Filters.PLATFORM
|
|
|
|
)
|
|
|
|
break
|
|
|
|
else:
|
|
|
|
test_keys = copy.deepcopy(keys)
|
|
|
|
test_keys.append(ts.name)
|
|
|
|
test_keys = tuple(test_keys)
|
|
|
|
keyed_test = keyed_tests.get(test_keys)
|
2022-11-15 11:49:06 -06:00
|
|
|
if keyed_test is not None:
|
2024-11-27 17:50:07 +00:00
|
|
|
plat_key = {
|
|
|
|
key_field: getattr(
|
|
|
|
keyed_test['plat'],
|
|
|
|
key_field
|
|
|
|
) for key_field in key_fields
|
|
|
|
}
|
|
|
|
instance.add_filter(
|
|
|
|
f"Already covered for key {key}"
|
|
|
|
f" by platform {keyed_test['plat'].name} having key {plat_key}",
|
|
|
|
Filters.PLATFORM_KEY
|
|
|
|
)
|
2022-11-15 11:49:06 -06:00
|
|
|
else:
|
2024-08-09 17:17:40 -04:00
|
|
|
# do not add a platform to keyed tests if previously
|
|
|
|
# filtered
|
|
|
|
|
2023-11-27 22:13:55 +00:00
|
|
|
if not instance.filters:
|
2023-11-20 15:36:23 +01:00
|
|
|
keyed_tests[test_keys] = {'plat': plat, 'ts': ts}
|
2022-11-15 11:49:06 -06:00
|
|
|
|
2020-03-24 14:40:28 -04:00
|
|
|
# if nothing stopped us until now, it means this configuration
|
|
|
|
# needs to be added.
|
|
|
|
instance_list.append(instance)
|
|
|
|
|
2022-03-23 14:07:54 -04:00
|
|
|
# no configurations, so jump to next testsuite
|
2020-03-24 14:40:28 -04:00
|
|
|
if not instance_list:
|
|
|
|
continue
|
|
|
|
|
2020-12-07 11:40:19 -05:00
|
|
|
# if twister was launched with no platform options at all, we
|
2020-03-24 14:40:28 -04:00
|
|
|
# take all default platforms
|
2022-03-23 14:55:41 -04:00
|
|
|
if default_platforms and not ts.build_on_all and not integration:
|
|
|
|
if ts.platform_allow:
|
2024-08-09 17:17:40 -04:00
|
|
|
_default_p = set(self.default_platforms)
|
|
|
|
_platform_allow = set(ts.platform_allow)
|
|
|
|
_intersection = _default_p.intersection(_platform_allow)
|
|
|
|
if _intersection:
|
2024-11-27 17:50:07 +00:00
|
|
|
aa = list(
|
|
|
|
filter(
|
|
|
|
lambda _scenario: _scenario.platform.name in _intersection,
|
|
|
|
instance_list
|
|
|
|
)
|
|
|
|
)
|
2020-03-24 14:40:28 -04:00
|
|
|
self.add_instances(aa)
|
|
|
|
else:
|
2021-04-02 10:18:01 -05:00
|
|
|
self.add_instances(instance_list)
|
2020-03-24 14:40:28 -04:00
|
|
|
else:
|
2024-02-06 12:49:31 -05:00
|
|
|
# add integration platforms to the list of default
|
|
|
|
# platforms, even if we are not in integration mode
|
|
|
|
_platforms = self.default_platforms + ts.integration_platforms
|
2024-11-27 17:50:07 +00:00
|
|
|
instances = list(
|
|
|
|
filter(lambda ts: ts.platform.name in _platforms, instance_list)
|
|
|
|
)
|
2020-03-24 14:40:28 -04:00
|
|
|
self.add_instances(instances)
|
2021-03-12 18:21:29 -05:00
|
|
|
elif integration:
|
2024-11-27 17:50:07 +00:00
|
|
|
instances = list(
|
|
|
|
filter(
|
|
|
|
lambda item: item.platform.name in ts.integration_platforms,
|
|
|
|
instance_list
|
|
|
|
)
|
|
|
|
)
|
2021-03-03 14:05:54 -05:00
|
|
|
self.add_instances(instances)
|
|
|
|
|
2020-08-26 15:47:25 -04:00
|
|
|
elif emulation_platforms:
|
|
|
|
self.add_instances(instance_list)
|
2024-11-27 17:50:07 +00:00
|
|
|
for instance in list(
|
|
|
|
filter(
|
|
|
|
lambda inst: not inst.platform.simulator_by_name(self.options.sim_name),
|
|
|
|
instance_list
|
|
|
|
)
|
|
|
|
):
|
2023-09-18 16:26:27 +02:00
|
|
|
instance.add_filter("Not an emulated platform", Filters.CMD_LINE)
|
2023-06-10 00:55:21 +00:00
|
|
|
elif vendor_platforms:
|
|
|
|
self.add_instances(instance_list)
|
2024-11-27 17:50:07 +00:00
|
|
|
for instance in list(
|
|
|
|
filter(
|
|
|
|
lambda inst: inst.platform.vendor not in vendor_filter,
|
|
|
|
instance_list
|
|
|
|
)
|
|
|
|
):
|
2023-06-10 00:55:21 +00:00
|
|
|
instance.add_filter("Not a selected vendor platform", Filters.CMD_LINE)
|
2020-03-24 14:40:28 -04:00
|
|
|
else:
|
|
|
|
self.add_instances(instance_list)
|
|
|
|
|
|
|
|
for _, case in self.instances.items():
|
2024-11-30 07:46:54 -05:00
|
|
|
# Do not create files for filtered instances
|
|
|
|
if case.status == TwisterStatus.FILTER:
|
|
|
|
continue
|
|
|
|
# set run_id for each unfiltered instance
|
|
|
|
case.setup_run_id()
|
2024-08-09 17:17:40 -04:00
|
|
|
case.create_overlay(case.platform,
|
|
|
|
self.options.enable_asan,
|
|
|
|
self.options.enable_ubsan,
|
|
|
|
self.options.enable_coverage,
|
|
|
|
self.options.coverage_platform)
|
2020-03-24 14:40:28 -04:00
|
|
|
|
|
|
|
self.selected_platforms = set(p.platform.name for p in self.instances.values())
|
|
|
|
|
2024-11-27 17:50:07 +00:00
|
|
|
filtered_instances = list(
|
|
|
|
filter(lambda item: item.status == TwisterStatus.FILTER, self.instances.values())
|
|
|
|
)
|
2022-05-14 09:56:47 -04:00
|
|
|
for filtered_instance in filtered_instances:
|
2023-03-20 11:15:20 +01:00
|
|
|
change_skip_to_error_if_integration(self.options, filtered_instance)
|
2020-07-17 11:13:50 +02:00
|
|
|
|
2022-06-08 07:08:30 -04:00
|
|
|
filtered_instance.add_missing_case_status(filtered_instance.status)
|
2021-10-21 14:05:54 +02:00
|
|
|
|
2020-03-24 14:40:28 -04:00
|
|
|
def add_instances(self, instance_list):
    """Register every instance from *instance_list*, keyed by its name.

    A later instance with the same name silently replaces an earlier one,
    matching plain dict assignment semantics.
    """
    self.instances.update({inst.name: inst for inst in instance_list})
|
|
|
|
|
2020-09-11 13:56:33 -04:00
|
|
|
|
2022-03-23 14:07:54 -04:00
|
|
|
def get_testsuite(self, identifier):
    """Return all testsuites whose ``id`` equals *identifier* (possibly none)."""
    return [suite for suite in self.testsuites.values() if suite.id == identifier]
|
|
|
|
|
|
|
|
def get_testcase(self, identifier):
    """Return every testsuite containing a testcase named *identifier*.

    A suite appears once per matching testcase, mirroring the original
    append-per-match behavior.
    """
    return [
        suite
        for suite in self.testsuites.values()
        for case in suite.testcases
        if case.name == identifier
    ]
|
|
|
|
|
2021-12-10 18:18:50 +01:00
|
|
|
def verify_platforms_existence(self, platform_names_to_verify, log_info=""):
    """
    Verify if platform name (passed by --platform option, or in yaml file
    as platform_allow or integration_platforms options) is correct. If not -
    log and raise error.
    """
    verified_names = []
    for platform in platform_names_to_verify:
        # Unknown platform names are fatal: report and bail out immediately.
        if platform not in self.platform_names:
            logger.error(f"{log_info} - unrecognized platform - {platform}")
            sys.exit(2)
        resolved = self.get_platform(platform)
        if resolved:
            verified_names.append(resolved.name)
    return verified_names
|
2021-12-10 18:18:50 +01:00
|
|
|
|
2022-01-14 16:36:01 +01:00
|
|
|
def create_build_dir_links(self):
    """Create a link for each non-skipped instance's build directory.

    The links are collected under a common "twister_links" folder inside
    the output directory; those shortened paths are what gets handed to
    the CMake command in later steps.
    """
    links_dir_path = os.path.join(self.env.outdir, "twister_links")
    if not os.path.exists(links_dir_path):
        os.mkdir(links_dir_path)

    for instance in self.instances.values():
        if instance.status == TwisterStatus.SKIP:
            continue
        self._create_build_dir_link(links_dir_path, instance)
|
|
|
|
|
|
|
|
def _create_build_dir_link(self, links_dir_path, instance):
    """Shorten *instance*'s build path by routing it through a link.

    The build directory is first created under its original ("long") path,
    then a short link pointing at it is placed inside *links_dir_path*, and
    finally ``instance.build_dir`` is replaced with that link. CMake then
    receives the short path, which helps keep path lengths below OS limits
    (significant when building on Windows).
    """
    os.makedirs(instance.build_dir, exist_ok=True)

    link_path = os.path.join(links_dir_path, f"test_{self.link_dir_counter}")

    if os.name == "nt":  # Windows: create a directory junction
        subprocess.call(
            ["mklink", "/J", f"{link_path}", os.path.normpath(instance.build_dir)],
            shell=True,
        )
    else:  # Linux / macOS: plain symbolic link
        os.symlink(instance.build_dir, link_path)

    # From here on the short link stands in for the original build
    # directory; it is what gets passed to the CMake command.
    instance.build_dir = link_path
    self.link_dir_counter += 1
|
2023-03-20 11:15:20 +01:00
|
|
|
|
|
|
|
|
|
|
|
def change_skip_to_error_if_integration(options, instance):
    """All skips on integration_platforms are treated as errors."""
    if instance.platform.name not in instance.testsuite.integration_platforms:
        return
    # Filter types in this set are considered legitimate reasons to skip
    # even on an integration platform, so they are not escalated to errors.
    benign_filters = {
        Filters.CMD_LINE, Filters.SKIP, Filters.PLATFORM_KEY,
        Filters.TOOLCHAIN, Filters.MODULE, Filters.TESTPLAN,
        Filters.QUARANTINE, Filters.ENVIRONMENT,
    }
    if {entry['type'] for entry in instance.filters} & benign_filters:
        return
    instance.status = TwisterStatus.ERROR
    instance.reason += " but is one of the integration platforms"
    logger.debug(
        f"Changing status of {instance.name} to ERROR because it is an integration platform"
    )
|