# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018-2022 Intel Corporation
# Copyright 2022 NXP
# SPDX-License-Identifier: Apache-2.0

from __future__ import annotations
import os
import hashlib
import random
import logging
import shutil
import glob

from twisterlib.testsuite import TestCase, TestSuite
from twisterlib.platform import Platform
from twisterlib.error import BuildError
from twisterlib.size_calc import SizeCalculator
from twisterlib.handlers import (
    Handler,
    SimulationHandler,
    BinaryHandler,
    QEMUHandler,
    DeviceHandler,
    SUPPORTED_SIMS,
    SUPPORTED_SIMS_IN_PYTEST,
)

logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)
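
# Rough usage sketch (illustrative only; the attribute names on `env` and
# `options` below are assumptions about the surrounding twister code, not
# something defined in this module):
#
#   instance = TestInstance(testsuite, platform, outdir="twister-out")
#   instance.setup_handler(env)   # needs env.options, env.generator, env.generator_cmd
#   if instance.check_runnable(fixtures=options.fixture):
#       ...  # hand the instance over to the runner and its Handler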


class TestInstance:
    """Class representing the execution of a particular TestSuite on a platform.

    @param testsuite The TestSuite object we want to build/execute
    @param platform Platform object that we want to build and run against
    @param outdir Base directory for all test results. The actual
           output directory used is <outdir>/<platform>/<test suite name>
    """

    __test__ = False

    def __init__(self, testsuite, platform, outdir):
        self.testsuite: TestSuite = testsuite
        self.platform: Platform = platform

        self.status = None
        self.reason = "Unknown"
        self.metrics = dict()
        self.handler = None
        self.outdir = outdir
        self.execution_time = 0
        self.build_time = 0
        self.retries = 0

        self.name = os.path.join(platform.name, testsuite.name)
        self.run_id = self._get_run_id()
        self.dut = None
        if testsuite.detailed_test_id:
            self.build_dir = os.path.join(outdir, platform.name, testsuite.name)
        else:
            # if suite is not in zephyr, keep only the part after ".." in reconstructed dir structure
            source_dir_rel = testsuite.source_dir_rel.rsplit(os.pardir+os.path.sep, 1)[-1]
            self.build_dir = os.path.join(outdir, platform.name, source_dir_rel, testsuite.name)

        self.domains = None

        self.run = False
        self.testcases: list[TestCase] = []
        self.init_cases()
        self.filters = []
        self.filter_type = None

    def add_filter(self, reason, filter_type):
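        """Mark this instance as filtered and record why.

        Every filter is appended to self.filters; status, reason and
        filter_type reflect the most recent one.
        """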
        self.filters.append({'type': filter_type, 'reason': reason})
        self.status = "filtered"
        self.reason = reason
        self.filter_type = filter_type

    # Workaround for an issue with copying objects from the testsuite;
    # this needs a better solution.
    def init_cases(self):
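        """Create this instance's own TestCase objects from the suite's cases."""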
        for c in self.testsuite.testcases:
            self.add_testcase(c.name, freeform=c.freeform)

    def _get_run_id(self):
        """Generate a run id from the instance's unique identifier and a random number."""
        hash_object = hashlib.md5(self.name.encode())
        random_str = f"{random.getrandbits(64)}".encode()
        hash_object.update(random_str)
        return hash_object.hexdigest()

    def add_missing_case_status(self, status, reason=None):
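        """Fill in a status for test cases that did not report one.

        Cases still marked 'started' are considered failed; cases with no
        status at all get the given status and reason (falling back to the
        instance-level reason).
        """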
        for case in self.testcases:
            if case.status == 'started':
                case.status = "failed"
            elif not case.status:
                case.status = status
                if reason:
                    case.reason = reason
                else:
                    case.reason = self.reason

    def __getstate__(self):
        d = self.__dict__.copy()
        return d

    def __setstate__(self, d):
        self.__dict__.update(d)

    def __lt__(self, other):
        return self.name < other.name

    def set_case_status_by_name(self, name, status, reason=None):
        tc = self.get_case_or_create(name)
        tc.status = status
        if reason:
            tc.reason = reason
        return tc

    def add_testcase(self, name, freeform=False):
        tc = TestCase(name=name)
        tc.freeform = freeform
        self.testcases.append(tc)
        return tc

    def get_case_by_name(self, name):
        for c in self.testcases:
            if c.name == name:
                return c
        return None

    def get_case_or_create(self, name):
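        """Return the named test case, creating a placeholder one if missing."""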
        for c in self.testcases:
            if c.name == name:
                return c

        logger.debug(f"Could not find a matching testcase for {name}")
        tc = TestCase(name=name)
        self.testcases.append(tc)
        return tc

    @staticmethod
    def testsuite_runnable(testsuite, fixtures):
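        """Check whether the suite can be executed, not just built.

        Only harnesses that can report results are runnable, and a suite
        requiring a fixture is runnable only if that fixture is available.
        """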
        can_run = False
        # console harness allows us to run the test and capture data.
        if testsuite.harness in ['console', 'ztest', 'pytest', 'test', 'gtest', 'robot']:
            can_run = True
            # if we have a fixture that is also being supplied on the
            # command-line, then we need to run the test, not just build it.
            fixture = testsuite.harness_config.get('fixture')
            if fixture:
                can_run = fixture in fixtures

        return can_run

    def setup_handler(self, env):
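        """Create and attach the Handler used to execute this instance.

        Picks a DeviceHandler, QEMUHandler, SimulationHandler or BinaryHandler
        depending on the options and platform; if no runner applies, the
        plain, not-ready base Handler is kept.
        """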
        if self.handler:
            return

        options = env.options
        handler = Handler(self, "")
        if options.device_testing:
            handler = DeviceHandler(self, "device")
            handler.call_make_run = False
            handler.ready = True
        elif self.platform.simulation != "na":
            if self.platform.simulation == "qemu":
                handler = QEMUHandler(self, "qemu")
                handler.args.append(f"QEMU_PIPE={handler.get_fifo()}")
                handler.ready = True
            else:
                handler = SimulationHandler(self, self.platform.simulation)

            if self.platform.simulation_exec and shutil.which(self.platform.simulation_exec):
                handler.ready = True
        elif self.testsuite.type == "unit":
            handler = BinaryHandler(self, "unit")
            handler.binary = os.path.join(self.build_dir, "testbinary")
            if options.enable_coverage:
                handler.args.append("COVERAGE=1")
            handler.call_make_run = False
            handler.ready = True

        if handler:
            handler.options = options
            handler.generator_cmd = env.generator_cmd
            handler.generator = env.generator
            handler.suite_name_check = not options.disable_suite_name_check
        self.handler = handler

    # Global testsuite parameters
    def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[], hardware_map=None):
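        """Decide whether this instance can actually be run.

        Takes the host OS, build_only/slow settings, the target type
        (unit test, native build or supported simulator with its executable
        present), the harness, and the available fixtures (local or from the
        hardware map) into account.
        """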

        # running on simulators is currently not supported on Windows
        if os.name == 'nt' and self.platform.simulation != 'na':
            return False

        # we asked for build-only on the command line
        if self.testsuite.build_only:
            return False

        # Do not run slow tests:
        skip_slow = self.testsuite.slow and not enable_slow
        if skip_slow:
            return False

        target_ready = bool(self.testsuite.type == "unit" or \
                            self.platform.type == "native" or \
                            self.platform.simulation in SUPPORTED_SIMS or \
                            filter == 'runnable')

        # check if test is runnable in pytest
        if self.testsuite.harness == 'pytest':
            target_ready = bool(filter == 'runnable' or self.platform.simulation in SUPPORTED_SIMS_IN_PYTEST)

        SUPPORTED_SIMS_WITH_EXEC = ['nsim', 'mdb-nsim', 'renode', 'tsim', 'native']
        if filter != 'runnable' and \
                self.platform.simulation in SUPPORTED_SIMS_WITH_EXEC and \
                self.platform.simulation_exec:
            if not shutil.which(self.platform.simulation_exec):
                target_ready = False

        testsuite_runnable = self.testsuite_runnable(self.testsuite, fixtures)

        if hardware_map:
            for h in hardware_map.duts:
                if (h.platform == self.platform.name and
                        self.testsuite_runnable(self.testsuite, h.fixtures)):
                    testsuite_runnable = True
                    break

        return testsuite_runnable and target_ready

    def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
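        """Assemble extra Kconfig fragments for this instance.

        Combines the suite's extra_configs (honoring arch:/platform:
        prefixes) with coverage/ASan/UBSan options and, when non-empty,
        writes the result to twister/testsuite_extra.conf in the build
        directory. Returns the generated content.
        """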
        # Create this in a "twister/" subdirectory otherwise this
        # will pass this overlay to kconfig.py *twice* and kconfig.cmake
        # will silently give that second time precedence over any
        # --extra-args=CONFIG_*
        subdir = os.path.join(self.build_dir, "twister")

        content = ""

        if self.testsuite.extra_configs:
            new_config_list = []
            # some configs might be conditional on arch or platform, see if we
            # have a namespace defined and apply only if the namespace matches.
            # we currently support both arch: and platform:
            for config in self.testsuite.extra_configs:
                cond_config = config.split(":")
                if cond_config[0] == "arch" and len(cond_config) == 3:
                    if self.platform.arch == cond_config[1]:
                        new_config_list.append(cond_config[2])
                elif cond_config[0] == "platform" and len(cond_config) == 3:
                    if self.platform.name == cond_config[1]:
                        new_config_list.append(cond_config[2])
                else:
                    new_config_list.append(config)

            content = "\n".join(new_config_list)

        if enable_coverage:
            if platform.name in coverage_platform:
                content = content + "\nCONFIG_COVERAGE=y"
                content = content + "\nCONFIG_COVERAGE_DUMP=y"

        if enable_asan:
            if platform.type == "native":
                content = content + "\nCONFIG_ASAN=y"

        if enable_ubsan:
            if platform.type == "native":
                content = content + "\nCONFIG_UBSAN=y"

        if content:
            os.makedirs(subdir, exist_ok=True)
            file = os.path.join(subdir, "testsuite_extra.conf")
            with open(file, "w") as f:
                f.write(content)

        return content

    def calculate_sizes(self, from_buildlog: bool = False, generate_warning: bool = True) -> SizeCalculator:
        """Get the RAM/ROM sizes of a test case.

        This can only be run after the instance has been executed by
        MakeGenerator, otherwise there won't be any binaries to measure.

        @return A SizeCalculator object
        """
        elf_filepath = self.get_elf_file()
        buildlog_filepath = self.get_buildlog_file() if from_buildlog else ''
        return SizeCalculator(elf_filename=elf_filepath,
                              extra_sections=self.testsuite.extra_sections,
                              buildlog_filepath=buildlog_filepath,
                              generate_warning=generate_warning)

    def get_elf_file(self) -> str:
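        """Return the path to the binary built for this instance.

        Looks for zephyr/*.elf (or the unit-test "testbinary") in the build
        directory, skipping auxiliary images such as remapped, zefi, qemu and
        *_pre files.

        @raises BuildError: No suitable output binary was found.
        """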

        if self.testsuite.sysbuild:
            build_dir = self.domains.get_default_domain().build_dir
        else:
            build_dir = self.build_dir

        fns = glob.glob(os.path.join(build_dir, "zephyr", "*.elf"))
        fns.extend(glob.glob(os.path.join(build_dir, "testbinary")))
        blocklist = [
            'remapped',  # used for xtensa platforms
            'zefi',      # EFI for Zephyr
            'qemu',      # elf files generated after running in qemu
            '_pre']
        fns = [x for x in fns if not any(bad in os.path.basename(x) for bad in blocklist)]
        if not fns:
            raise BuildError("Missing output binary")
        elif len(fns) > 1:
            logger.warning(f"multiple ELF files detected: {', '.join(fns)}")
        return fns[0]

    def get_buildlog_file(self) -> str:
        """Get the path to the build.log file.

        @raises BuildError: Incorrect number (!=1) of build logs.
        @return: Path to build.log (str).
        """
        buildlog_paths = glob.glob(os.path.join(self.build_dir, "build.log"))
        if len(buildlog_paths) != 1:
            raise BuildError("Missing/multiple build.log file.")
        return buildlog_paths[0]

    def __repr__(self):
        return "<TestSuite %s on %s>" % (self.testsuite.name, self.platform.name)