#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018-2022 Intel Corporation
# Copyright 2022 NXP
# SPDX-License-Identifier: Apache-2.0

import math
import os
import sys
import csv
import time
import signal
import logging
import shlex
import subprocess
import threading
import select
import re
import psutil
from twisterlib.environment import ZEPHYR_BASE
from twisterlib.error import TwisterException

sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/build_helpers"))
from domains import Domains

try:
    import serial
except ImportError:
    print("Install pyserial python module with pip to use --device-testing option.")

try:
    import pty
except ImportError as capture_error:
    if os.name == "nt":  # "nt" means that program is running on Windows OS
        pass  # "--device-serial-pty" option is not supported on Windows OS
    else:
        raise capture_error

logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)

SUPPORTED_SIMS = ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp", "xt-sim", "native"]


class HarnessImporter:
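    """Import a harness class from twisterlib.harness by name.

    When no name is given, the generic "Test" harness class is used.
    """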

    def __init__(self, name):
        sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister/twisterlib"))
        module = __import__("harness")
        if name:
            my_class = getattr(module, name)
        else:
            my_class = getattr(module, "Test")

        self.instance = my_class()


class Handler:
    def __init__(self, instance, type_str="build"):
        """Constructor

        """
        self.options = None

        self.state = "waiting"
        self.run = False
        self.type_str = type_str

        self.binary = None
        self.pid_fn = None
        self.call_make_run = True

        self.name = instance.name
        self.instance = instance
        self.timeout = math.ceil(instance.testsuite.timeout * instance.platform.timeout_multiplier)
        self.sourcedir = instance.testsuite.source_dir
        self.build_dir = instance.build_dir
        self.log = os.path.join(self.build_dir, "handler.log")
        self.returncode = 0
        self.generator = None
        self.generator_cmd = None
        self.suite_name_check = True
        self.ready = False

        self.args = []
        self.terminated = False

    def record(self, harness):
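        """Append the harness recording, if any, to recording.csv in the build directory."""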
        if harness.recording:
            filename = os.path.join(self.build_dir, "recording.csv")
            with open(filename, "at") as csvfile:
                cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
                cw.writerow(harness.fieldnames)
                for instance in harness.recording:
                    cw.writerow(instance)

    def terminate(self, proc):
        # Encapsulate the terminate functionality so we do it consistently wherever
        # we might want to terminate the proc. We need try_kill_process_by_pid
        # because of how both newer ninja (1.6.0 or greater) and .NET / renode
        # work. Newer ninja versions don't seem to pass SIGTERM down to the children,
        # so we need try_kill_process_by_pid.
        for child in psutil.Process(proc.pid).children(recursive=True):
            try:
                os.kill(child.pid, signal.SIGTERM)
            except ProcessLookupError:
                pass
        proc.terminate()
        # sleep for a while before attempting to kill
        time.sleep(0.5)
        proc.kill()
        self.terminated = True

    def _verify_ztest_suite_name(self, harness_state, detected_suite_names, handler_time):
        """
        If test suite names were found in the test's C source code, verify that
        the suite names detected in the output correspond to the expected suite
        names (and not vice versa).
        """
        expected_suite_names = self.instance.testsuite.ztest_suite_names
        logger.debug(f"Expected suite names:{expected_suite_names}")
        logger.debug(f"Detected suite names:{detected_suite_names}")
        if not expected_suite_names or harness_state != "passed":
            return
        if not detected_suite_names:
            self._missing_suite_name(expected_suite_names, handler_time)
        for detected_suite_name in detected_suite_names:
            if detected_suite_name not in expected_suite_names:
                self._missing_suite_name(expected_suite_names, handler_time)
                break

    def _missing_suite_name(self, expected_suite_names, handler_time):
        """
        Change the result of the performed test if a suite name was missing or
        improper.
        """
        self.instance.status = "failed"
        self.instance.execution_time = handler_time
        for tc in self.instance.testcases:
            tc.status = "failed"
        self.instance.reason = "Testsuite mismatch"
        logger.debug("Test suite names were not printed or some of them in "
                     "output do not correspond with expected: %s",
                     str(expected_suite_names))

    def _final_handle_actions(self, harness, handler_time):

        # only for Ztest tests:
        harness_class_name = type(harness).__name__
        if self.suite_name_check and harness_class_name == "Test":
            self._verify_ztest_suite_name(harness.state, harness.detected_suite_names, handler_time)

            if self.instance.status == 'failed':
                return

            if not harness.matched_run_id and harness.run_id_exists:
                self.instance.status = "failed"
                self.instance.execution_time = handler_time
                self.instance.reason = "RunID mismatch"
                for tc in self.instance.testcases:
                    tc.status = "failed"

        self.record(harness)


class BinaryHandler(Handler):
    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test Instance
        """
        super().__init__(instance, type_str)

        self.call_west_flash = False
        self.seed = None
        self.extra_test_args = None
        self.line = b""

    def try_kill_process_by_pid(self):
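        """If a PID was recorded in self.pid_fn, SIGKILL that process and clear the PID file."""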
        if self.pid_fn:
            pid = int(open(self.pid_fn).read())
            os.unlink(self.pid_fn)
            self.pid_fn = None  # clear so we don't try to kill the binary twice
            try:
                os.kill(pid, signal.SIGKILL)
            except ProcessLookupError:
                pass

    def _output_reader(self, proc):
        self.line = proc.stdout.readline()

    def _output_handler(self, proc, harness):
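        """Read process output line by line via a reader thread, log it, and feed it to the harness.

        After the harness reports a state, only a short grace period is allowed
        for late output (2 s, or 30 s when coverage capture is enabled).
        """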
        if harness.is_pytest:
            harness.handle(None)
            return

        with open(self.log, "wt") as log_out_fp:
            timeout_extended = False
            timeout_time = time.time() + self.timeout
            while True:
                this_timeout = timeout_time - time.time()
                if this_timeout < 0:
                    break
                reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True)
                reader_t.start()
                reader_t.join(this_timeout)
                if not reader_t.is_alive() and self.line != b"":
                    line_decoded = self.line.decode('utf-8', "replace")
                    stripped_line = line_decoded.rstrip().removesuffix('\\r\\n')
                    logger.debug("OUTPUT: %s", stripped_line)
                    log_out_fp.write(line_decoded)
                    log_out_fp.flush()
                    harness.handle(stripped_line)
                    if harness.state:
                        if not timeout_extended or harness.capture_coverage:
                            timeout_extended = True
                            if harness.capture_coverage:
                                timeout_time = time.time() + 30
                            else:
                                timeout_time = time.time() + 2
                else:
                    reader_t.join(0)
                    break
            try:
                # POSIX arch based ztests end on their own,
                # so let's give it up to 100ms to do so
                proc.wait(0.1)
            except subprocess.TimeoutExpired:
                self.terminate(proc)

    def handle(self):
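        """Run the built test (the binary itself, the generator's 'run' target, or
        'west flash'), optionally under valgrind and with sanitizer options, and
        set the instance status from the return code and harness state.
        """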

        harness_name = self.instance.testsuite.harness.capitalize()
        harness_import = HarnessImporter(harness_name)
        harness = harness_import.instance
        harness.configure(self.instance)

        if self.call_make_run:
            command = [self.generator_cmd, "run"]
        elif self.call_west_flash:
            command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
        else:
            command = [self.binary]

        run_valgrind = False
        if self.options.enable_valgrind:
            command = ["valgrind", "--error-exitcode=2",
                       "--leak-check=full",
                       "--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
                       "--log-file=" + self.build_dir + "/valgrind.log",
                       "--track-origins=yes",
                       ] + command
            run_valgrind = True

        # Only valid for native_posix
        if self.seed is not None:
            command.append(f"--seed={self.seed}")
        if self.extra_test_args is not None:
            command.extend(self.extra_test_args)

        logger.debug("Spawning process: " +
                     " ".join(shlex.quote(word) for word in command) + os.linesep +
                     "in directory: " + self.build_dir)

        start_time = time.time()

        env = os.environ.copy()
        if self.options.enable_asan:
            env["ASAN_OPTIONS"] = "log_path=stdout:" + \
                                  env.get("ASAN_OPTIONS", "")
            if not self.options.enable_lsan:
                env["ASAN_OPTIONS"] += "detect_leaks=0"

        if self.options.enable_ubsan:
            env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
                                   env.get("UBSAN_OPTIONS", "")

        with subprocess.Popen(command, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
            logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
            t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True)
            t.start()
            t.join()
            if t.is_alive():
                self.terminate(proc)
                t.join()
            proc.wait()
            self.returncode = proc.returncode
            self.try_kill_process_by_pid()

        handler_time = time.time() - start_time

        if self.options.coverage:
            # Pass a single string: with shell=True a list would drop everything
            # after the first element, so the GCOV_PREFIX assignment would never
            # reach gcov.
            subprocess.call("GCOV_PREFIX=" + self.build_dir +
                            " gcov " + self.sourcedir + " -b -s " + self.build_dir, shell=True)

        # FIXME: This is needed when killing the simulator, the console is
        # garbled and needs to be reset. Did not find a better way to do that.
        if sys.stdout.isatty():
            subprocess.call(["stty", "sane"], stdin=sys.stdout)

        if harness.is_pytest:
            harness.pytest_run(self.log)

        self.instance.execution_time = handler_time
        if not self.terminated and self.returncode != 0:
            self.instance.status = "failed"
            if run_valgrind and self.returncode == 2:
                self.instance.reason = "Valgrind error"
            else:
                # When a process is killed, the default handler returns 128 + SIGTERM
                # so in that case the return code itself is not meaningful
                self.instance.reason = "Failed"
        elif harness.state:
            self.instance.status = harness.state
            if harness.state == "failed":
                self.instance.reason = "Failed"
        else:
            self.instance.status = "failed"
            self.instance.reason = "Timeout"
            self.instance.add_missing_case_status("blocked", "Timeout")

        self._final_handle_actions(harness, handler_time)


class SimulationHandler(BinaryHandler):
    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test Instance
        """
        super().__init__(instance, type_str)

        if type_str == 'renode':
            self.pid_fn = os.path.join(instance.build_dir, "renode.pid")
        elif type_str == 'native':
            self.call_make_run = False
            self.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
            self.ready = True


class DeviceHandler(Handler):

    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test Instance
        """
        super().__init__(instance, type_str)

    def monitor_serial(self, ser, halt_event, harness):
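        """Read lines from the serial port, log them, and feed them to the harness
        until halt_event is set or the harness reports a state (and coverage
        capture, if enabled, has finished).
        """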
        if harness.is_pytest:
            harness.handle(None)
            return

        log_out_fp = open(self.log, "wt")

        if self.options.coverage:
            # Set capture_coverage to True to indicate that right after
            # test results we should get coverage data, otherwise we exit
            # from the test.
            harness.capture_coverage = True

        # Clear serial leftover.
        ser.reset_input_buffer()

        while ser.isOpen():
            if halt_event.is_set():
                logger.debug('halted')
                ser.close()
                break

            try:
                if not ser.in_waiting:
                    # no incoming bytes are waiting to be read from
                    # the serial input buffer, let other threads run
                    time.sleep(0.001)
                    continue
            # The serial port may still be in reset; checking its status can
            # raise an error, so wait a bit and retry.
            except OSError:
                time.sleep(0.001)
                continue
            except TypeError:
                # This exception happens if the serial port was closed and
                # its file descriptor cleared between the ser.isOpen()
                # and ser.in_waiting checks.
                logger.debug("Serial port is already closed, stop reading.")
                break

            serial_line = None
            try:
                serial_line = ser.readline()
            except TypeError:
                pass
            # ignore SerialException which may happen during the serial device
            # power off/on process.
            except serial.SerialException:
                pass

            # Just because ser_fileno has data doesn't mean an entire line
            # is available yet.
            if serial_line:
                sl = serial_line.decode('utf-8', 'ignore').lstrip()
                logger.debug("DEVICE: {0}".format(sl.rstrip()))

                log_out_fp.write(sl)
                log_out_fp.flush()
                harness.handle(sl.rstrip())

            if harness.state:
                if not harness.capture_coverage:
                    ser.close()
                    break

        log_out_fp.close()

    def device_is_available(self, instance):
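        """Return a free DUT matching this instance's platform (and fixture) and mark it busy.

        Returns None when all matching DUTs are currently in use; raises
        TwisterException when no configured DUT matches the platform at all.
        """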
        device = instance.platform.name
        fixture = instance.testsuite.harness_config.get("fixture")
        dut_found = False

        for d in self.duts:
            if fixture and fixture not in d.fixtures:
                continue
            if d.platform != device or (d.serial is None and d.serial_pty is None):
                continue
            dut_found = True
            d.lock.acquire()
            avail = False
            if d.available:
                d.available = 0
                d.counter += 1
                avail = True
            d.lock.release()
            if avail:
                return d

        if not dut_found:
            raise TwisterException(f"No device to serve as {device} platform.")

        return None

    def make_device_available(self, serial):
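        """Mark the DUT that owns the given serial device (or serial_pty command) as available again."""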
        for d in self.duts:
            if serial in [d.serial_pty, d.serial]:
                d.available = 1

    @staticmethod
    def run_custom_script(script, timeout):
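        """Run a user-provided script, log its output, and kill it if it exceeds the timeout."""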
        with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
            try:
                stdout, stderr = proc.communicate(timeout=timeout)
                logger.debug(stdout.decode())
                if proc.returncode != 0:
                    logger.error(f"Custom script failure: {stderr.decode(errors='ignore')}")

            except subprocess.TimeoutExpired:
                proc.kill()
                proc.communicate()
                logger.error("{} timed out".format(script))

    def handle(self):
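        """Acquire a DUT, flash the test image (via 'west flash' or the generator's
        flash target), monitor its serial output through the harness, and release
        the DUT when done.
        """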
        runner = None

        try:
            hardware = self.device_is_available(self.instance)
            while not hardware:
                time.sleep(1)
                hardware = self.device_is_available(self.instance)
        except TwisterException as error:
            self.instance.status = "failed"
            self.instance.reason = str(error)
            logger.error(self.instance.reason)
            return

        runner = hardware.runner or self.options.west_runner
        serial_pty = hardware.serial_pty

        ser_pty_process = None
        if serial_pty:
            master, slave = pty.openpty()
            try:
                ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
            except subprocess.CalledProcessError as error:
                logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
                return

            serial_device = os.ttyname(slave)
        else:
            serial_device = hardware.serial

        logger.debug(f"Using serial device {serial_device} @ {hardware.baud} baud")

        if (self.options.west_flash is not None) or runner:
            command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
            command_extra_args = []

            # There are three ways this option is used.
            # 1) bare: --west-flash
            #    This results in options.west_flash == []
            # 2) with a value: --west-flash="--board-id=42"
            #    This results in options.west_flash == "--board-id=42"
            # 3) Multiple values: --west-flash="--board-id=42,--erase"
            #    This results in options.west_flash == "--board-id=42 --erase"
            if self.options.west_flash and self.options.west_flash != []:
                command_extra_args.extend(self.options.west_flash.split(','))

            if runner:
                command.append("--runner")
                command.append(runner)

                board_id = hardware.probe_id or hardware.id
                product = hardware.product
                if board_id is not None:
                    if runner in ("pyocd", "nrfjprog"):
                        command_extra_args.append("--dev-id")
                        command_extra_args.append(board_id)
                    elif runner == "openocd" and product == "STM32 STLink":
                        command_extra_args.append("--cmd-pre-init")
                        command_extra_args.append("hla_serial %s" % (board_id))
                    elif runner == "openocd" and product == "STLINK-V3":
                        command_extra_args.append("--cmd-pre-init")
                        command_extra_args.append("hla_serial %s" % (board_id))
                    elif runner == "openocd" and product == "EDBG CMSIS-DAP":
                        command_extra_args.append("--cmd-pre-init")
                        command_extra_args.append("cmsis_dap_serial %s" % (board_id))
                    elif runner == "jlink":
                        command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
                    elif runner == "stm32cubeprogrammer":
                        command.append("--tool-opt=sn=%s" % (board_id))

                    # Receive parameters from runner_params field.
                    if hardware.runner_params:
                        for param in hardware.runner_params:
                            command.append(param)

            if command_extra_args != []:
                command.append('--')
                command.extend(command_extra_args)
        else:
            command = [self.generator_cmd, "-C", self.build_dir, "flash"]

        pre_script = hardware.pre_script
        post_flash_script = hardware.post_flash_script
        post_script = hardware.post_script

        if pre_script:
            self.run_custom_script(pre_script, 30)

        flash_timeout = hardware.flash_timeout
        if hardware.flash_with_test:
            flash_timeout += self.timeout

        try:
            ser = serial.Serial(
                serial_device,
                baudrate=hardware.baud,
                parity=serial.PARITY_NONE,
                stopbits=serial.STOPBITS_ONE,
                bytesize=serial.EIGHTBITS,
                timeout=max(flash_timeout, self.timeout)  # the worst case of no serial input
            )
        except serial.SerialException as e:
            self.instance.status = "failed"
            self.instance.reason = "Serial Device Error"
            logger.error("Serial device error: %s" % (str(e)))

            self.instance.add_missing_case_status("blocked", "Serial Device Error")
            if serial_pty and ser_pty_process:
                ser_pty_process.terminate()
                outs, errs = ser_pty_process.communicate()
                logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))

            if serial_pty:
                self.make_device_available(serial_pty)
            else:
                self.make_device_available(serial_device)
            return

        harness_name = self.instance.testsuite.harness.capitalize()
        harness_import = HarnessImporter(harness_name)
        harness = harness_import.instance
        harness.configure(self.instance)
        halt_monitor_evt = threading.Event()

        t = threading.Thread(target=self.monitor_serial, daemon=True,
                             args=(ser, halt_monitor_evt, harness))
        start_time = time.time()
        t.start()

        d_log = "{}/device.log".format(self.instance.build_dir)
        logger.debug('Flash command: %s', command)
        flash_error = False
        try:
            stdout = stderr = None
            with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
                try:
                    (stdout, stderr) = proc.communicate(timeout=flash_timeout)
                    # ignore unencodable unicode chars
                    logger.debug(stdout.decode(errors="ignore"))

                    if proc.returncode != 0:
                        self.instance.status = "error"
                        self.instance.reason = "Device issue (Flash error?)"
                        flash_error = True
                        with open(d_log, "w") as dlog_fp:
                            dlog_fp.write(stderr.decode())
                        halt_monitor_evt.set()
                except subprocess.TimeoutExpired:
                    logger.warning("Flash operation timed out.")
                    self.terminate(proc)
                    (stdout, stderr) = proc.communicate()
                    self.instance.status = "error"
                    self.instance.reason = "Device issue (Timeout)"
                    flash_error = True

            with open(d_log, "w") as dlog_fp:
                dlog_fp.write(stderr.decode())

        except subprocess.CalledProcessError:
            halt_monitor_evt.set()
            self.instance.status = "error"
            self.instance.reason = "Device issue (Flash error)"
            flash_error = True

        if post_flash_script:
            self.run_custom_script(post_flash_script, 30)

        if not flash_error:
            # Always wait at most the test timeout here after flashing.
            t.join(self.timeout)
        else:
            # When the flash failed due to an exception, twister tells the
            # serial monitor thread to close the serial port. Give that thread
            # a brief chance to run and actually close it.
            t.join(0.1)

        if t.is_alive():
            logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))

        if ser.isOpen():
            ser.close()

        if serial_pty:
            ser_pty_process.terminate()
            outs, errs = ser_pty_process.communicate()
            logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))

        handler_time = time.time() - start_time

        if harness.is_pytest:
            harness.pytest_run(self.log)

        self.instance.execution_time = handler_time
        if harness.state:
            self.instance.status = harness.state
            if harness.state == "failed":
                self.instance.reason = "Failed"
        elif not flash_error:
            self.instance.status = "failed"
            self.instance.reason = "Timeout"

        if self.instance.status in ["error", "failed"]:
            self.instance.add_missing_case_status("blocked", self.instance.reason)

        self._final_handle_actions(harness, handler_time)

        if post_script:
            self.run_custom_script(post_script, 30)

        if serial_pty:
            self.make_device_available(serial_pty)
        else:
            self.make_device_available(serial_device)


class QEMUHandler(Handler):
    """Spawns a thread to monitor QEMU output from pipes.

    We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
    We need to do this as once qemu starts, it runs forever until killed.
    Test cases emit special messages to the console as they run; we check
    for these to collect whether the test passed or failed.
    """

    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test instance
        """

        super().__init__(instance, type_str)
        self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")

        self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")

        if instance.testsuite.ignore_qemu_crash:
            self.ignore_qemu_crash = True
            self.ignore_unexpected_eof = True
        else:
            self.ignore_qemu_crash = False
            self.ignore_unexpected_eof = False

    @staticmethod
    def _get_cpu_time(pid):
        """Get process CPU time.

        The guest virtual time in QEMU icount mode isn't host time and
        it's maintained by counting guest instructions, so we use QEMU
        process execution time to mostly simulate the time of guest OS.
        """
        proc = psutil.Process(pid)
        cpu_time = proc.cpu_times()
        return cpu_time.user + cpu_time.system

    @staticmethod
    def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
                ignore_unexpected_eof=False):
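        """Monitor the QEMU FIFO for console output, feed it to the harness, and
        set the handler's instance status/reason from the harness state,
        timeouts, or unexpected EOF/bytes from QEMU.
        """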
        fifo_in = fifo_fn + ".in"
        fifo_out = fifo_fn + ".out"

        # These in/out nodes are named from QEMU's perspective, not ours
        if os.path.exists(fifo_in):
            os.unlink(fifo_in)
        os.mkfifo(fifo_in)
        if os.path.exists(fifo_out):
            os.unlink(fifo_out)
        os.mkfifo(fifo_out)

        # We don't do anything with out_fp but we need to open it for
        # writing so that QEMU doesn't block, due to the way pipes work
        out_fp = open(fifo_in, "wb")
        # Disable internal buffering, we don't
        # want read() or poll() to ever block if there is data in there
        in_fp = open(fifo_out, "rb", buffering=0)
        log_out_fp = open(logfile, "wt")

        start_time = time.time()
        timeout_time = start_time + timeout
        p = select.poll()
        p.register(in_fp, select.POLLIN)
        out_state = None

        line = ""
        timeout_extended = False

        pid = 0
        if os.path.exists(pid_fn):
            pid = int(open(pid_fn).read())

        while True:
            this_timeout = int((timeout_time - time.time()) * 1000)
            if this_timeout < 0 or not p.poll(this_timeout):
                try:
                    if pid and this_timeout > 0:
                        # There is a possibility that we polled nothing because
                        # the host did not schedule enough CPU time for the QEMU
                        # process during p.poll(this_timeout).
                        cpu_time = QEMUHandler._get_cpu_time(pid)
                        if cpu_time < timeout and not out_state:
                            timeout_time = time.time() + (timeout - cpu_time)
                            continue
                except ProcessLookupError:
                    out_state = "failed"
                    break

                if not out_state:
                    out_state = "timeout"
                break

            if pid == 0 and os.path.exists(pid_fn):
                pid = int(open(pid_fn).read())

            if harness.is_pytest:
                harness.handle(None)
                out_state = harness.state
                break

            try:
                c = in_fp.read(1).decode("utf-8")
            except UnicodeDecodeError:
                # Test is writing something weird, fail
                out_state = "unexpected byte"
                break

            if c == "":
                # EOF, this shouldn't happen unless QEMU crashes
                if not ignore_unexpected_eof:
                    out_state = "unexpected eof"
                break
            line = line + c
            if c != "\n":
                continue

            # line contains a full line of data output from QEMU
            log_out_fp.write(line)
            log_out_fp.flush()
            line = line.strip()
            logger.debug(f"QEMU ({pid}): {line}")

            harness.handle(line)
            if harness.state:
                # if we have registered a fail make sure the state is not
                # overridden by a false success message coming from the
                # testsuite
                if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
                    out_state = harness.state

                # if we get some state, that means test is doing well, we reset
                # the timeout and wait for 2 more seconds to catch anything
                # printed late. We wait much longer if code
                # coverage is enabled since dumping this information can
                # take some time.
                if not timeout_extended or harness.capture_coverage:
                    timeout_extended = True
                    if harness.capture_coverage:
                        timeout_time = time.time() + 30
                    else:
                        timeout_time = time.time() + 2
            line = ""

        if harness.is_pytest:
            harness.pytest_run(logfile)
            out_state = harness.state

        handler_time = time.time() - start_time
        logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds")

        handler.instance.execution_time = handler_time
        if out_state == "timeout":
            handler.instance.status = "failed"
            handler.instance.reason = "Timeout"
        elif out_state == "failed":
            handler.instance.status = "failed"
            handler.instance.reason = "Failed"
        elif out_state in ['unexpected eof', 'unexpected byte']:
            handler.instance.status = "failed"
            handler.instance.reason = out_state
        else:
            handler.instance.status = out_state
            handler.instance.reason = "Unknown"

        log_out_fp.close()
        out_fp.close()
        in_fp.close()
        if pid:
            try:
                os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                # Oh well, as long as it's dead! User probably sent Ctrl-C
                pass

        os.unlink(fifo_in)
        os.unlink(fifo_out)

    def handle(self):
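        """Set up the QEMU FIFO and PID files, start the monitor thread, then run
        the generator's 'run' target and wait for QEMU to finish or time out.
        """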
        self.results = {}
        self.run = True

        # We pass this to QEMU which looks for fifos with .in and .out
        # suffixes.

        if self.instance.testsuite.sysbuild:
            # Load domain yaml to get default domain build directory
            # Note: for targets using QEMU, we assume that the target will
            # have added any additional images to the run target manually
            domain_path = os.path.join(self.build_dir, "domains.yaml")
            domains = Domains.from_file(domain_path)
            logger.debug("Loaded sysbuild domain data from %s" % (domain_path))
            build_dir = domains.get_default_domain().build_dir
        else:
            build_dir = self.build_dir

        # QEMU fifo will use main build dir
        self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
        # PID file will be created in the main sysbuild app's build dir
        self.pid_fn = os.path.join(build_dir, "qemu.pid")

        if os.path.exists(self.pid_fn):
            os.unlink(self.pid_fn)

        self.log_fn = self.log

        harness_import = HarnessImporter(self.instance.testsuite.harness.capitalize())
        harness = harness_import.instance
        harness.configure(self.instance)

        self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
                                       args=(self, self.timeout, self.build_dir,
                                             self.log_fn, self.fifo_fn,
                                             self.pid_fn, self.results, harness,
                                             self.ignore_unexpected_eof))

        self.thread.daemon = True
        logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
        self.thread.start()
        if sys.stdout.isatty():
            subprocess.call(["stty", "sane"], stdin=sys.stdout)

        logger.debug("Running %s (%s)" % (self.name, self.type_str))
        command = [self.generator_cmd]
        command += ["-C", build_dir, "run"]

        is_timeout = False
        qemu_pid = None

        with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
            logger.debug("Spawning QEMUHandler Thread for %s" % self.name)

            try:
                proc.wait(self.timeout)
            except subprocess.TimeoutExpired:
                # sometimes QEMU can't handle SIGTERM signal correctly
                # in that case kill -9 QEMU process directly and leave
                # twister to judge testing result by console output

                is_timeout = True
                self.terminate(proc)
                if harness.state == "passed":
                    self.returncode = 0
                else:
                    self.returncode = proc.returncode
            else:
                if os.path.exists(self.pid_fn):
                    qemu_pid = int(open(self.pid_fn).read())
                logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}")
                self.returncode = proc.returncode
            # Need to wait for harness to finish processing
            # output from QEMU. Otherwise it might miss some
            # error messages.
            self.thread.join(0)
            if self.thread.is_alive():
                logger.debug("Timed out while monitoring QEMU output")

            if os.path.exists(self.pid_fn):
                qemu_pid = int(open(self.pid_fn).read())
                os.unlink(self.pid_fn)

        logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}")

        if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
            self.instance.status = "failed"
            if is_timeout:
                self.instance.reason = "Timeout"
            else:
                if not self.instance.reason:
                    self.instance.reason = "Exited with {}".format(self.returncode)
            self.instance.add_missing_case_status("blocked")

        self._final_handle_actions(harness, 0)

    def get_fifo(self):
        return self.fifo_fn