testsuite: ztest: Add framework for stress testing
Added a framework for concurrency testing. The framework sets up multiple contexts at different priorities and executes user handlers in those contexts. The test terminates after a given number of repetitions or preemptions, or when a timeout occurs. It can also be aborted by the user. The framework can be used for testing resiliency to preemption.

Signed-off-by: Krzysztof Chruscinski <krzysztof.chruscinski@nordicsemi.no>
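For illustration, here is a minimal usage sketch based on the API added in this commit. The handler names, counts, and timeouts are made up; ZTRESS_EXECUTE, ZTRESS_TIMER, ZTRESS_THREAD, and ztress_set_timeout are the macros and functions introduced in ztress.h below, and the snippet assumes it runs inside a ztest unit test:

    #include <ztest.h>
    #include <ztress.h>

    /* Called from the k_timer (interrupt) context; return true to keep running. */
    static bool timer_handler(void *user_data, uint32_t cnt, bool last, int prio)
    {
        return true;
    }

    /* Called from a thread context that the timer context preempts. */
    static bool thread_handler(void *user_data, uint32_t cnt, bool last, int prio)
    {
        return true;
    }

    static void test_stress_example(void)
    {
        /* Hard stop after 15 seconds even if the counts below are never reached. */
        ztress_set_timeout(K_MSEC(15000));

        /* Timer context completes after 2000 executions; the thread context completes
         * after being preempted 1000 times. K_MSEC(10) is the initial backoff, which
         * the framework adjusts at runtime to keep the CPU loaded.
         */
        ZTRESS_EXECUTE(ZTRESS_TIMER(timer_handler, NULL, 2000, K_MSEC(10)),
                       ZTRESS_THREAD(thread_handler, NULL, 0, 1000, K_MSEC(10)));
    }

The timer context runs at interrupt level and therefore preempts the thread context, which is what drives the thread context's preemption counter.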
parent 8367df2956
commit 16bd0df2fd
5 changed files with 734 additions and 0 deletions
@@ -9,3 +9,4 @@ zephyr_library()
zephyr_library_sources( src/ztest.c)
zephyr_library_sources( src/ztest_error_hook.c)
zephyr_library_sources_ifdef(CONFIG_ZTEST_MOCKING src/ztest_mock.c)
zephyr_library_sources_ifdef(CONFIG_ZTRESS src/ztress.c)

@@ -87,3 +87,29 @@ config ZTEST_PARAMETER_COUNT
    depends on ZTEST_MOCKING
    help
      Maximum amount of concurrent return values / expected parameters.

config ZTRESS
    bool "Stress test framework"
    select THREAD_RUNTIME_STATS
    select THREAD_MONITOR
    select TEST_RANDOM_GENERATOR if !ENTROPY_HAS_DRIVER
    depends on !USERSPACE

if ZTRESS

config ZTRESS_MAX_THREADS
    int "Maximum number of threads in ztress framework"
    default 3
    range 1 16

config ZTRESS_STACK_SIZE
    int "Stack size of Ztress thread"
    default 4096 if NO_OPTIMIZATIONS
    default 2048

config ZTRESS_REPORT_PROGRESS_MS
    int "Progress report interval (in milliseconds)"
    default 1000
    help
      Use 0 to disable.
endif # ZTRESS

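As a sketch, a test that wants the framework could enable it from its prj.conf roughly like this; only CONFIG_ZTRESS=y is strictly required, the other values are illustrative overrides of the defaults above, and CONFIG_ZTEST=y is assumed for the surrounding test:

    CONFIG_ZTEST=y
    CONFIG_ZTRESS=y
    CONFIG_ZTRESS_MAX_THREADS=4
    CONFIG_ZTRESS_REPORT_PROGRESS_MS=500
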
subsys/testsuite/ztest/include/ztress.h (new file, 232 additions)
@@ -0,0 +1,232 @@
/*
 * Copyright (c) 2021 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef TESTSUITE_ZTEST_INCLUDE_ZTRESS_H__
#define TESTSUITE_ZTEST_INCLUDE_ZTRESS_H__

#include <sys/util.h>
#include <kernel.h>

/** @internal Internal IDs to distinguish context type. */
#define ZTRESS_ID_THREAD 0
#define ZTRESS_ID_K_TIMER 1

/** @brief Descriptor of a k_timer handler execution context.
 *
 * The handler is executed in the k_timer handler context, which typically means
 * interrupt context. This context will preempt any other context used in the set.
 *
 * @note There can only be up to one k_timer context in the set and it must be the
 * first argument of @ref ZTRESS_EXECUTE.
 *
 * @param handler User handler of type @ref ztress_handler.
 *
 * @param user_data User data passed to the @p handler.
 *
 * @param exec_cnt Number of handler executions to complete the test. If 0, then
 * this is not included in the completion criteria.
 *
 * @param init_timeout Initial backoff time base (given as @ref k_timeout_t). It
 * is adjusted during the test to optimize CPU load. The actual timeout used for
 * the timer is randomized.
 */
#define ZTRESS_TIMER(handler, user_data, exec_cnt, init_timeout) \
    (ZTRESS_ID_K_TIMER, handler, user_data, exec_cnt, 0, init_timeout)

/** @brief Descriptor of a thread execution context.
 *
 * The handler is executed in a thread context. The priority of the thread is
 * determined based on the order in which contexts are listed in @ref ZTRESS_EXECUTE.
 *
 * @note The thread sleeps for a random amount of time. Additionally, the thread
 * busy-waits for a random length of time to further increase randomization in the test.
 *
 * @param handler User handler of type @ref ztress_handler.
 *
 * @param user_data User data passed to the @p handler.
 *
 * @param exec_cnt Number of handler executions to complete the test. If 0, then
 * this is not included in the completion criteria.
 *
 * @param preempt_cnt Number of preemptions of that context to complete the test.
 * If 0, then this is not included in the completion criteria.
 *
 * @param init_timeout Initial backoff time base (given as @ref k_timeout_t). It
 * is adjusted during the test to optimize CPU load. The actual timeout used for
 * sleeping is randomized.
 */
#define ZTRESS_THREAD(handler, user_data, exec_cnt, preempt_cnt, init_timeout) \
    (ZTRESS_ID_THREAD, handler, user_data, exec_cnt, preempt_cnt, init_timeout)

/** @brief User handler called in one of the configured contexts.
 *
 * @param user_data User data provided in the context descriptor.
 *
 * @param cnt Current execution counter. Counted from 0.
 *
 * @param last Flag set to true to indicate the last execution, because the
 * completion criteria are met, the test timed out, or the test was aborted.
 *
 * @param prio Context priority, counting from 0, where 0 indicates the highest priority.
 *
 * @retval true Continue the test.
 * @retval false Stop executing the current context.
 */
typedef bool (*ztress_handler)(void *user_data, uint32_t cnt, bool last, int prio);

/** @internal Context structure. */
struct ztress_context_data {
    /* Handler. */
    ztress_handler handler;

    /* User data. */
    void *user_data;

    /* Minimum number of executions to complete the test. */
    uint32_t exec_cnt;

    /* Minimum number of preemptions to complete the test. Valid only for
     * thread context.
     */
    uint32_t preempt_cnt;

    /* Initial timeout. */
    k_timeout_t t;
};

/** @brief Initialize context structure.
 *
 * For argument types see @ref ztress_context_data. For more details see
 * @ref ZTRESS_THREAD.
 *
 * @param _handler Handler.
 * @param _user_data User data passed to the handler.
 * @param _exec_cnt Execution count limit.
 * @param _preempt_cnt Preemption count limit.
 * @param _t Initial timeout.
 */
#define ZTRESS_CONTEXT_INITIALIZER(_handler, _user_data, _exec_cnt, _preempt_cnt, _t) \
    { \
        .handler = (_handler), \
        .user_data = (_user_data), \
        .exec_cnt = (_exec_cnt), \
        .preempt_cnt = (_preempt_cnt), \
        .t = (_t) \
    }

/** @internal Strip the first argument (context type) and call the struct initializer. */
#define Z_ZTRESS_GET_HANDLER_DATA2(_, ...) \
    ZTRESS_CONTEXT_INITIALIZER(__VA_ARGS__)

/** @internal Macro for initializing context data. */
#define Z_ZTRESS_GET_HANDLER_DATA(data) \
    Z_ZTRESS_GET_HANDLER_DATA2 data

/** @internal Macro for checking whether the provided context is a timer context. */
#define Z_ZTRESS_HAS_TIMER(data, ...) \
    GET_ARG_N(1, __DEBRACKET data)

/** @internal If the context descriptor is @ref ZTRESS_TIMER, returns the index of that
 * descriptor in the list of arguments.
 */
#define Z_ZTRESS_TIMER_IDX(idx, data) \
    ((GET_ARG_N(1, __DEBRACKET data)) == ZTRESS_ID_K_TIMER ? idx : 0)

/** @internal Macro validating that a @ref ZTRESS_TIMER context is not used except as
 * the first item in the list of contexts.
 */
#define Z_ZTRESS_TIMER_CONTEXT_VALIDATE(...) \
    BUILD_ASSERT((FOR_EACH_IDX(Z_ZTRESS_TIMER_IDX, (+), __VA_ARGS__)) == 0, \
                 "There can only be up to one ZTRESS_TIMER context and it must " \
                 "be the first in the list")

/** @brief Set up and run a stress test.
 *
 * It initializes all contexts and calls @ref ztress_execute.
 *
 * @param ... List of contexts. Contexts are configured using the @ref ZTRESS_TIMER
 * and @ref ZTRESS_THREAD macros. @ref ZTRESS_TIMER must be the first argument if
 * used. Each thread context has an assigned priority. The priority is assigned in
 * descending order (the first listed thread context has the highest priority). The
 * number of supported thread contexts is configurable in Kconfig.
 */
#define ZTRESS_EXECUTE(...) do { \
    Z_ZTRESS_TIMER_CONTEXT_VALIDATE(__VA_ARGS__); \
    int has_timer = Z_ZTRESS_HAS_TIMER(__VA_ARGS__); \
    struct ztress_context_data data[] = { \
        FOR_EACH(Z_ZTRESS_GET_HANDLER_DATA, (,), __VA_ARGS__) \
    }; \
    size_t cnt = ARRAY_SIZE(data) - has_timer; \
    int err = ztress_execute(has_timer ? &data[0] : NULL, &data[has_timer], cnt); \
    \
    zassert_equal(err, 0, "ztress_execute failed (err: %d)", err); \
} while (0)

/** Execute contexts.
 *
 * The test runs until all completion requirements are met, until the test times out
 * (use @ref ztress_set_timeout to configure the timeout), or until the test is aborted
 * (@ref ztress_abort).
 *
 * On test completion a report is printed (@ref ztress_report is called internally).
 *
 * @param timer_data Timer context. NULL if a timer context is not used.
 * @param thread_data List of thread context descriptors, in descending priority order.
 * @param cnt Number of thread contexts.
 *
 * @retval -EINVAL If the configuration is invalid.
 * @retval 0 If the test is successfully performed.
 */
int ztress_execute(struct ztress_context_data *timer_data,
                   struct ztress_context_data *thread_data,
                   size_t cnt);

/** @brief Abort an ongoing stress test. */
void ztress_abort(void);

/** @brief Set the test timeout.
 *
 * The test is terminated after the timeout, disregarding the completion criteria.
 * The setting is persistent between executions.
 *
 * @param t Timeout.
 */
void ztress_set_timeout(k_timeout_t t);

/** @brief Print the last test report.
 *
 * The report contains the number of executions and preemptions for each context,
 * the initial and adjusted timeouts, and the CPU load during the test.
 */
void ztress_report(void);

/** @brief Get the number of executions of a given context in the last test.
 *
 * @param id Context id. 0 means the highest priority.
 *
 * @return Number of executions.
 */
int ztress_exec_count(uint32_t id);

/** @brief Get the number of preemptions of a given context in the last test.
 *
 * @param id Context id. 0 means the highest priority.
 *
 * @return Number of preemptions.
 */
int ztress_preempt_count(uint32_t id);

/** @brief Get the optimized timeout base of a given context in the last test.
 *
 * The optimized value can be used to update the initial value. It will improve the
 * test since the optimal CPU load will be reached immediately.
 *
 * @param id Context id. 0 means the highest priority.
 *
 * @return Optimized timeout base.
 */
uint32_t ztress_optimized_ticks(uint32_t id);

#endif /* TESTSUITE_ZTEST_INCLUDE_ZTRESS_H__ */

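For contexts built at runtime rather than through ZTRESS_EXECUTE, a minimal sketch using the structures declared above might look like the following; the handler name and limits are illustrative, and ztest.h is assumed to be included for zassert_equal:

    /* Shared handler exercising the code under test. */
    static bool load_handler(void *user_data, uint32_t cnt, bool last, int prio)
    {
        return true;
    }

    static void run_two_thread_contexts(void)
    {
        /* Two thread contexts and no timer context (first argument of
         * ztress_execute is NULL). The first entry gets the higher priority.
         */
        struct ztress_context_data data[] = {
            ZTRESS_CONTEXT_INITIALIZER(load_handler, NULL, 10000, 0, K_MSEC(5)),
            ZTRESS_CONTEXT_INITIALIZER(load_handler, NULL, 0, 500, K_MSEC(5))
        };
        int err = ztress_execute(NULL, data, ARRAY_SIZE(data));

        zassert_equal(err, 0, "ztress_execute failed (err: %d)", err);
    }
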
subsys/testsuite/ztest/src/ztress.c (new file, 421 additions)
@@ -0,0 +1,421 @@
/*
 * Copyright (c) 2021 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <ztress.h>
#include <sys/printk.h>
#include <random/rand32.h>
#include <string.h>


/* Timer used for adjusting contexts backoff time to get optimal CPU load. */
static void ctrl_timeout(struct k_timer *timer);
K_TIMER_DEFINE(ctrl_timer, ctrl_timeout, NULL);

/* Timer used for reporting test progress. */
static void progress_timeout(struct k_timer *timer);
K_TIMER_DEFINE(progress_timer, progress_timeout, NULL);

/* Timer used for higher priority context. */
static void ztress_timeout(struct k_timer *timer);
K_TIMER_DEFINE(ztress_timer, ztress_timeout, NULL);

/* Timer handling test timeout which ends test prematurely. */
static k_timeout_t timeout;
static void test_timeout(struct k_timer *timer);
K_TIMER_DEFINE(test_timer, test_timeout, NULL);

static atomic_t active_cnt;
static struct k_thread threads[CONFIG_ZTRESS_MAX_THREADS];
static k_tid_t tids[CONFIG_ZTRESS_MAX_THREADS];

static uint32_t context_cnt;
struct ztress_context_data *tmr_data;

static atomic_t active_mask;
static uint32_t preempt_cnt[CONFIG_ZTRESS_MAX_THREADS];
static uint32_t exec_cnt[CONFIG_ZTRESS_MAX_THREADS];
static k_timeout_t backoff[CONFIG_ZTRESS_MAX_THREADS];
static k_timeout_t init_backoff[CONFIG_ZTRESS_MAX_THREADS];
K_THREAD_STACK_ARRAY_DEFINE(stacks, CONFIG_ZTRESS_MAX_THREADS, CONFIG_ZTRESS_STACK_SIZE);
static k_tid_t idle_tid;

#define THREAD_NAME(i, _) STRINGIFY(ztress_##i),

static const char * const thread_names[] = {
    UTIL_LISTIFY(CONFIG_ZTRESS_MAX_THREADS, THREAD_NAME, _)
};

struct ztress_runtime {
    uint32_t cpu_load;
    uint32_t cpu_load_measurements;
};

static struct ztress_runtime rt;

static void test_timeout(struct k_timer *timer)
{
    ztress_abort();
}

/* Ratio is 1/16, e.g. using ratio 14 reduces all timeouts by multiplying them by 14/16.
 * A fraction of 16 is used to avoid division, which may take more time on certain platforms.
 */
static void adjust_load(uint8_t ratio)
{
    for (uint32_t i = 0; i < context_cnt; i++) {
        uint32_t new_ticks = ratio * (uint32_t)backoff[i].ticks / 16;

        backoff[i].ticks = MAX(4, new_ticks);
    }
}

static void progress_timeout(struct k_timer *timer)
{
    struct ztress_context_data *thread_data = k_timer_user_data_get(timer);
    uint32_t progress = 100;
    uint32_t cnt = context_cnt;

    if (tmr_data != NULL) {
        cnt--;
        if (tmr_data->exec_cnt != 0 && exec_cnt[cnt] != 0) {
            progress = (100 * exec_cnt[cnt]) / tmr_data->exec_cnt;
        }
    }

    for (uint32_t i = 0; i < cnt; i++) {
        if (thread_data[i].exec_cnt == 0 && thread_data[i].preempt_cnt == 0) {
            continue;
        }

        uint32_t exec_progress = (thread_data[i].exec_cnt) ?
                (100 * exec_cnt[i]) / thread_data[i].exec_cnt : 100;
        uint32_t preempt_progress = (thread_data[i].preempt_cnt) ?
                (100 * preempt_cnt[i]) / thread_data[i].preempt_cnt : 100;
        uint32_t thread_progress = MIN(exec_progress, preempt_progress);

        progress = MIN(progress, thread_progress);
    }


    uint64_t rem = 1000 * (k_timer_expires_ticks(&test_timer) - sys_clock_tick_get()) /
            CONFIG_SYS_CLOCK_TICKS_PER_SEC;

    printk("\r%u%% remaining:%u ms", progress, (uint32_t)rem);
}

static void control_load(void)
{
    static uint64_t prev_cycles;
    static uint64_t total_cycles;

    k_thread_runtime_stats_t rt_stats_thread;
    k_thread_runtime_stats_t rt_stats_all;
    int err = 0;

    err = k_thread_runtime_stats_get(idle_tid, &rt_stats_thread);
    if (err < 0) {
        return;
    }

    err = k_thread_runtime_stats_all_get(&rt_stats_all);
    if (err < 0) {
        return;
    }

    int load = 1000 - (1000 * (rt_stats_thread.execution_cycles - prev_cycles) /
            (rt_stats_all.execution_cycles - total_cycles));

    prev_cycles = rt_stats_thread.execution_cycles;
    total_cycles = rt_stats_all.execution_cycles;

    int avg_load = (rt.cpu_load * rt.cpu_load_measurements + load) /
            (rt.cpu_load_measurements + 1);

    rt.cpu_load = avg_load;
    rt.cpu_load_measurements++;

    if (load > 800 && load < 850) {
        /* Expected load */
    } else if (load > 850) {
        /* Slightly reduce load. */
        adjust_load(18);
    } else if (load < 300) {
        adjust_load(8);
    } else if (load < 500) {
        adjust_load(12);
    } else {
        adjust_load(14);
    }
}

static void ctrl_timeout(struct k_timer *timer)
{
    control_load();
}

void preempt_update(void)
{
    uint32_t mask = active_mask;

    while (mask) {
        int idx = 31 - __builtin_clz(mask);

        /* Clear mask to ensure that other context does not count same thread. */
        if ((atomic_and(&active_mask, ~BIT(idx)) & BIT(idx)) != 0) {
            preempt_cnt[idx]++;
        }

        mask &= ~BIT(idx);
    }
}

static bool cont_check(struct ztress_context_data *context_data, uint32_t priority)
{
    if (context_data->preempt_cnt != 0 && preempt_cnt[priority] >= context_data->preempt_cnt) {
        atomic_dec(&active_cnt);
        return false;
    }

    if (context_data->exec_cnt != 0 && exec_cnt[priority] >= context_data->exec_cnt) {
        atomic_dec(&active_cnt);
        return false;
    }

    return active_cnt > 0;
}

static k_timeout_t randomize_t(k_timeout_t t)
{
    if (t.ticks <= 4) {
        return t;
    }

    uint32_t mask = BIT_MASK(31 - __builtin_clz((uint32_t)t.ticks));

    t.ticks += (sys_rand32_get() & mask);

    return t;
}

static void microdelay(void)
{
    static volatile int microdelay_cnt;
    uint32_t repeat = sys_rand32_get() & 0xff;

    for (int i = 0; i < repeat; i++) {
        microdelay_cnt++;
    }
}

static void ztress_timeout(struct k_timer *timer)
{
    struct ztress_context_data *context_data = k_timer_user_data_get(timer);
    uint32_t priority = 0;
    bool cont_test, cont;

    preempt_update();
    cont_test = cont_check(context_data, priority);
    cont = context_data->handler(context_data->user_data,
                                 exec_cnt[priority],
                                 !cont_test,
                                 priority);
    exec_cnt[priority]++;

    if (cont == true && cont_test == true) {
        k_timer_start(timer, randomize_t(backoff[priority]), K_NO_WAIT);
    }
}

static void sleep(k_timeout_t t)
{
    if (K_TIMEOUT_EQ(t, K_NO_WAIT) == false) {
        t = randomize_t(t);
        k_sleep(t);
    }
}

static void ztress_thread(void *data, void *prio, void *unused)
{
    struct ztress_context_data *context_data = data;
    uint32_t priority = (uint32_t)(uintptr_t)prio;
    bool cont_test, cont;

    do {
        uint32_t cnt = exec_cnt[priority];

        preempt_update();
        exec_cnt[priority] = cnt + 1;
        cont_test = cont_check(context_data, priority);
        microdelay();
        atomic_or(&active_mask, BIT(priority));
        cont = context_data->handler(context_data->user_data, cnt, !cont_test, priority);
        atomic_and(&active_mask, ~BIT(priority));

        sleep(backoff[priority]);
    } while (cont == true && cont_test == true);
}

static void thread_cb(const struct k_thread *cthread, void *user_data)
{
    const char *tname = k_thread_name_get((struct k_thread *)cthread);

    if (strcmp(tname, "idle 00") == 0) {
        idle_tid = (struct k_thread *)cthread;
    }
}

static void ztress_init(struct ztress_context_data *thread_data)
{
    memset(exec_cnt, 0, sizeof(exec_cnt));
    memset(preempt_cnt, 0, sizeof(preempt_cnt));
    memset(&rt, 0, sizeof(rt));
    k_thread_foreach(thread_cb, NULL);
    k_msleep(10);

    if (idle_tid == NULL) {
        printk("Failed to identify idle thread. CPU load will not be tracked\n");
    }

    k_timer_start(&ctrl_timer, K_MSEC(100), K_MSEC(100));
    k_timer_user_data_set(&progress_timer, thread_data);
    k_timer_start(&progress_timer,
                  K_MSEC(CONFIG_ZTRESS_REPORT_PROGRESS_MS),
                  K_MSEC(CONFIG_ZTRESS_REPORT_PROGRESS_MS));
    if (K_TIMEOUT_EQ(timeout, K_NO_WAIT) == false) {
        k_timer_start(&test_timer, timeout, K_NO_WAIT);
    }
}

static void ztress_end(int old_prio)
{
    k_timer_stop(&ctrl_timer);
    k_timer_stop(&progress_timer);
    k_timer_stop(&test_timer);
    k_thread_priority_set(k_current_get(), old_prio);
}

static void active_cnt_init(struct ztress_context_data *data)
{
    if (data->preempt_cnt != 0 || data->exec_cnt != 0) {
        active_cnt++;
    }
}

int ztress_execute(struct ztress_context_data *timer_data,
                   struct ztress_context_data *thread_data,
                   size_t cnt)
{
    /* Start control timer. */
    int old_prio = k_thread_priority_get(k_current_get());
    int priority, ztress_prio = 0;

    if (cnt > CONFIG_ZTRESS_MAX_THREADS) {
        return -EINVAL;
    }

    if (cnt + 2 > CONFIG_NUM_PREEMPT_PRIORITIES) {
        return -EINVAL;
    }

    ztress_init(thread_data);

    context_cnt = cnt + (timer_data ? 1 : 0);
    priority = K_LOWEST_THREAD_PRIO - cnt - 1;

    k_thread_priority_set(k_current_get(), priority);
    priority++;

    tmr_data = timer_data;
    if (timer_data != NULL) {
        active_cnt_init(timer_data);
        backoff[ztress_prio] = timer_data->t;
        init_backoff[ztress_prio] = timer_data->t;
        k_timer_user_data_set(&ztress_timer, timer_data);
        k_timer_start(&ztress_timer, backoff[ztress_prio], K_NO_WAIT);
        ztress_prio++;
    }

    for (int i = 0; i < cnt; i++) {
        active_cnt_init(&thread_data[i]);
        backoff[ztress_prio] = thread_data[i].t;
        init_backoff[ztress_prio] = thread_data[i].t;
        tids[i] = k_thread_create(&threads[i], stacks[i], CONFIG_ZTRESS_STACK_SIZE,
                                  ztress_thread,
                                  &thread_data[i], (void *)(uintptr_t)ztress_prio, NULL,
                                  priority, 0, K_NO_WAIT);
        (void)k_thread_name_set(tids[i], thread_names[i]);
        priority++;
        ztress_prio++;
    }

    /* Wait until all threads complete. */
    for (int i = 0; i < cnt; i++) {
        k_thread_join(tids[i], K_FOREVER);
    }

    /* Abort to stop timer. */
    if (timer_data != NULL) {
        ztress_abort();
        (void)k_timer_status_sync(&ztress_timer);
    }

    /* Print report. */
    ztress_report();

    ztress_end(old_prio);

    return 0;
}

void ztress_abort(void)
{
    atomic_set(&active_cnt, 0);
}

void ztress_set_timeout(k_timeout_t t)
{
    timeout = t;
}

void ztress_report(void)
{
    printk("\nZtress execution report:\n");
    for (uint32_t i = 0; i < context_cnt; i++) {
        printk("\t context %u:\n\t\t - executed:%u, preempted:%u\n",
               i, exec_cnt[i], preempt_cnt[i]);
        printk("\t\t - ticks initial:%u, optimized:%u\n",
               (uint32_t)init_backoff[i].ticks, (uint32_t)backoff[i].ticks);
    }

printk("\tAvarage CPU load:%u%%, measurements:%u\n",
|
||||
           rt.cpu_load / 10, rt.cpu_load_measurements);
}

int ztress_exec_count(uint32_t id)
{
    if (id >= context_cnt) {
        return -EINVAL;
    }

    return exec_cnt[id];
}

int ztress_preempt_count(uint32_t id)
{
    if (id >= context_cnt) {
        return -EINVAL;
    }

    return preempt_cnt[id];
}

uint32_t ztress_optimized_ticks(uint32_t id)
{
    if (id >= context_cnt) {
        return -EINVAL;
    }

    return backoff[id].ticks;
}