From 7cdf40541bfc13cce6a0f6990511dc585ec42fc4 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Mon, 25 Nov 2024 07:06:58 -0800 Subject: [PATCH] kernel/sched: Eliminate PRESTART thread state Traditionally threads have been initialized with a PRESTART flag set, which gets cleared when the thread runs for the first time via either its timeout or the k_thread_start() API. But if you think about it, this is no different, semantically, than SUSPENDED: the thread is prevented from running until the flag is cleared. So unify the two. Start threads in the SUSPENDED state, point everyone looking at the PRESTART bit to the SUSPENDED flag, and make k_thread_start() be a synonym for k_thread_resume(). There is some mild code size savings from the eliminated duplication, but the real win here is that we make space in the thread flags byte, which had run out. Signed-off-by: Andy Ross --- include/zephyr/kernel.h | 30 +++++++++++-------- include/zephyr/kernel_structs.h | 3 -- kernel/include/kthread.h | 13 +------- kernel/init.c | 4 +-- kernel/sched.c | 19 +----------- kernel/thread.c | 19 +----------- .../COMPONENT_ZEPHYR/cyabs_rtos_zephyr.c | 4 --- subsys/mgmt/mcumgr/transport/src/smp_udp.c | 2 +- .../portability/cmsis_rtos_v1/cmsis_kernel.c | 2 +- .../portability/cmsis_rtos_v1/cmsis_thread.c | 2 +- subsys/portability/cmsis_rtos_v2/thread.c | 5 +--- .../src/tm_porting_layer_zephyr.c | 6 +--- .../thread_apis/src/test_kthread_for_each.c | 6 +--- 13 files changed, 29 insertions(+), 86 deletions(-) diff --git a/include/zephyr/kernel.h b/include/zephyr/kernel.h index 9f8a8171a94..a35c453ed8c 100644 --- a/include/zephyr/kernel.h +++ b/include/zephyr/kernel.h @@ -694,18 +694,6 @@ static inline k_tid_t k_current_get(void) */ __syscall void k_thread_abort(k_tid_t thread); - -/** - * @brief Start an inactive thread - * - * If a thread was created with K_FOREVER in the delay parameter, it will - * not be added to the scheduling queue until this function is called - * on it. 
- * - * @param thread thread to start - */ -__syscall void k_thread_start(k_tid_t thread); - k_ticks_t z_timeout_expires(const struct _timeout *timeout); k_ticks_t z_timeout_remaining(const struct _timeout *timeout); @@ -1064,6 +1052,24 @@ __syscall void k_thread_suspend(k_tid_t thread); */ __syscall void k_thread_resume(k_tid_t thread); +/** + * @brief Start an inactive thread + * + * If a thread was created with K_FOREVER in the delay parameter, it will + * not be added to the scheduling queue until this function is called + * on it. + * + * @note This is a legacy API for compatibility. Modern Zephyr + * threads are initialized in the "suspended" state and do not need + * special handling for "start". + * + * @param thread thread to start + */ +static inline void k_thread_start(k_tid_t thread) +{ + k_thread_resume(thread); +} + /** + * @brief Set time-slicing period and scope. + * diff --git a/include/zephyr/kernel_structs.h b/include/zephyr/kernel_structs.h index 175e0a6c6dd..3c1df990a22 100644 --- a/include/zephyr/kernel_structs.h +++ b/include/zephyr/kernel_structs.h @@ -54,9 +54,6 @@ extern "C" { /* Thread is waiting on an object */ #define _THREAD_PENDING (BIT(1)) -/* Thread has not yet started */ -#define _THREAD_PRESTART (BIT(2)) - /* Thread has terminated */ #define _THREAD_DEAD (BIT(3)) diff --git a/kernel/include/kthread.h b/kernel/include/kthread.h index 512636c49c5..39bea42c912 100644 --- a/kernel/include/kthread.h +++ b/kernel/include/kthread.h @@ -15,7 +15,6 @@ #define Z_STATE_STR_DUMMY "dummy" #define Z_STATE_STR_PENDING "pending" -#define Z_STATE_STR_PRESTART "prestart" #define Z_STATE_STR_DEAD "dead" #define Z_STATE_STR_SUSPENDED "suspended" #define Z_STATE_STR_ABORTING "aborting" @@ -97,7 +96,7 @@ static inline bool z_is_thread_prevented_from_running(struct k_thread *thread) { uint8_t state = thread->base.thread_state; - return (state & (_THREAD_PENDING | _THREAD_PRESTART | _THREAD_DEAD | + return (state & (_THREAD_PENDING | _THREAD_DEAD | 
_THREAD_DUMMY | _THREAD_SUSPENDED)) != 0U; } @@ -113,11 +112,6 @@ static inline bool z_is_thread_ready(struct k_thread *thread) z_is_thread_timeout_active(thread)); } -static inline bool z_has_thread_started(struct k_thread *thread) -{ - return (thread->base.thread_state & _THREAD_PRESTART) == 0U; -} - static inline bool z_is_thread_state_set(struct k_thread *thread, uint32_t state) { return (thread->base.thread_state & state) != 0U; @@ -142,11 +136,6 @@ static inline void z_mark_thread_as_not_suspended(struct k_thread *thread) SYS_PORT_TRACING_FUNC(k_thread, sched_resume, thread); } -static inline void z_mark_thread_as_started(struct k_thread *thread) -{ - thread->base.thread_state &= ~_THREAD_PRESTART; -} - static inline void z_mark_thread_as_pending(struct k_thread *thread) { thread->base.thread_state |= _THREAD_PENDING; diff --git a/kernel/init.c b/kernel/init.c index c3b6ea9bb33..0327107129c 100644 --- a/kernel/init.c +++ b/kernel/init.c @@ -598,7 +598,7 @@ static void init_idle_thread(int i) stack_size, idle, &_kernel.cpus[i], NULL, NULL, K_IDLE_PRIO, K_ESSENTIAL, tname); - z_mark_thread_as_started(thread); + z_mark_thread_as_not_suspended(thread); #ifdef CONFIG_SMP thread->base.is_idle = 1U; @@ -675,7 +675,7 @@ static char *prepare_multithreading(void) NULL, NULL, NULL, CONFIG_MAIN_THREAD_PRIORITY, K_ESSENTIAL, "main"); - z_mark_thread_as_started(&z_main_thread); + z_mark_thread_as_not_suspended(&z_main_thread); z_ready_thread(&z_main_thread); z_init_cpu(0); diff --git a/kernel/sched.c b/kernel/sched.c index 61e5b5dedbd..4105ebaa3eb 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -405,20 +405,6 @@ void z_move_thread_to_end_of_prio_q(struct k_thread *thread) } } -void z_sched_start(struct k_thread *thread) -{ - k_spinlock_key_t key = k_spin_lock(&_sched_spinlock); - - if (z_has_thread_started(thread)) { - k_spin_unlock(&_sched_spinlock, key); - return; - } - - z_mark_thread_as_started(thread); - ready_thread(thread); - z_reschedule(&_sched_spinlock, key); 
-} - /* Spins in ISR context, waiting for a thread known to be running on * another CPU to catch the IPI we sent and halt. Note that we check * for ourselves being asynchronously halted first to prevent simple @@ -645,10 +631,7 @@ void z_sched_wake_thread(struct k_thread *thread, bool is_timeout) if (thread->base.pended_on != NULL) { unpend_thread_no_timeout(thread); } - z_mark_thread_as_started(thread); - if (is_timeout) { - z_mark_thread_as_not_suspended(thread); - } + z_mark_thread_as_not_suspended(thread); ready_thread(thread); } } diff --git a/kernel/thread.c b/kernel/thread.c index fba4f458c40..a0761645708 100644 --- a/kernel/thread.c +++ b/kernel/thread.c @@ -236,7 +236,6 @@ const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size) } state_string[] = { SS_ENT(DUMMY), SS_ENT(PENDING), - SS_ENT(PRESTART), SS_ENT(DEAD), SS_ENT(SUSPENDED), SS_ENT(ABORTING), @@ -347,22 +346,6 @@ void z_check_stack_sentinel(void) } #endif /* CONFIG_STACK_SENTINEL */ -void z_impl_k_thread_start(k_tid_t thread) -{ - SYS_PORT_TRACING_OBJ_FUNC(k_thread, start, thread); - - z_sched_start(thread); -} - -#ifdef CONFIG_USERSPACE -static inline void z_vrfy_k_thread_start(k_tid_t thread) -{ - K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); - z_impl_k_thread_start(thread); -} -#include -#endif /* CONFIG_USERSPACE */ - #if defined(CONFIG_STACK_POINTER_RANDOM) && (CONFIG_STACK_POINTER_RANDOM != 0) int z_stack_adjust_initialized; @@ -559,7 +542,7 @@ char *z_setup_new_thread(struct k_thread *new_thread, z_waitq_init(&new_thread->join_queue); /* Initialize various struct k_thread members */ - z_init_thread_base(&new_thread->base, prio, _THREAD_PRESTART, options); + z_init_thread_base(&new_thread->base, prio, _THREAD_SUSPENDED, options); stack_ptr = setup_thread_stack(new_thread, stack, stack_size); #ifdef CONFIG_KERNEL_COHERENCE diff --git a/modules/hal_infineon/abstraction-rtos/source/COMPONENT_ZEPHYR/cyabs_rtos_zephyr.c 
b/modules/hal_infineon/abstraction-rtos/source/COMPONENT_ZEPHYR/cyabs_rtos_zephyr.c index 3e1acbbc17b..c9df61b2f73 100644 --- a/modules/hal_infineon/abstraction-rtos/source/COMPONENT_ZEPHYR/cyabs_rtos_zephyr.c +++ b/modules/hal_infineon/abstraction-rtos/source/COMPONENT_ZEPHYR/cyabs_rtos_zephyr.c @@ -231,10 +231,6 @@ cy_rslt_t cy_rtos_get_thread_state(cy_thread_t *thread, cy_thread_state_t *state *state = CY_THREAD_STATE_UNKNOWN; break; - case _THREAD_PRESTART: - *state = CY_THREAD_STATE_INACTIVE; - break; - case _THREAD_SUSPENDED: case _THREAD_PENDING: *state = CY_THREAD_STATE_BLOCKED; diff --git a/subsys/mgmt/mcumgr/transport/src/smp_udp.c b/subsys/mgmt/mcumgr/transport/src/smp_udp.c index 4e47ac8ab11..0f44782bdbd 100644 --- a/subsys/mgmt/mcumgr/transport/src/smp_udp.c +++ b/subsys/mgmt/mcumgr/transport/src/smp_udp.c @@ -41,9 +41,9 @@ BUILD_ASSERT(0, "Either IPv4 or IPv6 SMP must be enabled for the MCUmgr UDP SMP BUILD_ASSERT(sizeof(struct sockaddr) <= CONFIG_MCUMGR_TRANSPORT_NETBUF_USER_DATA_SIZE, "CONFIG_MCUMGR_TRANSPORT_NETBUF_USER_DATA_SIZE must be >= sizeof(struct sockaddr)"); +/* FIXME: dangerous logic, use a kernel API for this */ #define IS_THREAD_RUNNING(thread) \ (thread.base.thread_state & (_THREAD_PENDING | \ - _THREAD_PRESTART | \ _THREAD_SUSPENDED | \ _THREAD_QUEUED) ? 
true : false) diff --git a/subsys/portability/cmsis_rtos_v1/cmsis_kernel.c b/subsys/portability/cmsis_rtos_v1/cmsis_kernel.c index 61f231ff65d..cd3b7263c4a 100644 --- a/subsys/portability/cmsis_rtos_v1/cmsis_kernel.c +++ b/subsys/portability/cmsis_rtos_v1/cmsis_kernel.c @@ -41,5 +41,5 @@ osStatus osKernelStart(void) */ int32_t osKernelRunning(void) { - return z_has_thread_started(&z_main_thread); + return !z_is_thread_suspended(&z_main_thread); } diff --git a/subsys/portability/cmsis_rtos_v1/cmsis_thread.c b/subsys/portability/cmsis_rtos_v1/cmsis_thread.c index 41752a51795..862982aa676 100644 --- a/subsys/portability/cmsis_rtos_v1/cmsis_thread.c +++ b/subsys/portability/cmsis_rtos_v1/cmsis_thread.c @@ -14,7 +14,7 @@ static inline int _is_thread_cmsis_inactive(struct k_thread *thread) { uint8_t state = thread->base.thread_state; - return state & (_THREAD_PRESTART | _THREAD_DEAD); + return state & _THREAD_DEAD; } static inline int32_t zephyr_to_cmsis_priority(uint32_t z_prio) diff --git a/subsys/portability/cmsis_rtos_v2/thread.c b/subsys/portability/cmsis_rtos_v2/thread.c index f5e9cd67027..0a757344313 100644 --- a/subsys/portability/cmsis_rtos_v2/thread.c +++ b/subsys/portability/cmsis_rtos_v2/thread.c @@ -39,7 +39,7 @@ static inline int _is_thread_cmsis_inactive(struct k_thread *thread) { uint8_t state = thread->base.thread_state; - return state & (_THREAD_PRESTART | _THREAD_DEAD); + return state & _THREAD_DEAD; } static inline uint32_t zephyr_to_cmsis_priority(uint32_t z_prio) @@ -308,9 +308,6 @@ osThreadState_t osThreadGetState(osThreadId_t thread_id) case _THREAD_DUMMY: state = osThreadError; break; - case _THREAD_PRESTART: - state = osThreadInactive; - break; case _THREAD_DEAD: state = osThreadTerminated; break; diff --git a/tests/benchmarks/thread_metric/src/tm_porting_layer_zephyr.c b/tests/benchmarks/thread_metric/src/tm_porting_layer_zephyr.c index fc03f25c466..6eeaa9f8af8 100644 --- a/tests/benchmarks/thread_metric/src/tm_porting_layer_zephyr.c +++ 
b/tests/benchmarks/thread_metric/src/tm_porting_layer_zephyr.c @@ -78,11 +78,7 @@ int tm_thread_create(int thread_id, int priority, void (*entry_function)(void *, */ int tm_thread_resume(int thread_id) { - if (test_thread[thread_id].base.thread_state & _THREAD_PRESTART) { - k_thread_start(&test_thread[thread_id]); - } else { - k_thread_resume(&test_thread[thread_id]); - } + k_thread_resume(&test_thread[thread_id]); return TM_SUCCESS; } diff --git a/tests/kernel/threads/thread_apis/src/test_kthread_for_each.c b/tests/kernel/threads/thread_apis/src/test_kthread_for_each.c index 883b6831b74..71f84e7b33f 100644 --- a/tests/kernel/threads/thread_apis/src/test_kthread_for_each.c +++ b/tests/kernel/threads/thread_apis/src/test_kthread_for_each.c @@ -213,7 +213,7 @@ ZTEST(threads_lifecycle_1cpu, test_k_thread_foreach_unlocked_null_cb) * @brief Test k_thread_state_str API with null callback * * @details It's impossible to sched a thread step by step manually to - * experience each state from _THREAD_PRESTART to _THREAD_DEAD. To cover each + * experience each state from initialization to _THREAD_DEAD. To cover each * line of function k_thread_state_str(), set thread_state of tdata1 and check * the string this function returns * @@ -245,10 +245,6 @@ ZTEST(threads_lifecycle_1cpu, test_k_thread_state_str) str = k_thread_state_str(tid, state_str, sizeof(state_str)); zassert_str_equal(str, "pending"); - tid->base.thread_state = _THREAD_PRESTART; - str = k_thread_state_str(tid, state_str, sizeof(state_str)); - zassert_str_equal(str, "prestart"); - tid->base.thread_state = _THREAD_DEAD; str = k_thread_state_str(tid, state_str, sizeof(state_str)); zassert_str_equal(str, "dead");