kernel: Decouple sleep from suspend

Sleeping and suspended are now orthogonal states. That is, a thread
may be both sleeping and suspended and the two do not interact. One
repercussion of this is that suspending a thread will no longer
abort its timeout.
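
For illustration, a minimal sketch of the new semantics (the thread ID
'sleeper_tid' and the timings are hypothetical, not part of this change):
a sleep timeout keeps running while its thread is suspended, and once the
timeout has expired the thread returns from k_sleep() as soon as it is
resumed.

    /* hypothetical sleeper thread */
    void sleeper(void *p1, void *p2, void *p3)
    {
        k_sleep(K_MSEC(100));  /* suspension no longer aborts this timeout */
        /* runs once 100 ms have elapsed AND the thread has been resumed */
    }

    /* hypothetical controller code, elsewhere */
    k_thread_suspend(sleeper_tid); /* sleeper is now both sleeping and suspended */
    k_msleep(200);                 /* the sleep timeout expires during suspension */
    k_thread_resume(sleeper_tid);  /* sleeper returns from k_sleep() immediately */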

Threads are now created in the 'sleeping' state instead of a
'suspended' state. This dovetails nicely with the start delay that
can be given to a newly created thread--it is as though the very
first operation that a thread with a start delay performs is a sleep.
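
Under that model, a start delay is effectively an initial sleep, so it can
even be cut short with k_wakeup(). A minimal sketch, assuming a hypothetical
entry function 'entry_fn' and run from some setup function:

    K_THREAD_STACK_DEFINE(my_stack, 1024);
    struct k_thread my_thread;

    /* created in the 'sleeping' state for the duration of the start delay */
    k_tid_t tid = k_thread_create(&my_thread, my_stack,
                                  K_THREAD_STACK_SIZEOF(my_stack),
                                  entry_fn, NULL, NULL, NULL,
                                  5, 0, K_MSEC(500));

    k_wakeup(tid);  /* wakes the thread early, just as it would end a sleep */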

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
Authored by Peter Mitsis on 2024-11-18 09:46:24 -08:00; committed by Benjamin Cabé
commit 35435928c2
6 changed files with 35 additions and 43 deletions


@@ -542,8 +542,6 @@ __syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
* This routine puts the current thread to sleep for @a duration,
* specified as a k_timeout_t object.
*
* @note if @a timeout is set to K_FOREVER then the thread is suspended.
*
* @param timeout Desired duration of sleep.
*
* @return Zero if the requested time has elapsed or if the thread was woken up
@@ -1024,10 +1022,11 @@ int k_thread_cpu_pin(k_tid_t thread, int cpu);
* This routine prevents the kernel scheduler from making @a thread
* the current thread. All other internal operations on @a thread are
* still performed; for example, kernel objects it is waiting on are
* still handed to it. Note that any existing timeouts
* (e.g. k_sleep(), or a timeout argument to k_sem_take() et. al.)
* will be canceled. On resume, the thread will begin running
* immediately and return from the blocked call.
* still handed to it. Thread suspension does not impact any timeout
* upon which the thread may be waiting (such as a timeout from a call
* to k_sem_take() or k_sleep()). Thus if the timeout expires while the
* thread is suspended, it is still suspended until k_thread_resume()
* is called.
*
* When the target thread is active on another CPU, the caller will block until
* the target thread is halted (suspended or aborted). But if the caller is in
@@ -1043,8 +1042,9 @@ __syscall void k_thread_suspend(k_tid_t thread);
/**
* @brief Resume a suspended thread.
*
* This routine allows the kernel scheduler to make @a thread the current
* thread, when it is next eligible for that role.
* This routine reverses the thread suspension from k_thread_suspend()
* and allows the kernel scheduler to make @a thread the current thread
* when it is next eligible for that role.
*
* If @a thread is not currently suspended, the routine has no effect.
*
@@ -1060,14 +1060,14 @@ __syscall void k_thread_resume(k_tid_t thread);
* on it.
*
* @note This is a legacy API for compatibility. Modern Zephyr
* threads are initialized in the "suspended" state and no not need
* threads are initialized in the "sleeping" state and do not need
* special handling for "start".
*
* @param thread thread to start
*/
static inline void k_thread_start(k_tid_t thread)
{
k_thread_resume(thread);
k_wakeup(thread);
}
/**

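To make the revised k_thread_suspend() contract above concrete, a hedged
sketch (the semaphore 'my_sem' and the return-value handling are assumptions,
not taken from this diff): a blocking call's timeout keeps ticking while the
thread is suspended, and the call only returns once the thread is resumed.

    /* inside a hypothetical worker thread */
    int ret = k_sem_take(&my_sem, K_MSEC(50));
    /*
     * If the thread is suspended when the 50 ms expire, it stays
     * suspended; only after k_thread_resume() does k_sem_take()
     * return -EAGAIN for the elapsed timeout.
     */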

@@ -146,6 +146,11 @@ static inline void z_mark_thread_as_not_pending(struct k_thread *thread)
thread->base.thread_state &= ~_THREAD_PENDING;
}
static inline bool z_is_thread_sleeping(struct k_thread *thread)
{
return (thread->base.thread_state & _THREAD_SLEEPING) != 0U;
}
static inline void z_mark_thread_as_sleeping(struct k_thread *thread)
{
thread->base.thread_state |= _THREAD_SLEEPING;


@@ -598,7 +598,7 @@ static void init_idle_thread(int i)
stack_size, idle, &_kernel.cpus[i],
NULL, NULL, K_IDLE_PRIO, K_ESSENTIAL,
tname);
z_mark_thread_as_not_suspended(thread);
z_mark_thread_as_not_sleeping(thread);
#ifdef CONFIG_SMP
thread->base.is_idle = 1U;
@@ -675,7 +675,7 @@ static char *prepare_multithreading(void)
NULL, NULL, NULL,
CONFIG_MAIN_THREAD_PRIORITY,
K_ESSENTIAL, "main");
z_mark_thread_as_not_suspended(&z_main_thread);
z_mark_thread_as_not_sleeping(&z_main_thread);
z_ready_thread(&z_main_thread);
z_init_cpu(0);


@@ -498,8 +498,6 @@ void z_impl_k_thread_suspend(k_tid_t thread)
return;
}
(void)z_abort_thread_timeout(thread);
k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
if ((thread->base.thread_state & _THREAD_SUSPENDED) != 0U) {
@@ -631,7 +629,7 @@ void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
if (thread->base.pended_on != NULL) {
unpend_thread_no_timeout(thread);
}
z_mark_thread_as_not_suspended(thread);
z_mark_thread_as_not_sleeping(thread);
ready_thread(thread);
}
}
@@ -1111,12 +1109,10 @@ static int32_t z_tick_sleep(k_ticks_t ticks)
#endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
unready_thread(arch_current_thread());
z_add_thread_timeout(arch_current_thread(), timeout);
z_mark_thread_as_suspended(arch_current_thread());
z_mark_thread_as_sleeping(arch_current_thread());
(void)z_swap(&_sched_spinlock, key);
__ASSERT(!z_is_thread_state_set(arch_current_thread(), _THREAD_SUSPENDED), "");
/* We require a 32 bit unsigned subtraction to handle wraparound */
uint32_t left_ticks = expected_wakeup_ticks - sys_clock_tick_get_32();
@@ -1137,20 +1133,12 @@ int32_t z_impl_k_sleep(k_timeout_t timeout)
SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout);
/* in case of K_FOREVER, we suspend */
if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
k_thread_suspend(arch_current_thread());
SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER);
return (int32_t) K_TICKS_FOREVER;
}
ticks = timeout.ticks;
ticks = z_tick_sleep(ticks);
int32_t ret = k_ticks_to_ms_ceil64(ticks);
int32_t ret = K_TIMEOUT_EQ(timeout, K_FOREVER) ? K_TICKS_FOREVER :
k_ticks_to_ms_ceil64(ticks);
SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ret);
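
As a consequence of the rework above, k_sleep(K_FOREVER) now parks the thread
in an indefinite sleep instead of suspending it, so k_wakeup() rather than
k_thread_resume() is what revives it. A hedged sketch of the resulting
behavior:

    int32_t ret = k_sleep(K_FOREVER);  /* sleeps forever; no longer suspends */
    /* reached only after another thread calls k_wakeup();
     * ret == K_TICKS_FOREVER in that case
     */
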
@@ -1193,25 +1181,19 @@ void z_impl_k_wakeup(k_tid_t thread)
{
SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);
if (z_is_thread_pending(thread)) {
return;
}
if (z_abort_thread_timeout(thread) < 0) {
/* Might have just been sleeping forever */
if (thread->base.thread_state != _THREAD_SUSPENDED) {
return;
}
}
(void)z_abort_thread_timeout(thread);
k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
z_mark_thread_as_not_suspended(thread);
if (thread_active_elsewhere(thread) == NULL) {
ready_thread(thread);
if (!z_is_thread_sleeping(thread)) {
k_spin_unlock(&_sched_spinlock, key);
return;
}
z_mark_thread_as_not_sleeping(thread);
ready_thread(thread);
if (arch_is_in_isr()) {
k_spin_unlock(&_sched_spinlock, key);
} else {


@@ -543,7 +543,7 @@ char *z_setup_new_thread(struct k_thread *new_thread,
z_waitq_init(&new_thread->join_queue);
/* Initialize various struct k_thread members */
z_init_thread_base(&new_thread->base, prio, _THREAD_SUSPENDED, options);
z_init_thread_base(&new_thread->base, prio, _THREAD_SLEEPING, options);
stack_ptr = setup_thread_stack(new_thread, stack, stack_size);
#ifdef CONFIG_KERNEL_COHERENCE


@@ -69,6 +69,11 @@ int tm_thread_create(int thread_id, int priority, void (*entry_function)(void *,
TM_TEST_STACK_SIZE, entry_function,
NULL, NULL, NULL, priority, 0, K_FOREVER);
/* Thread started in sleeping state. Switch to suspended state */
k_thread_suspend(&test_thread[thread_id]);
k_wakeup(&test_thread[thread_id]);
return (tid == &test_thread[thread_id]) ? TM_SUCCESS : TM_ERROR;
}
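
The ordering above is deliberate: k_thread_suspend() first marks the
still-sleeping thread as suspended, and k_wakeup() then clears only the
sleeping state, leaving the thread purely suspended. Because the two states
no longer interact, the benchmark can later make the thread runnable with a
plain resume (a sketch, assuming the thread-metric resume path maps to
k_thread_resume()):

    k_thread_resume(&test_thread[thread_id]);  /* thread becomes runnable */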