diff --git a/kernel/unified/include/kernel_structs.h b/kernel/unified/include/kernel_structs.h
index e4483f77dbf..8ec5d99532f 100644
--- a/kernel/unified/include/kernel_structs.h
+++ b/kernel/unified/include/kernel_structs.h
@@ -33,11 +33,6 @@
 /* thread is defined statically */
 #define K_STATIC (1 << 8)
 
-#define K_READY 0
-
-/* Thread is waiting on a timeout */
-#define K_TIMING (1 << 12)
-
 /* Thread is waiting on an object */
 #define K_PENDING (1 << 13)
 
@@ -53,9 +48,6 @@
 /* Not a real thread */
 #define K_DUMMY (1 << 17)
 
-#define K_EXECUTION_MASK \
-	(K_TIMING | K_PENDING | K_PRESTART | K_DEAD | K_SUSPENDED | K_DUMMY)
-
 #if defined(CONFIG_FP_SHARING)
 /* thread uses floating point registers */
 #define K_FP_REGS (1 << 4)
diff --git a/kernel/unified/include/ksched.h b/kernel/unified/include/ksched.h
index 8c091eba7cd..08dab077ffa 100644
--- a/kernel/unified/include/ksched.h
+++ b/kernel/unified/include/ksched.h
@@ -258,22 +258,14 @@ static inline void _mark_thread_as_not_suspended(struct k_thread *thread)
 	thread->base.flags &= ~K_SUSPENDED;
 }
 
-/* mark a thread as being in the timer queue */
-static inline void _mark_thread_as_timing(struct k_thread *thread)
+/* check if a thread is on the timeout queue */
+static inline int _is_thread_timeout_active(struct k_thread *thread)
 {
-	thread->base.flags |= K_TIMING;
-}
-
-/* mark a thread as not being in the timer queue */
-static inline void _mark_thread_as_not_timing(struct k_thread *thread)
-{
-	thread->base.flags &= ~K_TIMING;
-}
-
-/* check if a thread is on the timer queue */
-static inline int _is_thread_timing(struct k_thread *thread)
-{
-	return !!(thread->base.flags & K_TIMING);
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	return thread->base.timeout.delta_ticks_from_prev != -1;
+#else
+	return 0;
+#endif
 }
 
 static inline int _has_thread_started(struct k_thread *thread)
@@ -281,10 +273,19 @@ static inline int _has_thread_started(struct k_thread *thread)
 	return !(thread->base.flags & K_PRESTART);
 }
 
+static inline int _is_thread_prevented_from_running(struct k_thread *thread)
+{
+	return thread->base.flags & (K_PENDING | K_PRESTART |
+				     K_DEAD | K_DUMMY |
+				     K_SUSPENDED);
+
+}
+
 /* check if a thread is ready */
 static inline int _is_thread_ready(struct k_thread *thread)
 {
-	return (thread->base.flags & K_EXECUTION_MASK) == K_READY;
+	return !(_is_thread_prevented_from_running(thread) ||
+		 _is_thread_timeout_active(thread));
 }
 
 /* mark a thread as pending in its TCS */
@@ -305,11 +306,22 @@ static inline int _is_thread_pending(struct k_thread *thread)
 	return !!(thread->base.flags & K_PENDING);
 }
 
-/*
- * Mark the thread as not being in the timer queue. If this makes it ready,
- * then add it to the ready queue according to its priority.
+/**
+ * @brief Mark a thread as started
+ *
+ * This routine must be called with interrupts locked.
+ */
+static inline void _mark_thread_as_started(struct k_thread *thread)
+{
+	thread->base.flags &= ~K_PRESTART;
+}
+
+/*
+ * Put the thread in the ready queue according to its priority if it is not
+ * blocked for another reason (eg. suspended).
+ *
+ * Must be called with interrupts locked.
  */
-/* must be called with interrupts locked */
 static inline void _ready_thread(struct k_thread *thread)
 {
 	__ASSERT(_is_prio_higher(thread->base.prio, K_LOWEST_THREAD_PRIO) ||
@@ -324,24 +336,14 @@ static inline void _ready_thread(struct k_thread *thread)
 		 "thread %p prio too high (id %d, cannot be higher than %d)",
 		 thread, thread->base.prio, K_HIGHEST_THREAD_PRIO);
 
-	/* K_PRESTART is needed to handle the start-with-delay case */
-	_reset_thread_states(thread, K_TIMING|K_PRESTART);
+	/* needed to handle the start-with-delay case */
+	_mark_thread_as_started(thread);
 
 	if (_is_thread_ready(thread)) {
 		_add_thread_to_ready_q(thread);
 	}
 }
 
-/**
- * @brief Mark a thread as started
- *
- * This routine must be called with interrupts locked.
- */
-static inline void _mark_thread_as_started(struct k_thread *thread)
-{
-	thread->base.flags &= ~K_PRESTART;
-}
-
 /**
  * @brief Mark thread as dead
  *
diff --git a/kernel/unified/include/timeout_q.h b/kernel/unified/include/timeout_q.h
index bbfaea78e80..fc730918d35 100644
--- a/kernel/unified/include/timeout_q.h
+++ b/kernel/unified/include/timeout_q.h
@@ -98,6 +98,8 @@ static inline struct _timeout *_handle_one_timeout(
 	struct _timeout *t = (void *)sys_dlist_get(timeout_q);
 	struct k_thread *thread = t->thread;
 
+	t->delta_ticks_from_prev = -1;
+
 	K_DEBUG("timeout %p\n", t);
 	if (thread != NULL) {
 		_unpend_thread_timing_out(thread, t);
@@ -105,14 +107,6 @@ static inline struct _timeout *_handle_one_timeout(
 	} else if (t->func) {
 		t->func(t);
 	}
-	/*
-	 * Note: t->func() may add timeout again. Make sure that
-	 * delta_ticks_from_prev is set to -1 only if timeout is
-	 * still expired (delta_ticks_from_prev == 0)
-	 */
-	if (t->delta_ticks_from_prev == 0) {
-		t->delta_ticks_from_prev = -1;
-	}
 
 	return (struct _timeout *)sys_dlist_peek_head(timeout_q);
 }
diff --git a/kernel/unified/include/wait_q.h b/kernel/unified/include/wait_q.h
index 02d59a1e05b..41f6b644c10 100644
--- a/kernel/unified/include/wait_q.h
+++ b/kernel/unified/include/wait_q.h
@@ -34,8 +34,21 @@ static ALWAYS_INLINE void
 _init_thread_timeout(struct _thread_base *thread_base)
 {
 	ARG_UNUSED(thread_base);
 }
-#define _add_thread_timeout(thread, wait_q, timeout) do { } while (0)
-static inline int _abort_thread_timeout(struct k_thread *thread) { return 0; }
+
+static ALWAYS_INLINE void
+_add_thread_timeout(struct k_thread *thread, _wait_q_t *wait_q, int32_t timeout)
+{
+	ARG_UNUSED(thread);
+	ARG_UNUSED(wait_q);
+	ARG_UNUSED(timeout);
+}
+
+static ALWAYS_INLINE int _abort_thread_timeout(struct k_thread *thread)
+{
+	ARG_UNUSED(thread);
+
+	return 0;
+}
 #define _get_next_timeout_expiry() (K_FOREVER)
 #endif
diff --git a/kernel/unified/legacy_timer.c b/kernel/unified/legacy_timer.c
index aae48913514..d7b52895263 100644
--- a/kernel/unified/legacy_timer.c
+++ b/kernel/unified/legacy_timer.c
@@ -33,7 +33,6 @@ void _legacy_sleep(int32_t ticks)
 
 	int key = irq_lock();
 
-	_mark_thread_as_timing(_current);
 	_remove_thread_from_ready_q(_current);
 	_add_thread_timeout(_current, NULL, ticks);
 
diff --git a/kernel/unified/sched.c b/kernel/unified/sched.c
index 74090cb78c2..f0729be0fc5 100644
--- a/kernel/unified/sched.c
+++ b/kernel/unified/sched.c
@@ -168,9 +168,9 @@ void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, int32_t timeout)
 	_mark_thread_as_pending(thread);
 
 	if (timeout != K_FOREVER) {
-		_mark_thread_as_timing(thread);
-		_add_thread_timeout(thread, wait_q,
-				    _TICK_ALIGN + _ms_to_ticks(timeout));
+		int32_t ticks = _TICK_ALIGN + _ms_to_ticks(timeout);
+
+		_add_thread_timeout(thread, wait_q, ticks);
 	}
 }
 
@@ -306,12 +306,11 @@ void k_sleep(int32_t duration)
 		return;
 	}
 
+	int32_t ticks = _TICK_ALIGN + _ms_to_ticks(duration);
 	int key = irq_lock();
 
-	_mark_thread_as_timing(_current);
 	_remove_thread_from_ready_q(_current);
-	_add_thread_timeout(_current, NULL,
-			    _TICK_ALIGN + _ms_to_ticks(duration));
+	_add_thread_timeout(_current, NULL, ticks);
 
 	_Swap(key);
 }
diff --git a/kernel/unified/sem.c b/kernel/unified/sem.c
index 045ced996a2..ec3bc95f67b 100644
--- a/kernel/unified/sem.c
+++ b/kernel/unified/sem.c
@@ -222,8 +222,8 @@ static int handle_sem_group(struct k_sem *sem, struct k_thread *thread)
 	 */
 
 	if (!_is_thread_ready(desc->thread)) {
-		_reset_thread_states(desc->thread, K_PENDING | K_TIMING);
 		_abort_thread_timeout(desc->thread);
+		_mark_thread_as_not_pending(desc->thread);
 		if (_is_thread_ready(desc->thread)) {
 			_add_thread_to_ready_q(desc->thread);
 		}
diff --git a/kernel/unified/thread.c b/kernel/unified/thread.c
index 2d6ca1748f4..b2a14259db6 100644
--- a/kernel/unified/thread.c
+++ b/kernel/unified/thread.c
@@ -220,9 +220,9 @@ static void schedule_new_thread(struct k_thread *thread, int32_t delay)
 	if (delay == 0) {
 		start_thread(thread);
 	} else {
-		_mark_thread_as_timing(thread);
-		_add_thread_timeout(thread, NULL,
-				    _TICK_ALIGN + _ms_to_ticks(delay));
+		int32_t ticks = _TICK_ALIGN + _ms_to_ticks(delay);
+
+		_add_thread_timeout(thread, NULL, ticks);
 	}
 #else
 	ARG_UNUSED(delay);
@@ -252,7 +252,8 @@ int k_thread_cancel(k_tid_t tid)
 
 	int key = irq_lock();
 
-	if (_has_thread_started(thread) || !_is_thread_timing(thread)) {
+	if (_has_thread_started(thread) ||
+	    !_is_thread_timeout_active(thread)) {
 		irq_unlock(key);
 		return -EINVAL;
 	}
@@ -365,9 +366,8 @@ void _k_thread_single_abort(struct k_thread *thread)
 		if (_is_thread_pending(thread)) {
 			_unpend_thread(thread);
 		}
-		if (_is_thread_timing(thread)) {
+		if (_is_thread_timeout_active(thread)) {
 			_abort_thread_timeout(thread);
-			_mark_thread_as_not_timing(thread);
 		}
 	}
 	_mark_thread_as_dead(thread);
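
Illustrative sketch (not part of the patch): a minimal standalone model of the state logic this diff converges on. Timeout-queue membership is now inferred from delta_ticks_from_prev (-1 meaning "not queued") instead of a dedicated K_TIMING flag, and readiness is the absence of any blocking flag combined with an inactive timeout. The K_PENDING and K_DUMMY values come from the kernel_structs.h hunk above; the other flag values, the struct layout, and the helper names are simplified assumptions for illustration only, not the kernel's real definitions.

/*
 * Simplified model of the patched ksched.h checks; struct thread_model is an
 * assumption for illustration, not the real kernel_structs.h layout.
 */
#include <stdint.h>
#include <stdio.h>

#define K_PENDING   (1 << 13)
#define K_PRESTART  (1 << 14)
#define K_DEAD      (1 << 15)
#define K_SUSPENDED (1 << 16)
#define K_DUMMY     (1 << 17)

struct thread_model {
	uint32_t flags;                /* blocking-state flags only */
	int32_t delta_ticks_from_prev; /* -1 means: not on the timeout queue */
};

/* mirrors _is_thread_prevented_from_running(): is any blocking flag set? */
static int prevented_from_running(const struct thread_model *t)
{
	return t->flags & (K_PENDING | K_PRESTART | K_DEAD | K_DUMMY | K_SUSPENDED);
}

/* mirrors _is_thread_timeout_active(): queue membership is implicit */
static int timeout_active(const struct thread_model *t)
{
	return t->delta_ticks_from_prev != -1;
}

/* mirrors the new _is_thread_ready(): no K_TIMING flag to consult anymore */
static int is_ready(const struct thread_model *t)
{
	return !(prevented_from_running(t) || timeout_active(t));
}

int main(void)
{
	struct thread_model t = { .flags = 0, .delta_ticks_from_prev = 5 };

	printf("sleeping: ready=%d\n", is_ready(&t)); /* 0: timeout still active */
	t.delta_ticks_from_prev = -1;                 /* timeout expired or aborted */
	printf("woken:    ready=%d\n", is_ready(&t)); /* 1: thread is ready */
	return 0;
}

The timeout_q.h hunk is what appears to make this single source of truth workable: the dequeued timeout is marked inactive (delta_ticks_from_prev = -1) before its handler runs, so a handler that re-adds the timeout simply gives it a fresh delta and the old conditional reset is no longer needed.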