kernel/sched: Fix lockless ordering in halt_thread()
We've had threads spinning on the thread state bits, but we weren't being careful to ensure that those bits were the last thing seen to change in a halting thread. Move the state-bit update to the end of halt_thread(), and add a memory barrier for correctness. Signed-off-by: Andy Ross <andyross@google.com>
This commit is contained in:
parent
20611f13ca
commit
02b24911f7
1 changed file with 8 additions and 1 deletion
|
@ -195,6 +195,7 @@ static inline bool is_halting(struct k_thread *thread)
|
||||||
/* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
|
/* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
|
||||||
static inline void clear_halting(struct k_thread *thread)
|
static inline void clear_halting(struct k_thread *thread)
|
||||||
{
|
{
|
||||||
|
barrier_dmem_fence_full(); /* Other cpus spin on this locklessly! */
|
||||||
thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING);
|
thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1296,7 +1297,6 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
|
||||||
*/
|
*/
|
||||||
if ((thread->base.thread_state & new_state) == 0U) {
|
if ((thread->base.thread_state & new_state) == 0U) {
|
||||||
thread->base.thread_state |= new_state;
|
thread->base.thread_state |= new_state;
|
||||||
clear_halting(thread);
|
|
||||||
if (z_is_thread_queued(thread)) {
|
if (z_is_thread_queued(thread)) {
|
||||||
dequeue_thread(thread);
|
dequeue_thread(thread);
|
||||||
}
|
}
|
||||||
|
@ -1324,6 +1324,7 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
|
||||||
update_cache(1);
|
update_cache(1);
|
||||||
|
|
||||||
if (new_state == _THREAD_SUSPENDED) {
|
if (new_state == _THREAD_SUSPENDED) {
|
||||||
|
clear_halting(thread);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1365,6 +1366,12 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
|
||||||
if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
|
if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
|
||||||
z_dummy_thread_init(&_thread_dummy);
|
z_dummy_thread_init(&_thread_dummy);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Finally update the halting thread state, on which
|
||||||
|
* other CPUs might be spinning (see
|
||||||
|
* thread_halt_spin()).
|
||||||
|
*/
|
||||||
|
clear_halting(thread);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue