From 8d7c274e55a0ca9742a4ca05daa57b6c52afe3db Mon Sep 17 00:00:00 2001
From: Benjamin Walsh
Date: Sat, 11 Feb 2017 10:50:27 -0500
Subject: [PATCH] kernel/sched: protect thread sched_lock with compiler
 barriers

This has not bitten us yet, but it was a ticking time bomb. This is
similar to the issue that was found with the irq_lock/irq_unlock
implementations on several architectures.

Making the variable volatile is not the way to force sched_locked to be
incremented/decremented around the accesses to the data it protects:
volatile only orders accesses to the volatile object itself, and does
not prevent the compiler from reordering surrounding non-volatile
memory accesses. Instead, a compiler barrier must keep the compiler
from moving those accesses across the update of sched_locked.

This is needed in the inline implementations _sched_lock() and
_sched_unlock_no_reschedule(), which resolve to a simple decrement and
increment of the per-thread sched_locked variable.

Change-Id: I06f5b3524889f193efe69caa947118404b1be0b5
Signed-off-by: Benjamin Walsh
---
 kernel/include/kernel_structs.h | 4 ++--
 kernel/include/ksched.h         | 6 +++++-
 kernel/sched.c                  | 9 ++++++++-
 3 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/kernel/include/kernel_structs.h b/kernel/include/kernel_structs.h
index f6c504f485d..9fdb28e4fca 100644
--- a/kernel/include/kernel_structs.h
+++ b/kernel/include/kernel_structs.h
@@ -93,11 +93,11 @@ struct _thread_base {
 	union {
 		struct {
 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-			volatile uint8_t sched_locked;
+			uint8_t sched_locked;
 			int8_t prio;
 #else /* LITTLE and PDP */
 			int8_t prio;
-			volatile uint8_t sched_locked;
+			uint8_t sched_locked;
 #endif
 		};
 		uint16_t preempt;
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 00f916b84f1..23999a1e8da 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016 Wind River Systems, Inc.
+ * Copyright (c) 2016-2017 Wind River Systems, Inc.
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -239,6 +239,8 @@ static inline void _sched_lock(void)
 
 	--_current->base.sched_locked;
 
+	compiler_barrier();
+
 	K_DEBUG("scheduler locked (%p:%d)\n",
 		_current, _current->base.sched_locked);
 #endif
@@ -256,6 +258,8 @@ static ALWAYS_INLINE void _sched_unlock_no_reschedule(void)
 	__ASSERT(!_is_in_isr(), "");
 	__ASSERT(_current->base.sched_locked != 0, "");
 
+	compiler_barrier();
+
 	++_current->base.sched_locked;
 #endif
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index 0771e0ecbc1..e2da289975c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016 Wind River Systems, Inc.
+ * Copyright (c) 2016-2017 Wind River Systems, Inc.
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -142,6 +142,11 @@ void k_sched_lock(void)
 
 	--_current->base.sched_locked;
 
+	/* Probably not needed since we're in a real function,
+	 * but it doesn't hurt.
+	 */
+	compiler_barrier();
+
 	K_DEBUG("scheduler locked (%p:%d)\n",
 		_current, _current->base.sched_locked);
 #endif
@@ -155,6 +160,8 @@ void k_sched_unlock(void)
 
 	int key = irq_lock();
 
+	/* compiler_barrier() not needed, comes from irq_lock() */
+
 	++_current->base.sched_locked;
 
 	K_DEBUG("scheduler unlocked (%p:%d)\n",
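
For readers unfamiliar with the technique, here is a minimal, self-contained
sketch of the pattern this patch relies on, assuming a GCC-compatible
toolchain. Zephyr provides compiler_barrier() in its toolchain headers; the
asm-clobber definition below is the common GCC-style implementation, shown
purely for illustration. The fake_thread structure and the lock/unlock
helpers are simplified stand-ins for the kernel code above, not actual
kernel APIs.

#include <stdint.h>

/* Full compiler barrier: the "memory" clobber tells the compiler that the
 * asm statement may read or write any memory, so it must flush cached
 * register copies of variables and must not move memory accesses across
 * this point. It emits no machine instructions.
 */
#define compiler_barrier() __asm__ volatile ("" ::: "memory")

struct fake_thread {
	uint8_t sched_locked;	/* plain, non-volatile: the barriers do the work */
};

static struct fake_thread fake_current;

static inline void fake_sched_lock(void)
{
	/* As in the patch, locking decrements the counter */
	--fake_current.sched_locked;

	/* Barrier after taking the lock: accesses to the protected data
	 * that follow cannot be hoisted above the decrement.
	 */
	compiler_barrier();
}

static inline void fake_sched_unlock(void)
{
	/* Barrier before releasing the lock: accesses to the protected data
	 * that precede cannot be sunk below the increment.
	 */
	compiler_barrier();

	++fake_current.sched_locked;
}

int main(void)
{
	fake_sched_lock();
	/* ... touch scheduler-protected data here ... */
	fake_sched_unlock();
	return 0;
}

Note that the barrier constrains only the compiler, not the hardware. That
is sufficient in this situation because, as with the irq_lock/irq_unlock
fix the commit message cites, the variable guards against preemption on the
same CPU: code running on that CPU observes its own memory accesses in
program order once the compiler is prevented from reordering them.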