From 04ed860c68040139a7aeb7ae8fda3e3d2439cd3b Mon Sep 17 00:00:00 2001
From: Benjamin Walsh
Date: Wed, 21 Dec 2016 14:36:43 -0500
Subject: [PATCH] kernel: make _thread.sched_locked a non-atomic operator
 variable

Not needed, since only the thread itself can modify its own
sched_locked count.

Change-Id: I3d3d8be548d2b24ca14f51637cc58bda66f8b9ee
Signed-off-by: Benjamin Walsh
---
 kernel/include/kernel_structs.h | 4 ++--
 kernel/include/ksched.h         | 8 ++++----
 kernel/sched.c                  | 4 ++--
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/kernel/include/kernel_structs.h b/kernel/include/kernel_structs.h
index c82fc61a5eb..4de35ab3563 100644
--- a/kernel/include/kernel_structs.h
+++ b/kernel/include/kernel_structs.h
@@ -82,7 +82,7 @@ struct _thread_base {
 	int prio;
 
 	/* scheduler lock count */
-	atomic_t sched_locked;
+	volatile uint32_t sched_locked;
 
 	/* data returned by APIs */
 	void *swap_data;
@@ -136,7 +136,7 @@ typedef struct k_thread _thread_t;
 
 struct _ready_q {
 
-	/* next thread to run if known, NULL otherwise */
+	/* always contains next thread to run: cannot be NULL */
 	struct k_thread *cache;
 
 	/* bitmap of priorities that contain at least one ready thread */
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 7cfc08de54a..222ad2a70d5 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -146,7 +146,7 @@ static inline int _is_coop(struct k_thread *thread)
 static inline int _is_preempt(struct k_thread *thread)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
-	return !_is_coop(thread) && !atomic_get(&thread->base.sched_locked);
+	return !_is_coop(thread) && !thread->base.sched_locked;
 #else
 	return 0;
 #endif
@@ -245,7 +245,7 @@ static inline void _sched_lock(void)
 #ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!_is_in_isr(), "");
 
-	atomic_inc(&_current->base.sched_locked);
+	++_current->base.sched_locked;
 
 	K_DEBUG("scheduler locked (%p:%d)\n",
 		_current, _current->base.sched_locked);
@@ -258,12 +258,12 @@ static inline void _sched_lock(void)
  * It is incumbent upon the caller to ensure that the reschedule occurs
  * sometime after the scheduler is unlocked.
  */
-static inline void _sched_unlock_no_reschedule(void)
+static ALWAYS_INLINE void _sched_unlock_no_reschedule(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!_is_in_isr(), "");
 
-	atomic_dec(&_current->base.sched_locked);
+	--_current->base.sched_locked;
 #endif
 }
 
diff --git a/kernel/sched.c b/kernel/sched.c
index c6674c01316..96f90e58a46 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -149,7 +149,7 @@ void k_sched_lock(void)
 #ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!_is_in_isr(), "");
 
-	atomic_inc(&_current->base.sched_locked);
+	++_current->base.sched_locked;
 
 	K_DEBUG("scheduler locked (%p:%d)\n",
 		_current, _current->base.sched_locked);
@@ -164,7 +164,7 @@ void k_sched_unlock(void)
 
 	int key = irq_lock();
 
-	atomic_dec(&_current->base.sched_locked);
+	--_current->base.sched_locked;
 
 	K_DEBUG("scheduler unlocked (%p:%d)\n",
 		_current, _current->base.sched_locked);
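
Usage sketch (illustrative only, not part of the patch): the lock count touched
above nests per thread, and only the running thread ever increments or
decrements its own sched_locked, which is why plain ++/-- replaces the atomic
operations. The sketch assumes a Zephyr tree of this vintage where <kernel.h>
declares k_sched_lock() and k_sched_unlock(); shared_counter and
update_shared_counter are hypothetical names.

#include <kernel.h>

static int shared_counter;          /* hypothetical data shared between threads */

void update_shared_counter(void)    /* hypothetical helper, runs in thread context */
{
	/* Each call bumps _current->base.sched_locked. The count is written
	 * only by the thread that owns it, never by an ISR or another
	 * thread, so no atomic_inc()/atomic_dec() is needed.
	 */
	k_sched_lock();
	k_sched_lock();             /* nested lock: count goes 1 -> 2 */

	shared_counter++;           /* no preemption by other threads here */

	k_sched_unlock();           /* count 2 -> 1, scheduler still locked */
	k_sched_unlock();           /* count 1 -> 0, rescheduling may occur */
}

Preemption by other threads is held off only while the count is non-zero;
interrupts still run, and as the hunk above shows, k_sched_unlock() takes
irq_lock() around the decrement so the subsequent reschedule decision is made
consistently with respect to interrupts.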