kernel: make _thread.sched_locked a non-atomic operator variable
Not needed, since only the thread itself can modify its own sched_locked count.

Change-Id: I3d3d8be548d2b24ca14f51637cc58bda66f8b9ee
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
parent c0f0dd9734
commit 04ed860c68

3 changed files with 8 additions and 8 deletions
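The rationale: sched_locked is only ever written by the thread that owns it, so no read-modify-write race is possible and plain increments/decrements suffice. Below is a minimal illustrative sketch of that pattern; the struct and helper names are hypothetical and not part of the commit:

struct per_thread_counter {
	/* Written only by the owning thread, so the increment/decrement
	 * needs no atomic operations; a plain integer is enough.
	 */
	int lock_count;
};

static inline void counter_inc(struct per_thread_counter *c)
{
	++c->lock_count;	/* plain ++ instead of atomic_inc() */
}

static inline void counter_dec(struct per_thread_counter *c)
{
	--c->lock_count;	/* plain -- instead of atomic_dec() */
}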
@@ -146,7 +146,7 @@ static inline int _is_coop(struct k_thread *thread)
 static inline int _is_preempt(struct k_thread *thread)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
-	return !_is_coop(thread) && !atomic_get(&thread->base.sched_locked);
+	return !_is_coop(thread) && !thread->base.sched_locked;
 #else
 	return 0;
 #endif
@@ -245,7 +245,7 @@ static inline void _sched_lock(void)
 #ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!_is_in_isr(), "");
 
-	atomic_inc(&_current->base.sched_locked);
+	++_current->base.sched_locked;
 
 	K_DEBUG("scheduler locked (%p:%d)\n",
 		_current, _current->base.sched_locked);
@@ -258,12 +258,12 @@ static inline void _sched_lock(void)
  * It is incumbent upon the caller to ensure that the reschedule occurs
  * sometime after the scheduler is unlocked.
  */
-static inline void _sched_unlock_no_reschedule(void)
+static ALWAYS_INLINE void _sched_unlock_no_reschedule(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!_is_in_isr(), "");
 
-	atomic_dec(&_current->base.sched_locked);
+	--_current->base.sched_locked;
 #endif
 }
 
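For context, the public API built on these internal helpers is used as follows. This is a minimal sketch assuming the k_sched_lock()/k_sched_unlock() calls and the <zephyr.h> include path of that era; the function body and shared data are hypothetical:

#include <zephyr.h>

static int shared_counter;	/* hypothetical data shared between threads */

void update_shared_counter(void)
{
	/* Block preemption by other threads while updating shared state;
	 * note that a scheduler lock does not mask interrupts.
	 */
	k_sched_lock();

	shared_counter++;

	k_sched_unlock();
}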