kernel/arch: reverse polarity on sched_locked

This will allow for an enhancement to the check of whether the current
thread is preemptible when exiting an interrupt.

Change-Id: If93ccd1916eacb5e02a4d15b259fb74f9800d6f4
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
Benjamin Walsh 2016-12-21 14:54:04 -05:00 committed by Andrew Boie
commit e6a69cae54
7 changed files with 14 additions and 11 deletions

@@ -244,8 +244,9 @@ static inline void _sched_lock(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!_is_in_isr(), "");
+	__ASSERT(_current->base.sched_locked != 1, "");
 
-	++_current->base.sched_locked;
+	--_current->base.sched_locked;
 
 	K_DEBUG("scheduler locked (%p:%d)\n",
 		_current, _current->base.sched_locked);
@@ -262,8 +263,9 @@ static ALWAYS_INLINE void _sched_unlock_no_reschedule(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!_is_in_isr(), "");
+	__ASSERT(_current->base.sched_locked != 0, "");
 
-	--_current->base.sched_locked;
+	++_current->base.sched_locked;
 #endif
 }
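
For context, below is a minimal standalone sketch of the reversed-polarity counter and of the kind of single-comparison preemptibility check the commit message alludes to. The bundling layout (priority in the low byte, lock count in the high byte), the 0x0080 threshold, and all names (sched_lock, sched_unlock, bundle_preempt, is_preempt) are illustrative assumptions, not code introduced by this commit.

/* Sketch only: illustrates the reversed-polarity scheduler-lock count. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* 0 means "not locked"; locking decrements, so the first lock yields 0xff. */
static uint8_t sched_locked;

static void sched_lock(void)
{
	/* A value of 1 means 255 nested locks are already held; one more
	 * decrement would wrap to 0 and read as "unlocked".
	 */
	assert(sched_locked != 1);
	--sched_locked;
}

static void sched_unlock(void)
{
	/* 0 means the scheduler is not locked: this unlock is unbalanced. */
	assert(sched_locked != 0);
	++sched_locked;
}

/* Hypothetical bundling: priority (signed) in the low byte, lock count in
 * the high byte of one 16-bit value.
 */
static uint16_t bundle_preempt(int8_t prio, uint8_t lock_count)
{
	return (uint16_t)(((uint16_t)lock_count << 8) | (uint8_t)prio);
}

/* A thread is preemptible only if its priority is non-negative (preemptible
 * class) and the scheduler is not locked; both conditions collapse into one
 * unsigned comparison against 0x0080.
 */
static bool is_preempt(int8_t prio, uint8_t lock_count)
{
	return bundle_preempt(prio, lock_count) < 0x0080;
}

Under a packing like this, an interrupt-exit path could decide whether to invoke the scheduler with a single load and compare-and-branch, rather than separately testing the priority byte and the lock-count byte.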