kernel/arch: reverse polarity on sched_locked
This will allow for an enhancement when checking whether the current
thread is preemptible when exiting an interrupt.

Change-Id: If93ccd1916eacb5e02a4d15b259fb74f9800d6f4
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
parent 04ed860c68
commit e6a69cae54
7 changed files with 14 additions and 11 deletions
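For context before the hunks below: after the polarity reversal, sched_locked is an unsigned byte where 0 means the scheduler is not locked, and locking decrements the counter so the first lock wraps it to 0xff. Here is a minimal standalone model of those semantics (the wrap-around detail and the nesting-depth reading are inferred from the asserts in the diff, not stated explicitly by the commit):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the reversed-polarity lock counter: an unsigned byte where
 * 0 means "scheduler not locked". Locking decrements, so the first
 * lock wraps 0 -> 0xff; unlocking increments back toward 0. The
 * nesting depth is therefore (uint8_t)-sched_locked.
 */
static uint8_t sched_locked; /* 0 = unlocked */

static void sched_lock(void)
{
	assert(sched_locked != 1); /* a decrement would yield 0 (= unlocked) */
	--sched_locked;            /* 0 -> 0xff on the first lock */
}

static void sched_unlock(void)
{
	assert(sched_locked != 0); /* cannot unlock an unlocked scheduler */
	++sched_locked;            /* 0xff -> 0 on the last unlock */
}

int main(void)
{
	sched_lock();
	printf("locked:   sched_locked = 0x%02x\n", sched_locked); /* 0xff */
	sched_lock();                                              /* nested */
	printf("nested:   sched_locked = 0x%02x\n", sched_locked); /* 0xfe */
	sched_unlock();
	sched_unlock();
	printf("unlocked: sched_locked = 0x%02x\n", sched_locked); /* 0x00 */
	return 0;
}

Read this way, "not locked" and "smallest value" coincide at zero, which is what opens the door to the single-comparison preemptibility check hinted at in the commit message.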
@@ -244,8 +244,9 @@ static inline void _sched_lock(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!_is_in_isr(), "");
+	__ASSERT(_current->base.sched_locked != 1, "");
 
-	++_current->base.sched_locked;
+	--_current->base.sched_locked;
 
 	K_DEBUG("scheduler locked (%p:%d)\n",
 		_current, _current->base.sched_locked);
@@ -262,8 +263,9 @@ static ALWAYS_INLINE void _sched_unlock_no_reschedule(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!_is_in_isr(), "");
+	__ASSERT(_current->base.sched_locked != 0, "");
 
-	--_current->base.sched_locked;
+	++_current->base.sched_locked;
 
 #endif
 }
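The enhancement the commit message points at is not part of the hunks above. A plausible sketch of the idea, assuming a hypothetical packing of prio and sched_locked into one preempt halfword (the union layout, the field order, and the PREEMPT_THRESHOLD constant below are illustrative assumptions, not taken from this commit): with 0 meaning unlocked, the interrupt-exit question "is the current thread preemptible?" collapses to a single unsigned comparison.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: prio (signed, negative = cooperative) and the
 * reversed-polarity sched_locked packed into one 16-bit "preempt"
 * word. Little-endian member order shown; a real port would pick the
 * order per endianness.
 */
struct thread_base {
	union {
		struct {
			int8_t prio;          /* low byte */
			uint8_t sched_locked; /* high byte, 0 = unlocked */
		};
		uint16_t preempt;
	};
};

/* Preemptible iff prio >= 0 AND sched_locked == 0. With the packing
 * above, prio < 0 sets bit 7 of the low byte and sched_locked != 0
 * sets the high byte, so both conditions reduce to preempt <= 0x007f.
 */
#define PREEMPT_THRESHOLD 0x007fU

static int is_preempt(const struct thread_base *t)
{
	return t->preempt <= PREEMPT_THRESHOLD;
}

int main(void)
{
	struct thread_base t = { .prio = 5, .sched_locked = 0 };

	printf("%d\n", is_preempt(&t)); /* 1: preemptible */
	--t.sched_locked;               /* lock: 0 -> 0xff */
	printf("%d\n", is_preempt(&t)); /* 0: scheduler locked */
	t.sched_locked = 0;
	t.prio = -1;                    /* cooperative thread */
	printf("%d\n", is_preempt(&t)); /* 0: never preemptible */
	return 0;
}

With the old polarity (counting up from 0 with no wrap), the locked and unlocked states would not sort on the same side as the priority check, so the test could not be folded into one comparison; that is the payoff of reversing the counter here.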