kernel: make _thread.sched_locked a non-atomic variable
Not needed, since only the thread itself can modify its own sched_locked count.

Change-Id: I3d3d8be548d2b24ca14f51637cc58bda66f8b9ee
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
parent c0f0dd9734
commit 04ed860c68
3 changed files with 8 additions and 8 deletions
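A minimal sketch of the reasoning above, using simplified names rather than the actual kernel definitions: the lock count is a plain per-thread field that only its owning thread ever writes, so a non-atomic increment/decrement cannot race with another writer, and readers only need to distinguish zero from non-zero.

/* Simplified model of a per-thread scheduler lock count; the struct and
 * helper names here are illustrative, not the real kernel code. */
#include <stdint.h>

struct thread_base {
	int prio;
	/* scheduler lock count: written only by the owning thread */
	volatile uint32_t sched_locked;
};

/* Called only by the thread that owns 'base', so plain ++/-- is safe. */
static inline void sched_lock(struct thread_base *base)
{
	++base->sched_locked;
}

static inline void sched_unlock(struct thread_base *base)
{
	--base->sched_locked;
}

/* A reader (e.g. a preemption check) only needs zero vs. non-zero,
 * which a single aligned 32-bit load already provides. */
static inline int is_preemptible(const struct thread_base *base)
{
	return base->sched_locked == 0;
}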
@@ -82,7 +82,7 @@ struct _thread_base {
 	int prio;
 
 	/* scheduler lock count */
-	atomic_t sched_locked;
+	volatile uint32_t sched_locked;
 
 	/* data returned by APIs */
 	void *swap_data;
@@ -136,7 +136,7 @@ typedef struct k_thread _thread_t;
 
 struct _ready_q {
 
-	/* next thread to run if known, NULL otherwise */
+	/* always contains next thread to run: cannot be NULL */
 	struct k_thread *cache;
 
 	/* bitmap of priorities that contain at least one ready thread */
@@ -146,7 +146,7 @@ static inline int _is_coop(struct k_thread *thread)
 static inline int _is_preempt(struct k_thread *thread)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
-	return !_is_coop(thread) && !atomic_get(&thread->base.sched_locked);
+	return !_is_coop(thread) && !thread->base.sched_locked;
 #else
 	return 0;
 #endif
@@ -245,7 +245,7 @@ static inline void _sched_lock(void)
 #ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!_is_in_isr(), "");
 
-	atomic_inc(&_current->base.sched_locked);
+	++_current->base.sched_locked;
 
 	K_DEBUG("scheduler locked (%p:%d)\n",
 		_current, _current->base.sched_locked);
@@ -258,12 +258,12 @@ static inline void _sched_lock(void)
  * It is incumbent upon the caller to ensure that the reschedule occurs
  * sometime after the scheduler is unlocked.
  */
-static inline void _sched_unlock_no_reschedule(void)
+static ALWAYS_INLINE void _sched_unlock_no_reschedule(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!_is_in_isr(), "");
 
-	atomic_dec(&_current->base.sched_locked);
+	--_current->base.sched_locked;
 #endif
 }
 
@@ -149,7 +149,7 @@ void k_sched_lock(void)
 #ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!_is_in_isr(), "");
 
-	atomic_inc(&_current->base.sched_locked);
+	++_current->base.sched_locked;
 
 	K_DEBUG("scheduler locked (%p:%d)\n",
 		_current, _current->base.sched_locked);
@@ -164,7 +164,7 @@ void k_sched_unlock(void)
 
 	int key = irq_lock();
 
-	atomic_dec(&_current->base.sched_locked);
+	--_current->base.sched_locked;
 
 	K_DEBUG("scheduler unlocked (%p:%d)\n",
 		_current, _current->base.sched_locked);
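For context, a hypothetical application-level use of the public k_sched_lock()/k_sched_unlock() API touched in the last two hunks; critical_work() and other_work() are illustrative placeholders, and the include may differ between Zephyr versions.

#include <zephyr.h>	/* assumed application include */

static void critical_work(void) { /* work that must not be preempted */ }
static void other_work(void)    { /* nested section, still locked */ }

void locked_section(void)
{
	k_sched_lock();		/* sched_locked: 0 -> 1, preemption disabled */
	critical_work();

	k_sched_lock();		/* sched_locked: 1 -> 2, the lock nests */
	other_work();
	k_sched_unlock();	/* sched_locked: 2 -> 1, still locked */

	k_sched_unlock();	/* sched_locked: 1 -> 0, preemption re-enabled */
}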