diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index a72072a3417..ec19e99c722 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -54,6 +54,16 @@ struct k_thread *_find_first_thread_to_unpend(_wait_q_t *wait_q,
 void idle(void *a, void *b, void *c);
 void z_time_slice(int ticks);
 
+static inline void _pend_curr_unlocked(_wait_q_t *wait_q, s32_t timeout)
+{
+	(void) _pend_curr_irqlock(_arch_irq_lock(), wait_q, timeout);
+}
+
+static inline void _reschedule_unlocked(void)
+{
+	(void) _reschedule_irqlock(_arch_irq_lock());
+}
+
 /* find which one is the next thread to run */
 /* must be called with interrupts locked */
 #ifdef CONFIG_SMP
diff --git a/kernel/include/kswap.h b/kernel/include/kswap.h
index ab2eda30213..4d7b8a5b82e 100644
--- a/kernel/include/kswap.h
+++ b/kernel/include/kswap.h
@@ -103,6 +103,14 @@ static inline int _Swap(struct k_spinlock *lock, k_spinlock_key_t key)
 	return do_swap(key.key, lock, 1);
 }
 
+static inline void _Swap_unlocked(void)
+{
+	struct k_spinlock lock = {};
+	k_spinlock_key_t key = k_spin_lock(&lock);
+
+	(void) _Swap(&lock, key);
+}
+
 #else /* !CONFIG_USE_SWITCH */
 
 extern int __swap(unsigned int key);
@@ -137,6 +145,11 @@ static ALWAYS_INLINE int _Swap(struct k_spinlock *lock, k_spinlock_key_t key)
 	return _Swap_irqlock(key.key);
 }
 
+static inline void _Swap_unlocked(void)
+{
+	(void) _Swap_irqlock(_arch_irq_lock());
+}
+
 #endif
 
 #endif /* ZEPHYR_KERNEL_INCLUDE_KSWAP_H_ */
diff --git a/kernel/init.c b/kernel/init.c
index 14a77fe529e..b2eff58d396 100644
--- a/kernel/init.c
+++ b/kernel/init.c
@@ -390,8 +390,7 @@ static void switch_to_main_thread(void)
 	 * current fake thread is not on a wait queue or ready queue, so it
 	 * will never be rescheduled in.
 	 */
-
-	(void)_Swap_irqlock(irq_lock());
+	_Swap_unlocked();
 #endif
 }
 #endif /* CONFIG_MULTITHREADING */
diff --git a/kernel/mempool.c b/kernel/mempool.c
index 9279540e6b6..3d9326d90a9 100644
--- a/kernel/mempool.c
+++ b/kernel/mempool.c
@@ -90,7 +90,7 @@ int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
 			return ret;
 		}
 
-		(void)_pend_curr_irqlock(irq_lock(), &p->wait_q, timeout);
+		_pend_curr_unlocked(&p->wait_q, timeout);
 
 		if (timeout != K_FOREVER) {
 			timeout = end - z_tick_get();
diff --git a/kernel/sched.c b/kernel/sched.c
index e52f1ece8e3..31ccc56ad4d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -490,7 +490,7 @@ void _thread_priority_set(struct k_thread *thread, int prio)
 	sys_trace_thread_priority_set(thread);
 
 	if (need_sched) {
-		_reschedule_irqlock(irq_lock());
+		_reschedule_unlocked();
 	}
 }
 
@@ -545,7 +545,7 @@ void k_sched_unlock(void)
 	K_DEBUG("scheduler unlocked (%p:%d)\n",
 		_current, _current->base.sched_locked);
 
-	_reschedule_irqlock(irq_lock());
+	_reschedule_unlocked();
 #endif
 }
 
@@ -859,13 +859,7 @@ void _impl_k_yield(void)
 		}
 	}
 
-#ifdef CONFIG_SMP
-	(void)_Swap_irqlock(irq_lock());
-#else
-	if (_get_next_ready_thread() != _current) {
-		(void)_Swap_irqlock(irq_lock());
-	}
-#endif
+	_Swap_unlocked();
 }
 
 #ifdef CONFIG_USERSPACE
diff --git a/kernel/smp.c b/kernel/smp.c
index 85aa46b73c3..d0778f7a158 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -84,9 +84,8 @@ static void smp_init_top(int key, void *arg)
 	};
 
 	_arch_curr_cpu()->current = &dummy_thread;
-	unsigned int k = irq_lock();
 	smp_timer_init();
-	(void)_Swap(k);
+	_Swap_unlocked();
 
 	CODE_UNREACHABLE;
 }
diff --git a/tests/kernel/fatal/src/main.c b/tests/kernel/fatal/src/main.c
index 0ff73e49bed..5092f0553b4 100644
--- a/tests/kernel/fatal/src/main.c
+++ b/tests/kernel/fatal/src/main.c
@@ -141,7 +141,7 @@ void stack_thread2(void)
 	/* Test that stack overflow check due to swap works */
 	blow_up_stack();
 	TC_PRINT("swapping...\n");
-	_Swap_irqlock(irq_lock());
+	_Swap_unlocked();
 	TC_ERROR("should never see this\n");
 	rv = TC_FAIL;
 	irq_unlock(key);
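
Usage sketch (not part of the patch): the new _unlocked() wrappers are for call sites that hold no lock at all. Each one takes the architecture interrupt lock itself via _arch_irq_lock() (or, in the CONFIG_USE_SWITCH build of _Swap_unlocked(), a throwaway stack spinlock, since _Swap() expects a lock/key pair to release) and forwards to the existing _irqlock()/_Swap() primitive, discarding the return value. The before/after below is illustrative only: struct my_obj, my_obj_take_old()/my_obj_take_new() and its wait_q field are hypothetical names, while _pend_curr_irqlock(), _pend_curr_unlocked(), _wait_q_t and s32_t come from the kernel headers touched above.

	/* A hypothetical blocking "take" on a kernel-internal object. */
	#include <kernel.h>
	#include <ksched.h>

	struct my_obj {
		_wait_q_t wait_q;	/* threads blocked on this object */
	};

	/* Old style: the caller takes the interrupt lock, hands the key in,
	 * and has to cast away the return value at every call site.
	 */
	static void my_obj_take_old(struct my_obj *obj, s32_t timeout)
	{
		(void)_pend_curr_irqlock(irq_lock(), &obj->wait_q, timeout);
	}

	/* New style: _pend_curr_unlocked() grabs _arch_irq_lock() internally,
	 * so the call site collapses to a single line.
	 */
	static void my_obj_take_new(struct my_obj *obj, s32_t timeout)
	{
		_pend_curr_unlocked(&obj->wait_q, timeout);
	}

The same pattern is what the hunks above apply with _reschedule_unlocked() in sched.c and _Swap_unlocked() in init.c, smp.c and the fatal test.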