diff --git a/arch/arm/core/thread_abort.c b/arch/arm/core/thread_abort.c
index 605a5795c73..619e1f8ff6a 100644
--- a/arch/arm/core/thread_abort.c
+++ b/arch/arm/core/thread_abort.c
@@ -41,7 +41,7 @@ void _impl_k_thread_abort(k_tid_t thread)
 
 	if (_current == thread) {
 		if ((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) == 0) {
-			(void)_Swap(key);
+			(void)_Swap_irqlock(key);
 			CODE_UNREACHABLE;
 		} else {
 			SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
diff --git a/arch/posix/core/posix_core.c b/arch/posix/core/posix_core.c
index 72e33ff54df..977d2c9ae8f 100644
--- a/arch/posix/core/posix_core.c
+++ b/arch/posix/core/posix_core.c
@@ -510,7 +510,7 @@ void _impl_k_thread_abort(k_tid_t thread)
 
 			      thread_idx, __func__);
 
-		(void)_Swap(key);
+		(void)_Swap_irqlock(key);
 		CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
 	}
diff --git a/arch/x86/core/irq_manage.c b/arch/x86/core/irq_manage.c
index 52070a5093b..d28d80cfaba 100644
--- a/arch/x86/core/irq_manage.c
+++ b/arch/x86/core/irq_manage.c
@@ -95,7 +95,7 @@ void _arch_isr_direct_footer(int swap)
 			:
 			: "memory"
 			);
-		(void)_Swap(flags);
+		(void)_Swap_irqlock(flags);
 	}
 }
 
diff --git a/boards/posix/native_posix/irq_handler.c b/boards/posix/native_posix/irq_handler.c
index 677a71e9796..8b8ec2a6804 100644
--- a/boards/posix/native_posix/irq_handler.c
+++ b/boards/posix/native_posix/irq_handler.c
@@ -112,7 +112,7 @@ void posix_irq_handler(void)
 	    && (hw_irq_ctrl_get_cur_prio() == 256)
 	    && (_kernel.ready_q.cache != _current)) {
 
-		(void)_Swap(irq_lock);
+		(void)_Swap_irqlock(irq_lock);
 	}
 }
 
diff --git a/boards/posix/nrf52_bsim/irq_handler.c b/boards/posix/nrf52_bsim/irq_handler.c
index 4013ea0d146..6744fe6e457 100644
--- a/boards/posix/nrf52_bsim/irq_handler.c
+++ b/boards/posix/nrf52_bsim/irq_handler.c
@@ -170,7 +170,7 @@ void posix_irq_handler(void)
 
 	    && (CPU_will_be_awaken_from_WFE == false)
 	    && (_kernel.ready_q.cache != _current)) {
-		_Swap(irq_lock);
+		_Swap_irqlock(irq_lock);
 	}
 }
 
diff --git a/include/spinlock.h b/include/spinlock.h
index 85a70b5a839..c8367fc0e39 100644
--- a/include/spinlock.h
+++ b/include/spinlock.h
@@ -81,4 +81,18 @@ static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
 	_arch_irq_unlock(key.key);
 }
 
+/* Internal function: releases the lock, but leaves local interrupts
+ * disabled
+ */
+static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
+{
+#ifdef SPIN_VALIDATE
+	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
+#endif
+#ifdef CONFIG_SMP
+	atomic_clear(&l->locked);
+#endif
+}
+
+
 #endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */
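The k_spin_release() added above is k_spin_unlock() minus the interrupt restore: the lock becomes available to other CPUs while local interrupts stay masked, so the irq key can be handed off to code that runs later, such as the context-switch path in kswap.h below. A minimal sketch of the intended call pattern, assuming a hypothetical demo_lock (k_spin_lock(), k_spinlock_key_t and _arch_irq_unlock() are the existing APIs visible around this hunk):

static struct k_spinlock demo_lock;	/* hypothetical lock, for illustration */

void demo(void)
{
	k_spinlock_key_t key = k_spin_lock(&demo_lock);

	/* ... critical section: other CPUs spin, local IRQs masked ... */

	k_spin_release(&demo_lock);	/* other CPUs may take the lock now... */

	/* ...but this CPU still runs with interrupts masked, so 'key'
	 * can be handed to later code (e.g. a context switch) to do
	 * the final unmask.
	 */
	_arch_irq_unlock(key.key);
}
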
diff --git a/kernel/include/kswap.h b/kernel/include/kswap.h
index e09ca8f91c5..ab2eda30213 100644
--- a/kernel/include/kswap.h
+++ b/kernel/include/kswap.h
@@ -7,6 +7,7 @@
 #define ZEPHYR_KERNEL_INCLUDE_KSWAP_H_
 
 #include <ksched.h>
+#include <spinlock.h>
 #include <kernel_arch_func.h>
 
 #ifdef CONFIG_STACK_SENTINEL
@@ -15,7 +16,6 @@ extern void _check_stack_sentinel(void);
 #define _check_stack_sentinel() /**/
 #endif
 
-
 /* In SMP, the irq_lock() is a spinlock which is implicitly released
  * and reacquired on context switch to preserve the existing
  * semantics.  This means that whenever we are about to return to a
@@ -33,9 +33,15 @@ void _smp_release_global_lock(struct k_thread *thread);
  * primitive that doesn't know about the scheduler or return value.
  * Needed for SMP, where the scheduler requires spinlocking that we
  * don't want to have to do in per-architecture assembly.
+ *
+ * Note that is_spinlock is a compile-time construct which will be
+ * optimized out when this function is expanded.
  */
-static inline int _Swap(unsigned int key)
+static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
+					  struct k_spinlock *lock,
+					  int is_spinlock)
 {
+	ARG_UNUSED(lock);
 	struct k_thread *new_thread, *old_thread;
 
 #ifdef CONFIG_EXECUTION_BENCHMARKING
@@ -51,6 +57,10 @@ static inline int _Swap(unsigned int key)
 	sys_trace_thread_switched_out();
 #endif
 
+	if (is_spinlock) {
+		k_spin_release(lock);
+	}
+
 	new_thread = _get_next_ready_thread();
 
 	if (new_thread != old_thread) {
@@ -61,9 +71,10 @@
 
 		new_thread->base.cpu = _arch_curr_cpu()->id;
 
-		_smp_release_global_lock(new_thread);
+		if (!is_spinlock) {
+			_smp_release_global_lock(new_thread);
+		}
 #endif
-
 		_current = new_thread;
 		_arch_switch(new_thread->switch_handle,
 			     &old_thread->switch_handle);
@@ -73,16 +84,30 @@
 	sys_trace_thread_switched_in();
 #endif
 
-	irq_unlock(key);
+	if (is_spinlock) {
+		_arch_irq_unlock(key);
+	} else {
+		irq_unlock(key);
+	}
 
 	return _current->swap_retval;
 }
 
+static inline int _Swap_irqlock(unsigned int key)
+{
+	return do_swap(key, NULL, 0);
+}
+
+static inline int _Swap(struct k_spinlock *lock, k_spinlock_key_t key)
+{
+	return do_swap(key.key, lock, 1);
+}
+
 #else /* !CONFIG_USE_SWITCH */
 
 extern int __swap(unsigned int key);
 
-static inline int _Swap(unsigned int key)
+static inline int _Swap_irqlock(unsigned int key)
 {
 	int ret;
 	_check_stack_sentinel();
@@ -101,6 +126,17 @@
 
 	return ret;
 }
+
+/* If !USE_SWITCH, then spinlocks are guaranteed degenerate as we
+ * can't be in SMP.  The k_spin_release() call is just for validation
+ * handling.
+ */
+static ALWAYS_INLINE int _Swap(struct k_spinlock *lock, k_spinlock_key_t key)
+{
+	k_spin_release(lock);
+	return _Swap_irqlock(key.key);
+}
+
 #endif
 
 #endif /* ZEPHYR_KERNEL_INCLUDE_KSWAP_H_ */
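The "compile-time construct" note is the crux of do_swap(): every caller passes a literal 0 or 1 for is_spinlock, and because the function is force-inlined the compiler folds the flag and deletes the untaken branches, so neither wrapper pays for a runtime test. A standalone sketch of the same specialization pattern, using Zephyr's ALWAYS_INLINE with purely illustrative names (not kernel code):

static ALWAYS_INLINE int helper(int key, int use_spinlock)
{
	/* 'use_spinlock' is a constant at every expansion site, so each
	 * wrapper below compiles down to exactly one of these branches.
	 */
	if (use_spinlock) {
		return key * 2;
	} else {
		return key + 1;
	}
}

static inline int via_irqlock(int key)  { return helper(key, 0); }
static inline int via_spinlock(int key) { return helper(key, 1); }
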
diff --git a/kernel/init.c b/kernel/init.c
index 59896116258..14a77fe529e 100644
--- a/kernel/init.c
+++ b/kernel/init.c
@@ -391,7 +391,7 @@ static void switch_to_main_thread(void)
 	 * will never be rescheduled in.
 	 */
-	(void)_Swap(irq_lock());
+	(void)_Swap_irqlock(irq_lock());
 #endif
 }
 #endif /* CONFIG_MULTITHREADING */
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 4ce62b2ed0a..6bed158be4a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -434,7 +434,7 @@ int _pend_current_thread(u32_t key, _wait_q_t *wait_q, s32_t timeout)
 	pending_current = _current;
 #endif
 	pend(_current, wait_q, timeout);
-	return _Swap(key);
+	return _Swap_irqlock(key);
 }
 
 struct k_thread *_unpend_first_thread(_wait_q_t *wait_q)
@@ -498,7 +498,7 @@ void _reschedule(u32_t key)
 		goto noswap;
 	}
 
-	(void)_Swap(key);
+	(void)_Swap_irqlock(key);
 	return;
 
 noswap:
@@ -841,10 +841,10 @@ void _impl_k_yield(void)
 	}
 
 #ifdef CONFIG_SMP
-	(void)_Swap(irq_lock());
+	(void)_Swap_irqlock(irq_lock());
 #else
 	if (_get_next_ready_thread() != _current) {
-		(void)_Swap(irq_lock());
+		(void)_Swap_irqlock(irq_lock());
 	}
 #endif
 }
@@ -878,7 +878,7 @@ s32_t _impl_k_sleep(s32_t duration)
 	_remove_thread_from_ready_q(_current);
 	_add_thread_timeout(_current, ticks);
 
-	(void)_Swap(key);
+	(void)_Swap_irqlock(key);
 
 	ticks = expected_wakeup_time - z_tick_get_32();
 	if (ticks > 0) {
diff --git a/tests/kernel/fatal/src/main.c b/tests/kernel/fatal/src/main.c
index ffef259bab5..0ff73e49bed 100644
--- a/tests/kernel/fatal/src/main.c
+++ b/tests/kernel/fatal/src/main.c
@@ -141,7 +141,7 @@ void stack_thread2(void)
 	/* Test that stack overflow check due to swap works */
 	blow_up_stack();
 	TC_PRINT("swapping...\n");
-	_Swap(irq_lock());
+	_Swap_irqlock(irq_lock());
 	TC_ERROR("should never see this\n");
 	rv = TC_FAIL;
 	irq_unlock(key);
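After this split there are two entry points: _Swap_irqlock() preserves the old semantics for the irq_lock()-based callers patched throughout this diff, while the new _Swap(lock, key) is for spinlock-protected callers and releases the lock itself (without unmasking interrupts) before picking the next thread. A hedged sketch of the two calling conventions; my_lock is hypothetical and the wait-queue bookkeeping is elided:

/* Legacy convention: key comes from irq_lock() and is restored via
 * irq_unlock() when this thread is eventually switched back in.
 */
int wait_with_irqlock(void)
{
	unsigned int key = irq_lock();

	/* ... enqueue _current on a wait queue here ... */
	return _Swap_irqlock(key);
}

/* Spinlock convention: _Swap() calls k_spin_release() on the lock and
 * keeps interrupts masked until the switched-in thread restores them
 * from key.
 */
static struct k_spinlock my_lock;	/* hypothetical */

int wait_with_spinlock(void)
{
	k_spinlock_key_t key = k_spin_lock(&my_lock);

	/* ... enqueue _current on a wait queue here ... */
	return _Swap(&my_lock, key);
}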