kernel: Split _Swap() API into irqlock and spinlock variants
We want a _Swap() variant that can atomically release/restore a spinlock state in addition to the legacy irqlock. The existing function is now named "_Swap_irqlock()", while _Swap() now refers to a spinlock and takes two arguments. The irqlock variant will go away once existing users are ported to spinlocks (there are not many: _Swap() is an internal API, and the long port away from legacy irqlocking will happen mostly in drivers).

On uniprocessor setups the two variants produce identical code, but SMP requires that the correct API be used to maintain the global lock.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Parent: 53cae5f471
Commit: aa6e21c24c
10 changed files with 68 additions and 18 deletions
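For orientation, a minimal caller-side sketch of the two call shapes described above. The surrounding functions and my_lock are hypothetical, not part of this commit; both assume the kernel-internal spinlock.h and kswap.h headers touched below.

/* Hypothetical callers, illustration only. */
static struct k_spinlock my_lock;

void wait_legacy(void)
{
	unsigned int key = irq_lock();

	/* ...mark the current thread pended/unready... */
	(void)_Swap_irqlock(key);	/* old single-argument form: irqlock key */
}

void wait_spinlock(void)
{
	k_spinlock_key_t key = k_spin_lock(&my_lock);

	/* ...mark the current thread pended/unready... */
	(void)_Swap(&my_lock, key);	/* new form: lock + key, released by the swap itself */
}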
@@ -41,7 +41,7 @@ void _impl_k_thread_abort(k_tid_t thread)
 	if (_current == thread) {
 		if ((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) == 0) {
-			(void)_Swap(key);
+			(void)_Swap_irqlock(key);
 			CODE_UNREACHABLE;
 		} else {
 			SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
@@ -510,7 +510,7 @@ void _impl_k_thread_abort(k_tid_t thread)
		 thread_idx,
		 __func__);

-	(void)_Swap(key);
+	(void)_Swap_irqlock(key);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
 }
@@ -95,7 +95,7 @@ void _arch_isr_direct_footer(int swap)
			:
			: "memory"
			);
-		(void)_Swap(flags);
+		(void)_Swap_irqlock(flags);
	}
 }
@@ -112,7 +112,7 @@ void posix_irq_handler(void)
	    && (hw_irq_ctrl_get_cur_prio() == 256)
	    && (_kernel.ready_q.cache != _current)) {

-		(void)_Swap(irq_lock);
+		(void)_Swap_irqlock(irq_lock);
	}
 }
@@ -170,7 +170,7 @@ void posix_irq_handler(void)
	    && (CPU_will_be_awaken_from_WFE == false)
	    && (_kernel.ready_q.cache != _current)) {

-		_Swap(irq_lock);
+		_Swap_irqlock(irq_lock);
	}
 }
@@ -81,4 +81,18 @@ static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
	_arch_irq_unlock(key.key);
 }

+/* Internal function: releases the lock, but leaves local interrupts
+ * disabled
+ */
+static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
+{
+#ifdef SPIN_VALIDATE
+	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
+#endif
+#ifdef CONFIG_SMP
+	atomic_clear(&l->locked);
+#endif
+}
+
 #endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */
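For context, k_spin_release() is the lock-only half of k_spin_unlock(): it drops the lock word but leaves local interrupts masked, which is what the swap path needs. A rough sketch of the relationship, illustrative only and not code added by this commit:

/* Roughly, the public unlock is "release the lock word, then restore the
 * interrupt state captured in the key"; the swap path only wants the first
 * half, since it restores interrupts later on its own.
 */
static ALWAYS_INLINE void k_spin_unlock_equiv(struct k_spinlock *l,
					      k_spinlock_key_t key)
{
	k_spin_release(l);		/* clear l->locked (SMP builds only) */
	_arch_irq_unlock(key.key);	/* restore the saved interrupt state */
}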
@@ -7,6 +7,7 @@
 #define ZEPHYR_KERNEL_INCLUDE_KSWAP_H_

 #include <ksched.h>
+#include <spinlock.h>
 #include <kernel_arch_func.h>

 #ifdef CONFIG_STACK_SENTINEL
@@ -15,7 +16,6 @@ extern void _check_stack_sentinel(void);
 #define _check_stack_sentinel() /**/
 #endif

-
 /* In SMP, the irq_lock() is a spinlock which is implicitly released
  * and reacquired on context switch to preserve the existing
  * semantics. This means that whenever we are about to return to a
@@ -33,9 +33,15 @@ void _smp_release_global_lock(struct k_thread *thread);
  * primitive that doesn't know about the scheduler or return value.
  * Needed for SMP, where the scheduler requires spinlocking that we
  * don't want to have to do in per-architecture assembly.
+ *
+ * Note that is_spinlock is a compile-time construct which will be
+ * optimized out when this function is expanded.
  */
-static inline int _Swap(unsigned int key)
+static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
+					  struct k_spinlock *lock,
+					  int is_spinlock)
 {
+	ARG_UNUSED(lock);
	struct k_thread *new_thread, *old_thread;

 #ifdef CONFIG_EXECUTION_BENCHMARKING
@@ -51,6 +57,10 @@ static inline int _Swap(unsigned int key)
	sys_trace_thread_switched_out();
 #endif

+	if (is_spinlock) {
+		k_spin_release(lock);
+	}
+
	new_thread = _get_next_ready_thread();

	if (new_thread != old_thread) {
@@ -61,9 +71,10 @@ static inline int _Swap(unsigned int key)

		new_thread->base.cpu = _arch_curr_cpu()->id;

-		_smp_release_global_lock(new_thread);
+		if (!is_spinlock) {
+			_smp_release_global_lock(new_thread);
+		}
 #endif

		_current = new_thread;
		_arch_switch(new_thread->switch_handle,
			     &old_thread->switch_handle);
@@ -73,16 +84,30 @@ static inline int _Swap(unsigned int key)
	sys_trace_thread_switched_in();
 #endif

-	irq_unlock(key);
+	if (is_spinlock) {
+		_arch_irq_unlock(key);
+	} else {
+		irq_unlock(key);
+	}

	return _current->swap_retval;
 }

+static inline int _Swap_irqlock(unsigned int key)
+{
+	return do_swap(key, NULL, 0);
+}
+
+static inline int _Swap(struct k_spinlock *lock, k_spinlock_key_t key)
+{
+	return do_swap(key.key, lock, 1);
+}
+
 #else /* !CONFIG_USE_SWITCH */

 extern int __swap(unsigned int key);

-static inline int _Swap(unsigned int key)
+static inline int _Swap_irqlock(unsigned int key)
 {
	int ret;
	_check_stack_sentinel();
@@ -101,6 +126,17 @@ static inline int _Swap(unsigned int key)

	return ret;
 }

+/* If !USE_SWITCH, then spinlocks are guaranteed degenerate as we
+ * can't be in SMP. The k_spin_release() call is just for validation
+ * handling.
+ */
+static ALWAYS_INLINE int _Swap(struct k_spinlock *lock, k_spinlock_key_t key)
+{
+	k_spin_release(lock);
+	return _Swap_irqlock(key.key);
+}
+
 #endif

 #endif /* ZEPHYR_KERNEL_INCLUDE_KSWAP_H_ */
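As the commit message notes, the remaining _Swap_irqlock() users are expected to move to spinlocks over time. A hypothetical ported call site might look like the following; sched_lock and the pend() usage here are assumptions for illustration, not changes made by this commit:

/* Hypothetical future call site, after porting off the legacy irqlock. */
static struct k_spinlock sched_lock;

static int pend_current_spin(_wait_q_t *wait_q, s32_t timeout)
{
	k_spinlock_key_t key = k_spin_lock(&sched_lock);

	pend(_current, wait_q, timeout);

	/* do_swap() drops sched_lock itself; with is_spinlock set it also
	 * skips the _smp_release_global_lock() path used by the irqlock form.
	 */
	return _Swap(&sched_lock, key);
}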
@@ -391,7 +391,7 @@ static void switch_to_main_thread(void)
	 * will never be rescheduled in.
	 */

-	(void)_Swap(irq_lock());
+	(void)_Swap_irqlock(irq_lock());
 #endif
 }
 #endif /* CONFIG_MULTITHREADING */
@@ -434,7 +434,7 @@ int _pend_current_thread(u32_t key, _wait_q_t *wait_q, s32_t timeout)
	pending_current = _current;
 #endif
	pend(_current, wait_q, timeout);
-	return _Swap(key);
+	return _Swap_irqlock(key);
 }

 struct k_thread *_unpend_first_thread(_wait_q_t *wait_q)
@@ -498,7 +498,7 @@ void _reschedule(u32_t key)
		goto noswap;
	}

-	(void)_Swap(key);
+	(void)_Swap_irqlock(key);
	return;

 noswap:
@@ -841,10 +841,10 @@ void _impl_k_yield(void)
	}

 #ifdef CONFIG_SMP
-	(void)_Swap(irq_lock());
+	(void)_Swap_irqlock(irq_lock());
 #else
	if (_get_next_ready_thread() != _current) {
-		(void)_Swap(irq_lock());
+		(void)_Swap_irqlock(irq_lock());
	}
 #endif
 }
@@ -878,7 +878,7 @@ s32_t _impl_k_sleep(s32_t duration)
	_remove_thread_from_ready_q(_current);
	_add_thread_timeout(_current, ticks);

-	(void)_Swap(key);
+	(void)_Swap_irqlock(key);

	ticks = expected_wakeup_time - z_tick_get_32();
	if (ticks > 0) {
@@ -141,7 +141,7 @@ void stack_thread2(void)
	/* Test that stack overflow check due to swap works */
	blow_up_stack();
	TC_PRINT("swapping...\n");
-	_Swap(irq_lock());
+	_Swap_irqlock(irq_lock());
	TC_ERROR("should never see this\n");
	rv = TC_FAIL;
	irq_unlock(key);