kernel: Add _unlocked() variant to context switch primitives
These functions, for good design reasons, take a locking key to release atomically along with the context switch. But there's still a common pattern in code of doing a switch unconditionally by passing irq_lock() directly. On SMP that's a little hurtful, as it spams the global lock. Provide an _unlocked() variant of _Swap/_reschedule/_pend_curr for simplicity and efficiency.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
parent da37a53a54
commit 1bf9bd04b1
7 changed files with 30 additions and 15 deletions
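
As a quick illustration of the pattern being replaced, a minimal before/after sketch (the wait queue wq and the timeout here are hypothetical stand-ins, not code from this patch):

	/* Before: the caller takes the interrupt lock itself, only to hand
	 * the key straight to the primitive, which releases it atomically
	 * with the context switch. On SMP, irq_lock() hits the global lock.
	 */
	(void)_pend_curr_irqlock(irq_lock(), &wq, timeout);

	/* After: the _unlocked() variant takes the lock internally. */
	_pend_curr_unlocked(&wq, timeout);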

@@ -54,6 +54,16 @@ struct k_thread *_find_first_thread_to_unpend(_wait_q_t *wait_q,
 void idle(void *a, void *b, void *c);
 void z_time_slice(int ticks);
 
+static inline void _pend_curr_unlocked(_wait_q_t *wait_q, s32_t timeout)
+{
+	(void) _pend_curr_irqlock(_arch_irq_lock(), wait_q, timeout);
+}
+
+static inline void _reschedule_unlocked(void)
+{
+	(void) _reschedule_irqlock(_arch_irq_lock());
+}
+
 /* find which one is the next thread to run */
 /* must be called with interrupts locked */
 #ifdef CONFIG_SMP
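
Note that the wrappers lock via _arch_irq_lock() rather than irq_lock(): the arch-level call only masks interrupts on the current CPU, sidestepping the global-lock traffic the commit message complains about. An annotated copy of one wrapper (comments mine, not part of the patch):

	static inline void _reschedule_unlocked(void)
	{
		/* _arch_irq_lock() masks interrupts on this CPU and returns
		 * the key; _reschedule_irqlock() releases it atomically with
		 * any context switch it decides to perform.
		 */
		(void) _reschedule_irqlock(_arch_irq_lock());
	}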

@@ -103,6 +103,14 @@ static inline int _Swap(struct k_spinlock *lock, k_spinlock_key_t key)
 	return do_swap(key.key, lock, 1);
 }
 
+static inline void _Swap_unlocked(void)
+{
+	struct k_spinlock lock = {};
+	k_spinlock_key_t key = k_spin_lock(&lock);
+
+	(void) _Swap(&lock, key);
+}
+
 #else /* !CONFIG_USE_SWITCH */
 
 extern int __swap(unsigned int key);
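
The SMP variant above is the subtle one: _Swap() requires a held spinlock to release, and here the caller holds nothing, so the function manufactures a throwaway lock on the stack. An annotated copy (comments mine, not part of the patch):

	static inline void _Swap_unlocked(void)
	{
		/* A dummy lock nothing else can see, so it is uncontended by
		 * construction; it exists only to give _Swap() a valid
		 * (lock, key) pair to release atomically with the switch.
		 */
		struct k_spinlock lock = {};

		/* Taking it masks interrupts on this CPU, which is the
		 * precondition _Swap() expects on entry.
		 */
		k_spinlock_key_t key = k_spin_lock(&lock);

		(void) _Swap(&lock, key);
	}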

@@ -137,6 +145,11 @@ static ALWAYS_INLINE int _Swap(struct k_spinlock *lock, k_spinlock_key_t key)
 	return _Swap_irqlock(key.key);
 }
 
+static inline void _Swap_unlocked(void)
+{
+	(void) _Swap_irqlock(_arch_irq_lock());
+}
+
 #endif
 
 #endif /* ZEPHYR_KERNEL_INCLUDE_KSWAP_H_ */

@@ -390,8 +390,7 @@ static void switch_to_main_thread(void)
 	 * current fake thread is not on a wait queue or ready queue, so it
 	 * will never be rescheduled in.
 	 */
-
-	(void)_Swap_irqlock(irq_lock());
+	_Swap_unlocked();
 #endif
 }
 #endif /* CONFIG_MULTITHREADING */

@@ -90,7 +90,7 @@ int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
 		return ret;
 	}
 
-	(void)_pend_curr_irqlock(irq_lock(), &p->wait_q, timeout);
+	_pend_curr_unlocked(&p->wait_q, timeout);
 
 	if (timeout != K_FOREVER) {
 		timeout = end - z_tick_get();

@@ -490,7 +490,7 @@ void _thread_priority_set(struct k_thread *thread, int prio)
 	sys_trace_thread_priority_set(thread);
 
 	if (need_sched) {
-		_reschedule_irqlock(irq_lock());
+		_reschedule_unlocked();
 	}
 }
 

@@ -545,7 +545,7 @@ void k_sched_unlock(void)
 	K_DEBUG("scheduler unlocked (%p:%d)\n",
 		_current, _current->base.sched_locked);
 
-	_reschedule_irqlock(irq_lock());
+	_reschedule_unlocked();
 #endif
 }
 

@@ -859,13 +859,7 @@ void _impl_k_yield(void)
 		}
 	}
 
-#ifdef CONFIG_SMP
-	(void)_Swap_irqlock(irq_lock());
-#else
-	if (_get_next_ready_thread() != _current) {
-		(void)_Swap_irqlock(irq_lock());
-	}
-#endif
+	_Swap_unlocked();
 }
 
 #ifdef CONFIG_USERSPACE
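
This hunk is the one place where the commit is more than a mechanical substitution: on uniprocessor builds the old code skipped the swap when _get_next_ready_thread() still returned _current, whereas the new code swaps unconditionally, as the SMP path already did; presumably a swap back into the same thread is cheap enough that the special casing was not worth keeping.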

@@ -84,9 +84,8 @@ static void smp_init_top(int key, void *arg)
 	};
 
 	_arch_curr_cpu()->current = &dummy_thread;
-	unsigned int k = irq_lock();
 	smp_timer_init();
-	(void)_Swap(k);
+	_Swap_unlocked();
 
 	CODE_UNREACHABLE;
 }

@@ -141,7 +141,7 @@ void stack_thread2(void)
 	/* Test that stack overflow check due to swap works */
 	blow_up_stack();
 	TC_PRINT("swapping...\n");
-	_Swap_irqlock(irq_lock());
+	_Swap_unlocked();
 	TC_ERROR("should never see this\n");
 	rv = TC_FAIL;
 	irq_unlock(key);