kernel: Add _unlocked() variant to context switch primitives

These functions, for good design reasons, take a locking key to
atomically release along with the context switch.  But there's still a
common pattern in code of doing a switch unconditionally by passing
irq_lock() directly.  On SMP that's a little hurtful as it spams the
global lock.  Provide an _unlocked() variant of
_Swap/_reschedule/_pend_curr for simplicity and efficiency.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Author:    Andy Ross <andrew.j.ross@intel.com>
Date:      2018-07-24 14:39:38 -07:00
Committer: Anas Nashif
Commit:    1bf9bd04b1

7 changed files with 30 additions and 15 deletions
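
To make the caller-side pattern concrete, here is a minimal, self-contained
sketch in plain C (not Zephyr code; every name below is an illustrative
stand-in). The key-taking primitive lets a caller atomically release a lock it
already holds across the operation; the _unlocked() wrapper is for callers
that hold nothing and just want the switch.

    #include <stdio.h>

    /* Stand-in for the key type returned by irq_lock()/_arch_irq_lock() */
    typedef unsigned int toy_key_t;

    static toy_key_t toy_irq_lock(void)
    {
        /* stand-in: mask interrupts and return a restore key */
        return 0xfeedu;
    }

    static void toy_swap_irqlock(toy_key_t key)
    {
        /* stand-in for _Swap_irqlock(): switches, then releases the key */
        printf("context switch, releasing key 0x%x\n", key);
    }

    /* The wrapper shape this commit adds: acquire the key inline so
     * unconditional callers no longer spell out
     * toy_swap_irqlock(toy_irq_lock()) at every call site.
     */
    static inline void toy_swap_unlocked(void)
    {
        toy_swap_irqlock(toy_irq_lock());
    }

    int main(void)
    {
        toy_swap_unlocked();
        return 0;
    }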


@@ -54,6 +54,16 @@ struct k_thread *_find_first_thread_to_unpend(_wait_q_t *wait_q,
 void idle(void *a, void *b, void *c);
 void z_time_slice(int ticks);
 
+static inline void _pend_curr_unlocked(_wait_q_t *wait_q, s32_t timeout)
+{
+	(void) _pend_curr_irqlock(_arch_irq_lock(), wait_q, timeout);
+}
+
+static inline void _reschedule_unlocked(void)
+{
+	(void) _reschedule_irqlock(_arch_irq_lock());
+}
+
 /* find which one is the next thread to run */
 /* must be called with interrupts locked */
 #ifdef CONFIG_SMP


@@ -103,6 +103,14 @@ static inline int _Swap(struct k_spinlock *lock, k_spinlock_key_t key)
 	return do_swap(key.key, lock, 1);
 }
 
+static inline void _Swap_unlocked(void)
+{
+	struct k_spinlock lock = {};
+	k_spinlock_key_t key = k_spin_lock(&lock);
+
+	(void) _Swap(&lock, key);
+}
+
 #else /* !CONFIG_USE_SWITCH */
 
 extern int __swap(unsigned int key);
@@ -137,6 +145,11 @@ static ALWAYS_INLINE int _Swap(struct k_spinlock *lock, k_spinlock_key_t key)
 	return _Swap_irqlock(key.key);
 }
 
+static inline void _Swap_unlocked(void)
+{
+	(void) _Swap_irqlock(_arch_irq_lock());
+}
+
 #endif
 
 #endif /* ZEPHYR_KERNEL_INCLUDE_KSWAP_H_ */
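
Why the CONFIG_USE_SWITCH variant above builds a throwaway spinlock: that
flavor of _Swap() takes a (lock, key) pair to release atomically, so the
wrapper has to manufacture one. A sketch of the same lines with explanatory
comments added (the comments are mine, not part of the commit):

    static inline void _Swap_unlocked(void)
    {
        /* lives on the local stack, so no other CPU can contend on it */
        struct k_spinlock lock = {};

        /* reduces to masking local interrupts plus a trivially free
         * acquisition of the uncontended lock
         */
        k_spinlock_key_t key = k_spin_lock(&lock);

        /* _Swap() atomically releases both the dummy lock and the key */
        (void) _Swap(&lock, key);
    }

The design point is that this touches no shared state, unlike passing
irq_lock() through, which on SMP funnels every caller through the single
global lock.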


@@ -390,8 +390,7 @@ static void switch_to_main_thread(void)
 	 * current fake thread is not on a wait queue or ready queue, so it
 	 * will never be rescheduled in.
	 */
-	(void)_Swap_irqlock(irq_lock());
+	_Swap_unlocked();
 #endif
 }
 
 #endif /* CONFIG_MULTITHREADING */


@@ -90,7 +90,7 @@ int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
 			return ret;
 		}
 
-		(void)_pend_curr_irqlock(irq_lock(), &p->wait_q, timeout);
+		_pend_curr_unlocked(&p->wait_q, timeout);
 
 		if (timeout != K_FOREVER) {
 			timeout = end - z_tick_get();


@@ -490,7 +490,7 @@ void _thread_priority_set(struct k_thread *thread, int prio)
 	sys_trace_thread_priority_set(thread);
 
 	if (need_sched) {
-		_reschedule_irqlock(irq_lock());
+		_reschedule_unlocked();
 	}
 }
@@ -545,7 +545,7 @@ void k_sched_unlock(void)
 	K_DEBUG("scheduler unlocked (%p:%d)\n",
 		_current, _current->base.sched_locked);
 
-	_reschedule_irqlock(irq_lock());
+	_reschedule_unlocked();
 #endif
 }
@@ -859,13 +859,7 @@ void _impl_k_yield(void)
 		}
 	}
 
-#ifdef CONFIG_SMP
-	(void)_Swap_irqlock(irq_lock());
-#else
-	if (_get_next_ready_thread() != _current) {
-		(void)_Swap_irqlock(irq_lock());
-	}
-#endif
+	_Swap_unlocked();
 }
 
 #ifdef CONFIG_USERSPACE


@@ -84,9 +84,8 @@ static void smp_init_top(int key, void *arg)
 	};
 
 	_arch_curr_cpu()->current = &dummy_thread;
-	unsigned int k = irq_lock();
-
 	smp_timer_init();
-	(void)_Swap(k);
+
+	_Swap_unlocked();
 
 	CODE_UNREACHABLE;
 }


@@ -141,7 +141,7 @@ void stack_thread2(void)
 	/* Test that stack overflow check due to swap works */
 	blow_up_stack();
 	TC_PRINT("swapping...\n");
-	_Swap_irqlock(irq_lock());
+	_Swap_unlocked();
 	TC_ERROR("should never see this\n");
 	rv = TC_FAIL;
 	irq_unlock(key);