kernel/k_sem: Spinlockify
Switch semaphores to use a subsystem spinlock instead of the system irqlock.

Note that this is only "halfway there". Semaphores will no longer contend with other irqlock users on SMP systems, but all semaphores still share the same lock. Really we want semaphores to be independently synchronized, but adding 4 bytes to every one (there are a LOT of these things) for a separate spinlock is too much to pay. Rather, a proper SMP-aware implementation would spin on the count variable directly. But let's not rock that boat quite yet.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
parent ec554f44d9
commit da37a53a54
1 changed file with 16 additions and 6 deletions
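The "spin on the count variable directly" idea from the commit message is explicitly deferred, not implemented here. Purely as an illustration of that future direction, the following is a minimal sketch using plain C11 atomics rather than Zephyr's internal APIs; the fast_sem type and function names are hypothetical, and waiter handling, the count limit, and the blocking slow path are omitted.

/* Hypothetical sketch only -- not part of this commit.  It shows what a
 * lock-free "spin on the count" fast path could look like using C11
 * atomics; fast_sem and the function names are made-up illustrations.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct fast_sem {
	atomic_uint count;
};

/* Try to decrement the count without taking any lock.  Returns true on
 * success; false means the count was zero and a real implementation
 * would fall back to a blocking slow path.
 */
static bool fast_sem_try_take(struct fast_sem *sem)
{
	unsigned int old = atomic_load(&sem->count);

	while (old > 0U) {
		/* On CAS failure, 'old' is reloaded with the current
		 * value and the loop retries.
		 */
		if (atomic_compare_exchange_weak(&sem->count, &old, old - 1U)) {
			return true;
		}
	}
	return false;
}

/* Increment the count; a real "give" would also need to wake waiters
 * and respect the semaphore's limit.
 */
static void fast_sem_give(struct fast_sem *sem)
{
	atomic_fetch_add(&sem->count, 1U);
}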
kernel/sem.c  (+16 −6)

@@ -32,6 +32,15 @@
 extern struct k_sem _k_sem_list_start[];
 extern struct k_sem _k_sem_list_end[];
 
+/* We use a system-wide lock to synchronize semaphores, which has
+ * unfortunate performance impact vs. using a per-object lock
+ * (semaphores are *very* widely used).  But per-object locks require
+ * significant extra RAM.  A properly spin-aware semaphore
+ * implementation would spin on atomic access to the count variable,
+ * and not a spinlock per se.  Useful optimization for the future...
+ */
+static struct k_spinlock lock;
+
 #ifdef CONFIG_OBJECT_TRACING
 
 struct k_sem *_trace_list_k_sem;

@@ -114,12 +123,12 @@ static void do_sem_give(struct k_sem *sem)
 
 void _impl_k_sem_give(struct k_sem *sem)
 {
-	u32_t key = irq_lock();
+	k_spinlock_key_t key = k_spin_lock(&lock);
 
 	sys_trace_void(SYS_TRACE_ID_SEMA_GIVE);
 	do_sem_give(sem);
 	sys_trace_end_call(SYS_TRACE_ID_SEMA_GIVE);
-	_reschedule_irqlock(key);
+	_reschedule(&lock, key);
 }
 
 #ifdef CONFIG_USERSPACE

@@ -131,24 +140,25 @@ int _impl_k_sem_take(struct k_sem *sem, s32_t timeout)
 	__ASSERT(((_is_in_isr() == false) || (timeout == K_NO_WAIT)), "");
 
 	sys_trace_void(SYS_TRACE_ID_SEMA_TAKE);
-	u32_t key = irq_lock();
+	k_spinlock_key_t key = k_spin_lock(&lock);
 
 	if (likely(sem->count > 0U)) {
 		sem->count--;
-		irq_unlock(key);
+		k_spin_unlock(&lock, key);
 		sys_trace_end_call(SYS_TRACE_ID_SEMA_TAKE);
 		return 0;
 	}
 
 	if (timeout == K_NO_WAIT) {
-		irq_unlock(key);
+		k_spin_unlock(&lock, key);
 		sys_trace_end_call(SYS_TRACE_ID_SEMA_TAKE);
 		return -EBUSY;
 	}
 
 	sys_trace_end_call(SYS_TRACE_ID_SEMA_TAKE);
 
-	return _pend_curr_irqlock(key, &sem->wait_q, timeout);
+	int ret = _pend_curr(&lock, key, &sem->wait_q, timeout);
+
+	return ret;
 }
 
 #ifdef CONFIG_USERSPACE
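For context, the change is confined to the kernel's internal locking; the public semaphore API is untouched. A minimal caller-side sketch (my_sem, producer, and consumer are hypothetical names) behaves the same before and after this commit:

/* Illustrative usage sketch; names are hypothetical. */
#include <zephyr.h>

K_SEM_DEFINE(my_sem, 0, 1);   /* initial count 0, limit 1 */

void producer(void)
{
	/* Internally now serialized by the semaphore subsystem spinlock
	 * instead of the global irqlock.
	 */
	k_sem_give(&my_sem);
}

void consumer(void)
{
	/* Block for up to 100 ms; returns 0 on success, a negative
	 * error code on timeout (or -EBUSY with K_NO_WAIT).
	 */
	if (k_sem_take(&my_sem, K_MSEC(100)) != 0) {
		/* handle timeout */
	}
}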