kernel/poll: Spinlockify
Poll gets a single subsystem lock for now. The existing locking in Ben's code is subtle, being used both for latency control and for critical section protection. So getting each k_poll_event to use a separate lock will require care and a little logic change.

Do the simple version for now, which still works to decouple it from the global lock.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
parent 1bf9bd04b1
commit f2b1a4bb64

1 changed file with 29 additions and 19 deletions
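For readers new to the spinlock API, the whole change follows one pattern: a module-scope struct k_spinlock replaces bare interrupt locking, and the unsigned int IRQ key becomes a k_spinlock_key_t. A minimal before/after sketch, not code from this commit (the lock name, function name, and header choices are illustrative assumptions):

#include <kernel.h>
#include <spinlock.h>

static struct k_spinlock foo_lock;	/* hypothetical subsystem lock */

void foo_op(void)
{
	/* Old style: mask interrupts on the local CPU only */
	unsigned int ikey = irq_lock();
	/* ... critical section ... */
	irq_unlock(ikey);

	/* New style: take a named spinlock, which masks interrupts locally
	 * and, on SMP, also spins on a per-lock atomic flag.
	 */
	k_spinlock_key_t key = k_spin_lock(&foo_lock);
	/* ... critical section ... */
	k_spin_unlock(&foo_lock, key);
}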
@@ -26,6 +26,15 @@
 #include <misc/__assert.h>
 #include <stdbool.h>
 
+/* Single subsystem lock. Locking per-event would be better on highly
+ * contended SMP systems, but the original locking scheme here is
+ * subtle (it relies on releasing/reacquiring the lock in areas for
+ * latency control and it's sometimes hard to see exactly what data is
+ * "inside" a given critical section). Do the synchronization port
+ * later as an optimization.
+ */
+static struct k_spinlock lock;
+
 void k_poll_event_init(struct k_poll_event *event, u32_t type,
 		       int mode, void *obj)
 {
@@ -164,12 +173,12 @@ static inline void clear_event_registration(struct k_poll_event *event)
 /* must be called with interrupts locked */
 static inline void clear_event_registrations(struct k_poll_event *events,
 					      int last_registered,
-					      unsigned int key)
+					      k_spinlock_key_t key)
 {
 	for (; last_registered >= 0; last_registered--) {
 		clear_event_registration(&events[last_registered]);
-		irq_unlock(key);
-		key = irq_lock();
+		k_spin_unlock(&lock, key);
+		key = k_spin_lock(&lock);
 	}
 }
 
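The unlock/relock pair inside the loop above is the latency-control idiom the new comment refers to: the subsystem lock is deliberately dropped and retaken between items so other CPUs and preempted contexts can get in. A standalone sketch of the same idiom, with hypothetical names and the same headers assumed as in the earlier sketch:

static struct k_spinlock work_lock;	/* illustration only */

static void drain_items(int *items, int n)
{
	k_spinlock_key_t key = k_spin_lock(&work_lock);

	for (int i = 0; i < n; i++) {
		items[i] = 0;			/* per-item work done under the lock */
		k_spin_unlock(&work_lock, key);	/* lock break: bound hold time */
		key = k_spin_lock(&work_lock);
	}

	k_spin_unlock(&work_lock, key);
}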
@@ -186,7 +195,7 @@ int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 	__ASSERT(num_events > 0, "zero events\n");
 
 	int last_registered = -1, rc;
-	unsigned int key;
+	k_spinlock_key_t key;
 
 	struct _poller poller = { .thread = _current, .is_polling = true, };
 
@@ -194,7 +203,7 @@ int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 	for (int ii = 0; ii < num_events; ii++) {
 		u32_t state;
 
-		key = irq_lock();
+		key = k_spin_lock(&lock);
 		if (is_condition_met(&events[ii], &state)) {
 			set_event_ready(&events[ii], state);
 			poller.is_polling = false;
@@ -206,10 +215,10 @@ int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 				__ASSERT(false, "unexpected return code\n");
 			}
 		}
-		irq_unlock(key);
+		k_spin_unlock(&lock, key);
 	}
 
-	key = irq_lock();
+	key = k_spin_lock(&lock);
 
 	/*
 	 * If we're not polling anymore, it means that at least one event
@@ -218,20 +227,20 @@ int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 	 */
 	if (!poller.is_polling) {
 		clear_event_registrations(events, last_registered, key);
-		irq_unlock(key);
+		k_spin_unlock(&lock, key);
 		return 0;
 	}
 
 	poller.is_polling = false;
 
 	if (timeout == K_NO_WAIT) {
-		irq_unlock(key);
+		k_spin_unlock(&lock, key);
 		return -EAGAIN;
 	}
 
 	_wait_q_t wait_q = _WAIT_Q_INIT(&wait_q);
 
-	int swap_rc = _pend_curr_irqlock(key, &wait_q, timeout);
+	int swap_rc = _pend_curr(&lock, key, &wait_q, timeout);
 
 	/*
 	 * Clear all event registrations. If events happen while we're in this
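The _pend_curr_irqlock() to _pend_curr() swap above is the behavioral core of the hunk: the scheduler is handed the spinlock and its key so it can atomically release that specific lock while pending the current thread, instead of relying on the global IRQ key. A hedged sketch of the wait-side shape this enables; apart from k_spin_lock() and _pend_curr(), the object, field, and lock names are invented for illustration, and kernel-internal headers are assumed for _wait_q_t and _pend_curr():

struct my_obj {				/* hypothetical waitable object */
	_wait_q_t wait_q;
	bool ready;
};

static struct k_spinlock obj_lock;	/* hypothetical per-object lock */

static int wait_for_ready(struct my_obj *obj, s32_t timeout)
{
	k_spinlock_key_t key = k_spin_lock(&obj_lock);

	if (obj->ready) {
		k_spin_unlock(&obj_lock, key);
		return 0;
	}

	/* Atomically drops obj_lock and blocks until woken or timed out */
	return _pend_curr(&obj_lock, key, &obj->wait_q, timeout);
}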
@@ -242,9 +251,9 @@ int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 	 * added to the list of events that occurred, the user has to check the
 	 * return code first, which invalidates the whole list of event states.
 	 */
-	key = irq_lock();
+	key = k_spin_lock(&lock);
 	clear_event_registrations(events, last_registered, key);
-	irq_unlock(key);
+	k_spin_unlock(&lock, key);
 
 	return swap_rc;
 }
@@ -252,7 +261,8 @@ int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 #ifdef CONFIG_USERSPACE
 Z_SYSCALL_HANDLER(k_poll, events, num_events, timeout)
 {
-	int ret, key;
+	int ret;
+	k_spinlock_key_t key;
 	struct k_poll_event *events_copy = NULL;
 	unsigned int bounds;
 
@@ -277,13 +287,13 @@ Z_SYSCALL_HANDLER(k_poll, events, num_events, timeout)
 		goto out;
 	}
 
-	key = irq_lock();
+	key = k_spin_lock(&lock);
 	if (Z_SYSCALL_MEMORY_WRITE(events, bounds)) {
-		irq_unlock(key);
+		k_spin_unlock(&lock, key);
 		goto oops_free;
 	}
 	(void)memcpy(events_copy, (void *)events, bounds);
-	irq_unlock(key);
+	k_spin_unlock(&lock, key);
 
 	/* Validate what's inside events_copy */
 	for (int i = 0; i < num_events; i++) {
@@ -410,7 +420,7 @@ Z_SYSCALL_HANDLER(k_poll_signal_check, signal, signaled, result)
 
 int _impl_k_poll_signal_raise(struct k_poll_signal *signal, int result)
 {
-	unsigned int key = irq_lock();
+	k_spinlock_key_t key = k_spin_lock(&lock);
 	struct k_poll_event *poll_event;
 
 	signal->result = result;
@@ -418,13 +428,13 @@ int _impl_k_poll_signal_raise(struct k_poll_signal *signal, int result)
 
 	poll_event = (struct k_poll_event *)sys_dlist_get(&signal->poll_events);
 	if (poll_event == NULL) {
-		irq_unlock(key);
+		k_spin_unlock(&lock, key);
 		return 0;
 	}
 
 	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);
 
-	_reschedule_irqlock(key);
+	_reschedule(&lock, key);
 	return rc;
 }
 
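For context on the API whose internals change here, a minimal usage sketch of k_poll() with a signal object. This is the standard public poll API rather than anything introduced by this commit; the thread functions and the value 42 are made up:

static struct k_poll_signal sig;

static void waiter_thread(void)
{
	struct k_poll_event evt;

	k_poll_signal_init(&sig);
	k_poll_event_init(&evt, K_POLL_TYPE_SIGNAL,
			  K_POLL_MODE_NOTIFY_ONLY, &sig);

	/* Blocks inside _impl_k_poll() above until the signal is raised */
	if (k_poll(&evt, 1, K_FOREVER) == 0 &&
	    evt.state == K_POLL_STATE_SIGNALED) {
		/* sig.result now holds the value passed to k_poll_signal_raise() */
	}
}

static void notifier_thread(void)
{
	k_poll_signal_raise(&sig, 42);
}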