device: fix race condition in device_sync if ISR runs before thread waits

If an ISR was enabled and ran before the thread that wants to wait for
completion actually did wait, the type of the waiting thread was not yet
registered in the object. The ISR thus could not know which type of
semaphore to release, and would instead release the object for the
previous caller of device_sync_call_wait().

Instead, the ISR now looks if there is already a waiter: if not, it
marks the device as being ready in the object, which allows the thread
to skip taking the semaphore; if there is a waiter, the correct
semaphore type is released, as before.

Change-Id: Ib97480db8ba3e895812cf4bc209d9794639af325
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
This commit is contained in:
Benjamin Walsh 2016-02-05 11:51:55 -05:00 committed by Gerrit Code Review
commit 48e4f82f3c

View file

@@ -186,6 +186,13 @@ struct device* device_get_binding(char *name);
#include <microkernel.h>
#endif
#ifdef CONFIG_MICROKERNEL
/*
 * Records which kind of context, if any, is currently blocked on the
 * sync object, so the ISR knows which semaphore type to release.
 */
enum device_sync_waiter {
	DEVICE_SYNC_WAITER_NONE  = 0, /* no context is waiting */
	DEVICE_SYNC_WAITER_FIBER = 1, /* a fiber is pending on f_sem */
	DEVICE_SYNC_WAITER_TASK  = 2, /* a task is pending on t_sem */
};
#endif
/**
* Specific type for synchronizing calls among the 2 possible contexts
@@ -197,7 +204,8 @@ typedef struct {
/** Microkernel semaphore used for task context */
struct _k_sem_struct _t_sem;
ksem_t t_sem;
bool waiter_is_task;
enum device_sync_waiter waiter;
bool device_ready;
#endif
} device_sync_call_t;
@@ -214,12 +222,25 @@ static inline void device_sync_call_init(device_sync_call_t *sync)
sync->_t_sem.waiters = NULL;
sync->_t_sem.level = sync->_t_sem.count = 0;
sync->t_sem = (ksem_t)&sync->_t_sem;
sync->waiter_is_task = false;
sync->waiter = DEVICE_SYNC_WAITER_NONE;
sync->device_ready = false;
#endif
}
#ifdef CONFIG_MICROKERNEL
/*
* The idle task cannot block and is used during boot, and thus polls a
* nanokernel semaphore instead of waiting on a microkernel semaphore.
*/
static inline bool _is_blocking_task(void)
{
bool is_task = sys_execution_context_type_get() == NANO_CTX_TASK;
bool is_idle_task = task_priority_get() == (CONFIG_NUM_TASK_PRIORITIES - 1);
return is_task && !is_idle_task;
}
/**
* @brief Wait for the isr to complete the synchronous call
* Note: In a microkernel built this function will take care of the caller
@@ -229,14 +250,32 @@ static inline void device_sync_call_init(device_sync_call_t *sync)
*/
static inline void device_sync_call_wait(device_sync_call_t *sync)
{
	/* protect the state of the device_ready and waiter fields */
	int key = irq_lock();

	/* the ISR may have completed the call before we got to wait on it */
	if (sync->device_ready) {
		sync->device_ready = false;

		/*
		 * If device_ready was set, the waiter field had to be NONE, so we
		 * don't have to reset it.
		 */
		irq_unlock(key);
		return;
	}

	/*
	 * Record which type of context is about to pend, so the ISR in
	 * device_sync_call_complete() releases the matching semaphore.
	 */
	if (_is_blocking_task()) {
		sync->waiter = DEVICE_SYNC_WAITER_TASK;
		irq_unlock(key);
		task_sem_take(sync->t_sem, TICKS_UNLIMITED);
	} else {
		sync->waiter = DEVICE_SYNC_WAITER_FIBER;
		irq_unlock(key);
		nano_sem_take(&sync->f_sem, TICKS_UNLIMITED);
	}

	sync->waiter = DEVICE_SYNC_WAITER_NONE;

	/* if we get here, device_ready was not set: we don't have to reset it */
}
/**
@@ -253,9 +292,26 @@ static inline void device_sync_call_complete(device_sync_call_t *sync)
fiber_sem_give,
task_sem_give
};
if (sync->waiter_is_task) {
/* protect the state of device_ready and waiter fields */
int key = irq_lock();
if (sync->waiter == DEVICE_SYNC_WAITER_NONE) {
sync->device_ready = true;
irq_unlock(key);
return;
}
/*
* It's safe to unlock interrupts here since we know there was a waiter,
* and only one thread is allowed to wait on the object, so the state of
* waiter will not change and the device_ready flag will not get set.
*/
irq_unlock(key);
if (sync->waiter == DEVICE_SYNC_WAITER_TASK) {
func[sys_execution_context_type_get()](sync->t_sem);
} else {
} else /* fiber */ {
nano_sem_give(&sync->f_sem);
}
}