kernel/pipe: Spinlockify

One spinlock per pipe object. Also removed some vestigial locking
around _ready_thread(). That call is internally synchronized now.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
parent d27d4e6af2
commit f582b55dd6

2 changed files with 16 additions and 20 deletions
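The whole change follows one mechanical pattern: each irq_lock()/irq_unlock()
pair in the pipe code becomes a lock/unlock of the new per-pipe
struct k_spinlock, and the scheduler calls that consumed an irq_lock key
(_reschedule_irqlock(), _pend_curr_irqlock()) switch to their spinlock-taking
counterparts (_reschedule(), _pend_curr()). Below is a minimal before/after
sketch of that pattern. It is illustrative only, not part of the diff; the
object and function names (my_obj, bump_count_*) are made up for the example.

#include <kernel.h>

/* Hypothetical example object; the 'lock' member mirrors the one this
 * commit adds to struct k_pipe.
 */
struct my_obj {
	int count;
	struct k_spinlock lock;
};

/* Before: critical section guarded by the global interrupt lock. */
static void bump_count_old(struct my_obj *obj)
{
	unsigned int key = irq_lock();

	obj->count++;
	irq_unlock(key);
}

/* After: critical section guarded by the object's own spinlock. The key
 * returned by k_spin_lock() holds the saved interrupt state and must be
 * handed back to k_spin_unlock() on the same lock.
 */
static void bump_count_new(struct my_obj *obj)
{
	k_spinlock_key_t key = k_spin_lock(&obj->lock);

	obj->count++;
	k_spin_unlock(&obj->lock, key);
}

The practical effect is that contention is scoped to the individual pipe
rather than to the single global interrupt lock, which is the point of
moving kernel objects onto their own spinlocks.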
@@ -3631,6 +3631,7 @@ struct k_pipe {
 	size_t bytes_used;          /**< # bytes used in buffer */
 	size_t read_index;          /**< Where in buffer to read from */
 	size_t write_index;         /**< Where in buffer to write */
+	struct k_spinlock lock;     /**< Synchronization lock */
 
 	struct {
 		_wait_q_t readers;  /**< Reader wait queue */
@@ -413,8 +413,6 @@ static int pipe_return_code(size_t min_xfer, size_t bytes_remaining,
  */
 static void pipe_thread_ready(struct k_thread *thread)
 {
-	unsigned int key;
-
 #if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
 	if ((thread->base.thread_state & _THREAD_DUMMY) != 0) {
 		pipe_async_finish((struct k_pipe_async *)thread);
@@ -422,9 +420,7 @@ static void pipe_thread_ready(struct k_thread *thread)
 	}
 #endif
 
-	key = irq_lock();
 	_ready_thread(thread);
-	irq_unlock(key);
 }
 
 /**
@@ -438,7 +434,6 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 	struct k_thread *reader;
 	struct k_pipe_desc *desc;
 	sys_dlist_t xfer_list;
-	unsigned int key;
 	size_t num_bytes_written = 0;
 	size_t bytes_copied;
 
@@ -446,7 +441,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 	ARG_UNUSED(async_desc);
 #endif
 
-	key = irq_lock();
+	k_spinlock_key_t key = k_spin_lock(&pipe->lock);
 
 	/*
 	 * Create a list of "working readers" into which the data will be
@@ -456,13 +451,13 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 	if (!pipe_xfer_prepare(&xfer_list, &reader, &pipe->wait_q.readers,
 			       pipe->size - pipe->bytes_used, bytes_to_write,
 			       min_xfer, timeout)) {
-		irq_unlock(key);
+		k_spin_unlock(&pipe->lock, key);
 		*bytes_written = 0;
 		return -EIO;
 	}
 
 	_sched_lock();
-	irq_unlock(key);
+	k_spin_unlock(&pipe->lock, key);
 
 	/*
 	 * 1. 'xfer_list' currently contains a list of reader threads that can
@@ -489,9 +484,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 		desc->bytes_to_xfer -= bytes_copied;
 
 		/* The thread's read request has been satisfied. Ready it. */
-		key = irq_lock();
 		_ready_thread(thread);
-		irq_unlock(key);
 
 		thread = (struct k_thread *)sys_dlist_get(&xfer_list);
 	}
@@ -539,7 +532,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 		 * Lock interrupts and unlock the scheduler before
 		 * manipulating the writers wait_q.
 		 */
-		key = irq_lock();
+		k_spinlock_key_t key = k_spin_lock(&pipe->lock);
 		_sched_unlock_no_reschedule();
 
 		async_desc->desc.buffer = data + num_bytes_written;
@@ -548,7 +541,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 
 		_pend_thread((struct k_thread *) &async_desc->thread,
 			     &pipe->wait_q.writers, K_FOREVER);
-		_reschedule_irqlock(key);
+		_reschedule(&pipe->lock, key);
 		return 0;
 	}
 #endif
@@ -564,9 +557,10 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 		 * Lock interrupts and unlock the scheduler before
 		 * manipulating the writers wait_q.
 		 */
-		key = irq_lock();
+		k_spinlock_key_t key = k_spin_lock(&pipe->lock);
 		_sched_unlock_no_reschedule();
-		(void)_pend_curr_irqlock(key, &pipe->wait_q.writers, timeout);
+		(void)_pend_curr(&pipe->lock, key,
+				 &pipe->wait_q.writers, timeout);
 	} else {
 		k_sched_unlock();
 	}
@@ -583,14 +577,13 @@ int _impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
 	struct k_thread *writer;
 	struct k_pipe_desc *desc;
 	sys_dlist_t xfer_list;
-	unsigned int key;
 	size_t num_bytes_read = 0;
 	size_t bytes_copied;
 
 	__ASSERT(min_xfer <= bytes_to_read, "");
 	__ASSERT(bytes_read != NULL, "");
 
-	key = irq_lock();
+	k_spinlock_key_t key = k_spin_lock(&pipe->lock);
 
 	/*
 	 * Create a list of "working readers" into which the data will be
@@ -600,13 +593,13 @@ int _impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
 	if (!pipe_xfer_prepare(&xfer_list, &writer, &pipe->wait_q.writers,
 			       pipe->bytes_used, bytes_to_read,
 			       min_xfer, timeout)) {
-		irq_unlock(key);
+		k_spin_unlock(&pipe->lock, key);
 		*bytes_read = 0;
 		return -EIO;
 	}
 
 	_sched_lock();
-	irq_unlock(key);
+	k_spin_unlock(&pipe->lock, key);
 
 	num_bytes_read = pipe_buffer_get(pipe, data, bytes_to_read);
 
@@ -706,9 +699,11 @@ int _impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
 
 	if (timeout != K_NO_WAIT) {
 		_current->base.swap_data = &pipe_desc;
-		key = irq_lock();
+		k_spinlock_key_t key = k_spin_lock(&pipe->lock);
+
 		_sched_unlock_no_reschedule();
-		(void)_pend_curr_irqlock(key, &pipe->wait_q.readers, timeout);
+		(void)_pend_curr(&pipe->lock, key,
+				 &pipe->wait_q.readers, timeout);
 	} else {
 		k_sched_unlock();
 	}