kernel: Split reschedule & pend into irq/spin lock versions
Just like with _Swap(), we need two variants of these utilities which
can atomically release a lock and context switch. The naming shifts
(for byte count reasons) to _reschedule/_pend_curr, and both have an
_irqlock variant which takes the traditional locking.

Just refactoring. No logic changes.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
parent fb505b3cfd
commit ec554f44d9
22 changed files with 87 additions and 66 deletions
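The practical effect of the split is easiest to see from a caller's point of view. The sketch below is not part of this commit; my_obj, my_obj_wait and my_obj_wait_irqlock are hypothetical names, and it assumes the kernel-internal declarations from ksched.h and spinlock.h that appear in the diff further down:

    /* Hypothetical object protected by its own spinlock (new-style path). */
    struct my_obj {
            struct k_spinlock lock;
            _wait_q_t wait_q;
    };

    /* New-style caller: holds a per-object spinlock and lets _pend_curr()
     * release it atomically while the current thread pends and swaps out.
     */
    static int my_obj_wait(struct my_obj *obj, s32_t timeout)
    {
            k_spinlock_key_t key = k_spin_lock(&obj->lock);

            return _pend_curr(&obj->lock, key, &obj->wait_q, timeout);
    }

    /* Legacy-style caller: still under the traditional global irq_lock(),
     * so it uses the _irqlock variant this commit adds for such code.
     */
    static int my_obj_wait_irqlock(struct my_obj *obj, s32_t timeout)
    {
            unsigned int key = irq_lock();

            return _pend_curr_irqlock(key, &obj->wait_q, timeout);
    }

A waker would, symmetrically, ready the pended thread and then call either _reschedule(&obj->lock, key) or _reschedule_irqlock(key), depending on which lock it holds.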
@@ -49,5 +49,5 @@ void _impl_k_thread_abort(k_tid_t thread)
 	}

 	/* The abort handler might have altered the ready queue. */
-	_reschedule(key);
+	_reschedule_irqlock(key);
 }
@@ -531,7 +531,7 @@ void _impl_k_thread_abort(k_tid_t thread)
 	}

 	/* The abort handler might have altered the ready queue. */
-	_reschedule(key);
+	_reschedule_irqlock(key);
 }
 #endif

@@ -38,9 +38,12 @@ void _move_thread_to_end_of_prio_q(struct k_thread *thread);
 void _remove_thread_from_ready_q(struct k_thread *thread);
 int _is_thread_time_slicing(struct k_thread *thread);
 void _unpend_thread_no_timeout(struct k_thread *thread);
-int _pend_current_thread(u32_t key, _wait_q_t *wait_q, s32_t timeout);
+int _pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
+	       _wait_q_t *wait_q, s32_t timeout);
+int _pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout);
 void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout);
-void _reschedule(u32_t key);
+void _reschedule(struct k_spinlock *lock, k_spinlock_key_t key);
+void _reschedule_irqlock(u32_t key);
 struct k_thread *_unpend_first_thread(_wait_q_t *wait_q);
 void _unpend_thread(struct k_thread *thread);
 int _unpend_all(_wait_q_t *wait_q);
@@ -62,7 +65,6 @@ static ALWAYS_INLINE struct k_thread *_get_next_ready_thread(void)
 }
 #endif

-
 static inline bool _is_idle_thread(void *entry_point)
 {
 	return entry_point == idle;
@@ -220,7 +220,7 @@ static void mbox_message_dispose(struct k_mbox_msg *rx_msg)
 	_set_thread_return_value(sending_thread, 0);
 	_mark_thread_as_not_pending(sending_thread);
 	_ready_thread(sending_thread);
-	_reschedule(key);
+	_reschedule_irqlock(key);
 }

 /**
@@ -276,7 +276,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 		 */
 		if ((sending_thread->base.thread_state & _THREAD_DUMMY)
 		    != 0) {
-			_reschedule(key);
+			_reschedule_irqlock(key);
 			return 0;
 		}
 #endif
@@ -285,7 +285,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 			 * synchronous send: pend current thread (unqueued)
 			 * until the receiver consumes the message
 			 */
-			return _pend_current_thread(key, NULL, K_FOREVER);
+			return _pend_curr_irqlock(key, NULL, K_FOREVER);

 		}
 	}
@@ -306,7 +306,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 #endif

 	/* synchronous send: sender waits on tx queue for receiver or timeout */
-	return _pend_current_thread(key, &mbox->tx_msg_queue, timeout);
+	return _pend_curr_irqlock(key, &mbox->tx_msg_queue, timeout);
 }

 int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, s32_t timeout)
@@ -458,7 +458,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,

 	/* wait until a matching sender appears or a timeout occurs */
 	_current->base.swap_data = rx_msg;
-	result = _pend_current_thread(key, &mbox->rx_msg_queue, timeout);
+	result = _pend_curr_irqlock(key, &mbox->rx_msg_queue, timeout);

 	/* consume message data immediately, if needed */
 	if (result == 0) {
@@ -109,7 +109,7 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, s32_t timeout)
 		result = -ENOMEM;
 	} else {
 		/* wait for a free block or timeout */
-		result = _pend_current_thread(key, &slab->wait_q, timeout);
+		result = _pend_curr_irqlock(key, &slab->wait_q, timeout);
 		if (result == 0) {
 			*mem = _current->base.swap_data;
 		}
@@ -129,7 +129,7 @@ void k_mem_slab_free(struct k_mem_slab *slab, void **mem)
 	if (pending_thread != NULL) {
 		_set_thread_return_value_with_data(pending_thread, 0, *mem);
 		_ready_thread(pending_thread);
-		_reschedule(key);
+		_reschedule_irqlock(key);
 	} else {
 		**(char ***)mem = slab->free_list;
 		slab->free_list = *(char **)mem;
@@ -90,7 +90,7 @@ int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
 			return ret;
 		}

-		(void)_pend_current_thread(irq_lock(), &p->wait_q, timeout);
+		(void)_pend_curr_irqlock(irq_lock(), &p->wait_q, timeout);

 		if (timeout != K_FOREVER) {
 			timeout = end - z_tick_get();
@@ -119,7 +119,7 @@ void k_mem_pool_free_id(struct k_mem_block_id *id)
 		need_sched = _unpend_all(&p->wait_q);

 		if (need_sched && !_is_in_isr()) {
-			_reschedule(key);
+			_reschedule_irqlock(key);
 		} else {
 			irq_unlock(key);
 		}
@@ -127,7 +127,7 @@ int _impl_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
 			/* wake up waiting thread */
 			_set_thread_return_value(pending_thread, 0);
 			_ready_thread(pending_thread);
-			_reschedule(key);
+			_reschedule_irqlock(key);
 			return 0;
 		} else {
 			/* put message in queue */
@@ -145,7 +145,7 @@ int _impl_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
 	} else {
 		/* wait for put message success, failure, or timeout */
 		_current->base.swap_data = data;
-		return _pend_current_thread(key, &q->wait_q, timeout);
+		return _pend_curr_irqlock(key, &q->wait_q, timeout);
 	}

 	irq_unlock(key);
@@ -216,7 +216,7 @@ int _impl_k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
 			/* wake up waiting thread */
 			_set_thread_return_value(pending_thread, 0);
 			_ready_thread(pending_thread);
-			_reschedule(key);
+			_reschedule_irqlock(key);
 			return 0;
 		}
 		result = 0;
@@ -226,7 +226,7 @@ int _impl_k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
 	} else {
 		/* wait for get message success or timeout */
 		_current->base.swap_data = data;
-		return _pend_current_thread(key, &q->wait_q, timeout);
+		return _pend_curr_irqlock(key, &q->wait_q, timeout);
 	}

 	irq_unlock(key);
@@ -291,7 +291,7 @@ void _impl_k_msgq_purge(struct k_msgq *q)
 	q->used_msgs = 0;
 	q->read_ptr = q->write_ptr;

-	_reschedule(key);
+	_reschedule_irqlock(key);
 }

 #ifdef CONFIG_USERSPACE
@@ -162,7 +162,7 @@ int _impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
 		adjust_owner_prio(mutex, new_prio);
 	}

-	s32_t got_mutex = _pend_current_thread(key, &mutex->wait_q, timeout);
+	s32_t got_mutex = _pend_curr_irqlock(key, &mutex->wait_q, timeout);

 	K_DEBUG("on mutex %p got_mutex value: %d\n", mutex, got_mutex);

@@ -548,7 +548,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,

 		_pend_thread((struct k_thread *) &async_desc->thread,
 			     &pipe->wait_q.writers, K_FOREVER);
-		_reschedule(key);
+		_reschedule_irqlock(key);
 		return 0;
 	}
 #endif
@@ -566,7 +566,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 		 */
 		key = irq_lock();
 		_sched_unlock_no_reschedule();
-		(void)_pend_current_thread(key, &pipe->wait_q.writers, timeout);
+		(void)_pend_curr_irqlock(key, &pipe->wait_q.writers, timeout);
 	} else {
 		k_sched_unlock();
 	}
@@ -708,7 +708,7 @@ int _impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
 		_current->base.swap_data = &pipe_desc;
 		key = irq_lock();
 		_sched_unlock_no_reschedule();
-		(void)_pend_current_thread(key, &pipe->wait_q.readers, timeout);
+		(void)_pend_curr_irqlock(key, &pipe->wait_q.readers, timeout);
 	} else {
 		k_sched_unlock();
 	}
@@ -231,7 +231,7 @@ int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)

 	_wait_q_t wait_q = _WAIT_Q_INIT(&wait_q);

-	int swap_rc = _pend_current_thread(key, &wait_q, timeout);
+	int swap_rc = _pend_curr_irqlock(key, &wait_q, timeout);

 	/*
 	 * Clear all event registrations. If events happen while we're in this
@@ -424,7 +424,7 @@ int _impl_k_poll_signal_raise(struct k_poll_signal *signal, int result)

 	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);

-	_reschedule(key);
+	_reschedule_irqlock(key);
 	return rc;
 }

@@ -135,7 +135,7 @@ void _impl_k_queue_cancel_wait(struct k_queue *queue)
 	handle_poll_events(queue, K_POLL_STATE_CANCELLED);
 #endif /* !CONFIG_POLL */

-	_reschedule(key);
+	_reschedule_irqlock(key);
 }

 #ifdef CONFIG_USERSPACE
@@ -154,7 +154,7 @@ static s32_t queue_insert(struct k_queue *queue, void *prev, void *data,

 	if (first_pending_thread != NULL) {
 		prepare_thread_to_run(first_pending_thread, data);
-		_reschedule(key);
+		_reschedule_irqlock(key);
 		return 0;
 	}
 #endif /* !CONFIG_POLL */
@@ -179,7 +179,7 @@ static s32_t queue_insert(struct k_queue *queue, void *prev, void *data,
 	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
 #endif /* CONFIG_POLL */

-	_reschedule(key);
+	_reschedule_irqlock(key);
 	return 0;
 }

@@ -257,7 +257,7 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
 	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
 #endif /* !CONFIG_POLL */

-	_reschedule(key);
+	_reschedule_irqlock(key);
 }

 void k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
@@ -346,7 +346,7 @@ void *_impl_k_queue_get(struct k_queue *queue, s32_t timeout)
 	return k_queue_poll(queue, timeout);

 #else
-	int ret = _pend_current_thread(key, &queue->wait_q, timeout);
+	int ret = _pend_curr_irqlock(key, &queue->wait_q, timeout);

 	return (ret != 0) ? NULL : _current->base.swap_data;
 #endif /* CONFIG_POLL */
@@ -428,7 +428,7 @@ void z_thread_timeout(struct _timeout *to)
 }
 #endif

-int _pend_current_thread(u32_t key, _wait_q_t *wait_q, s32_t timeout)
+int _pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout)
 {
 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
 	pending_current = _current;
@@ -437,6 +437,16 @@ int _pend_current_thread(u32_t key, _wait_q_t *wait_q, s32_t timeout)
 	return _Swap_irqlock(key);
 }

+int _pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
+	       _wait_q_t *wait_q, s32_t timeout)
+{
+#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
+	pending_current = _current;
+#endif
+	pend(_current, wait_q, timeout);
+	return _Swap(lock, key);
+}
+
 struct k_thread *_unpend_first_thread(_wait_q_t *wait_q)
 {
 	struct k_thread *t = _unpend1_no_timeout(wait_q);
@@ -480,29 +490,38 @@ void _thread_priority_set(struct k_thread *thread, int prio)
 	sys_trace_thread_priority_set(thread);

 	if (need_sched) {
-		_reschedule(irq_lock());
+		_reschedule_irqlock(irq_lock());
 	}
 }

-void _reschedule(u32_t key)
+static inline int resched(void)
 {
 #ifdef CONFIG_SMP
 	if (!_current_cpu->swap_ok) {
-		goto noswap;
+		return 0;
 	}

 	_current_cpu->swap_ok = 0;
 #endif
+	return !_is_in_isr();
+}

-	if (_is_in_isr()) {
-		goto noswap;
+void _reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
+{
+	if (resched()) {
+		_Swap(lock, key);
+	} else {
+		k_spin_unlock(lock, key);
 	}
+}

-	(void)_Swap_irqlock(key);
-	return;
-
- noswap:
-	irq_unlock(key);
+void _reschedule_irqlock(u32_t key)
+{
+	if (resched()) {
+		_Swap_irqlock(key);
+	} else {
+		irq_unlock(key);
+	}
 }

 void k_sched_lock(void)
@@ -526,7 +545,7 @@ void k_sched_unlock(void)
 	K_DEBUG("scheduler unlocked (%p:%d)\n",
 		_current, _current->base.sched_locked);

-	_reschedule(irq_lock());
+	_reschedule_irqlock(irq_lock());
 #endif
 }

@@ -922,7 +941,7 @@ void _impl_k_wakeup(k_tid_t thread)
 	if (_is_in_isr()) {
 		irq_unlock(key);
 	} else {
-		_reschedule(key);
+		_reschedule_irqlock(key);
 	}
 }

@@ -119,7 +119,7 @@ void _impl_k_sem_give(struct k_sem *sem)
 	sys_trace_void(SYS_TRACE_ID_SEMA_GIVE);
 	do_sem_give(sem);
 	sys_trace_end_call(SYS_TRACE_ID_SEMA_GIVE);
-	_reschedule(key);
+	_reschedule_irqlock(key);
 }

 #ifdef CONFIG_USERSPACE
@@ -148,7 +148,7 @@ int _impl_k_sem_take(struct k_sem *sem, s32_t timeout)

 	sys_trace_end_call(SYS_TRACE_ID_SEMA_TAKE);

-	return _pend_current_thread(key, &sem->wait_q, timeout);
+	return _pend_curr_irqlock(key, &sem->wait_q, timeout);
 }

 #ifdef CONFIG_USERSPACE
@@ -112,7 +112,7 @@ void _impl_k_stack_push(struct k_stack *stack, u32_t data)

 		_set_thread_return_value_with_data(first_pending_thread,
 						   0, (void *)data);
-		_reschedule(key);
+		_reschedule_irqlock(key);
 		return;
 	} else {
 		*(stack->next) = data;
@@ -155,7 +155,7 @@ int _impl_k_stack_pop(struct k_stack *stack, u32_t *data, s32_t timeout)
 		return -EBUSY;
 	}

-	result = _pend_current_thread(key, &stack->wait_q, timeout);
+	result = _pend_curr_irqlock(key, &stack->wait_q, timeout);
 	if (result == -EAGAIN) {
 		return -EAGAIN;
 	}
@@ -271,7 +271,7 @@ void _impl_k_thread_start(struct k_thread *thread)

 	_mark_thread_as_started(thread);
 	_ready_thread(thread);
-	_reschedule(key);
+	_reschedule_irqlock(key);
 }

 #ifdef CONFIG_USERSPACE
@@ -541,7 +541,7 @@ void _impl_k_thread_suspend(struct k_thread *thread)
 	sys_trace_thread_suspend(thread);

 	if (thread == _current) {
-		_reschedule(key);
+		_reschedule_irqlock(key);
 	} else {
 		irq_unlock(key);
 	}
@@ -564,7 +564,7 @@ void _impl_k_thread_resume(struct k_thread *thread)
 	_k_thread_single_resume(thread);

 	sys_trace_thread_resume(thread);
-	_reschedule(key);
+	_reschedule_irqlock(key);
 }

 #ifdef CONFIG_USERSPACE
@@ -37,7 +37,7 @@ void _impl_k_thread_abort(k_tid_t thread)
 	_k_thread_single_abort(thread);
 	_thread_monitor_exit(thread);

-	_reschedule(key);
+	_reschedule_irqlock(key);
 }
 #endif

@@ -172,7 +172,7 @@ void _impl_k_timer_stop(struct k_timer *timer)
 	if (_is_in_isr()) {
 		irq_unlock(key);
 	} else {
-		_reschedule(key);
+		_reschedule_irqlock(key);
 	}
 }

@@ -205,7 +205,7 @@ u32_t _impl_k_timer_status_sync(struct k_timer *timer)
 	if (result == 0) {
 		if (!_is_inactive_timeout(&timer->timeout)) {
 			/* wait for timer to expire or stop */
-			(void)_pend_current_thread(key, &timer->wait_q, K_FOREVER);
+			(void)_pend_curr_irqlock(key, &timer->wait_q, K_FOREVER);

 			/* get updated timer status */
 			key = irq_lock();
@@ -24,9 +24,9 @@ int pthread_barrier_wait(pthread_barrier_t *b)
 		while (_waitq_head(&b->wait_q)) {
 			_ready_one_thread(&b->wait_q);
 		}
-		_reschedule(key);
+		_reschedule_irqlock(key);
 		return 0;
 	} else {
-		return _pend_current_thread(key, &b->wait_q, K_FOREVER);
+		return _pend_curr_irqlock(key, &b->wait_q, K_FOREVER);
 	}
 }
@@ -18,7 +18,7 @@ static int cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut, int timeout)
 	mut->lock_count = 0;
 	mut->owner = NULL;
 	_ready_one_thread(&mut->wait_q);
-	ret = _pend_current_thread(key, &cv->wait_q, timeout);
+	ret = _pend_curr_irqlock(key, &cv->wait_q, timeout);

 	/* FIXME: this extra lock (and the potential context switch it
 	 * can cause) could be optimized out. At the point of the
@@ -49,7 +49,7 @@ int pthread_cond_signal(pthread_cond_t *cv)
 	int key = irq_lock();

 	_ready_one_thread(&cv->wait_q);
-	_reschedule(key);
+	_reschedule_irqlock(key);

 	return 0;
 }
@@ -62,7 +62,7 @@ int pthread_cond_broadcast(pthread_cond_t *cv)
 		_ready_one_thread(&cv->wait_q);
 	}

-	_reschedule(key);
+	_reschedule_irqlock(key);

 	return 0;
 }
@@ -48,7 +48,7 @@ static int acquire_mutex(pthread_mutex_t *m, int timeout)
 		return EINVAL;
 	}

-	rc = _pend_current_thread(key, &m->wait_q, timeout);
+	rc = _pend_curr_irqlock(key, &m->wait_q, timeout);
 	if (rc != 0) {
 		rc = ETIMEDOUT;
 	}
@@ -141,7 +141,7 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
 		m->lock_count++;
 		_ready_thread(thread);
 		_set_thread_return_value(thread, 0);
-		_reschedule(key);
+		_reschedule_irqlock(key);
 		return 0;
 	}
 	m->owner = NULL;
@@ -6,13 +6,13 @@ latencies (not scaling performance) of specific low level scheduling
 primitives independent of overhead from application or API
 abstractions. It works very simply: a main thread creates a "partner"
 thread at a higher priority, the partner then sleeps using
-_pend_current_thread(). From this initial state:
+_pend_curr_irqlock(). From this initial state:

 1. The main thread calls _unpend_first_thread()
 2. The main thread calls _ready_thread()
 3. The main thread calls k_yield()
    (the kernel switches to the partner thread)
-4. The partner thread then runs and calls _pend_current_thread() again
+4. The partner thread then runs and calls _pend_curr_irqlock() again
    (the kernel switches to the main thread)
 5. The main thread returns from k_yield()

@@ -13,14 +13,14 @@
  * of specific low level scheduling primitives independent of overhead
  * from application or API abstractions. It works very simply: a main
  * thread creates a "partner" thread at a higher priority, the partner
- * then sleeps using _pend_current_thread(). From this initial
+ * then sleeps using _pend_curr_irqlock(). From this initial
  * state:
  *
  * 1. The main thread calls _unpend_first_thread()
  * 2. The main thread calls _ready_thread()
  * 3. The main thread calls k_yield()
  *    (the kernel switches to the partner thread)
- * 4. The partner thread then runs and calls _pend_current_thread() again
+ * 4. The partner thread then runs and calls _pend_curr_irqlock() again
  *    (the kernel switches to the main thread)
  * 5. The main thread returns from k_yield()
  *
@@ -90,7 +90,7 @@ static void partner_fn(void *arg1, void *arg2, void *arg3)
 	while (true) {
 		unsigned int key = irq_lock();

-		_pend_current_thread(key, &waitq, K_FOREVER);
+		_pend_curr_irqlock(key, &waitq, K_FOREVER);
 		stamp(PARTNER_AWAKE_PENDING);
 	}
 }