kernel: Further unify _reschedule APIs

Now that other work has eliminated the two cases where we had to do a
reschedule "but yield even if we are cooperative", we can squash both
variants down to a single _reschedule() function which does almost
exactly what the legacy _Swap() call did, but wrapped as a proper
scheduler API.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
commit 15cb5d7293
Authored by Andy Ross on 2018-04-02 18:40:10 -07:00; committed by Anas Nashif
16 changed files with 27 additions and 38 deletions
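
For orientation, here is the unified function as it stands after this
commit, assembled from the two scheduler hunks below. Treat it as a
sketch rather than verbatim tree contents: the fall-through branch
(unlock and return 0) falls between the two hunks and is filled in here
by assumption.

int _reschedule(int key)
{
        K_DEBUG("rescheduling threads\n");

        /* Swap only from thread context, only if the current thread is
         * preemptible, and only if a higher-priority thread is ready.
         */
        if (!_is_in_isr() &&
            _is_preempt(_current) &&
            _is_prio_higher(_get_highest_ready_prio(), _current->base.prio)) {
                K_DEBUG("context-switching out %p\n", _current);
                return _Swap(key);
        } else {
                /* Nothing better to run; just release the lock (assumed,
                 * not shown in the hunks below).
                 */
                irq_unlock(key);
                return 0;
        }
}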


@@ -49,5 +49,5 @@ void _impl_k_thread_abort(k_tid_t thread)
         }
 
         /* The abort handler might have altered the ready queue. */
-        _reschedule_noyield(key);
+        _reschedule(key);
 }


@@ -532,7 +532,7 @@ void _impl_k_thread_abort(k_tid_t thread)
         }
 
         /* The abort handler might have altered the ready queue. */
-        _reschedule_noyield(key);
+        _reschedule(key);
 }
 
 #endif


@@ -21,8 +21,7 @@ extern k_tid_t const _idle_thread;
 
 extern void _add_thread_to_ready_q(struct k_thread *thread);
 extern void _remove_thread_from_ready_q(struct k_thread *thread);
-extern int _reschedule_noyield(int key);
-extern int _reschedule_yield(int key);
+extern int _reschedule(int key);
 extern void k_sched_unlock(void);
 extern void _pend_thread(struct k_thread *thread,
                          _wait_q_t *wait_q, s32_t timeout);


@@ -218,7 +218,7 @@ static void mbox_message_dispose(struct k_mbox_msg *rx_msg)
         _set_thread_return_value(sending_thread, 0);
         _mark_thread_as_not_pending(sending_thread);
         _ready_thread(sending_thread);
-        _reschedule_noyield(key);
+        _reschedule(key);
 }
 
 /**
@@ -275,7 +275,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
          * until the receiver consumes the message
          */
         if (sending_thread->base.thread_state & _THREAD_DUMMY) {
-                _reschedule_noyield(key);
+                _reschedule(key);
                 return 0;
         }
 #endif


@@ -127,5 +127,5 @@ void k_mem_slab_free(struct k_mem_slab *slab, void **mem)
                 slab->num_used--;
         }
 
-        _reschedule_noyield(key);
+        _reschedule(key);
 }


@@ -109,7 +109,7 @@ void k_mem_pool_free_id(struct k_mem_block_id *id)
         }
 
         if (need_sched && !_is_in_isr()) {
-                _reschedule_noyield(key);
+                _reschedule(key);
         } else {
                 irq_unlock(key);
         }


@@ -94,7 +94,7 @@ int _impl_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
                         _set_thread_return_value(pending_thread, 0);
                         _abort_thread_timeout(pending_thread);
                         _ready_thread(pending_thread);
-                        _reschedule_noyield(key);
+                        _reschedule(key);
                         return 0;
                 } else {
                         /* put message in queue */
@@ -184,7 +184,7 @@ int _impl_k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
                         _set_thread_return_value(pending_thread, 0);
                         _abort_thread_timeout(pending_thread);
                         _ready_thread(pending_thread);
-                        _reschedule_noyield(key);
+                        _reschedule(key);
                         return 0;
                 }
                 result = 0;
@@ -229,7 +229,7 @@ void _impl_k_msgq_purge(struct k_msgq *q)
         q->used_msgs = 0;
         q->read_ptr = q->write_ptr;
 
-        _reschedule_noyield(key);
+        _reschedule(key);
 }
 
 #ifdef CONFIG_USERSPACE


@@ -510,7 +510,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
                 _sched_unlock_no_reschedule();
                 _pend_thread((struct k_thread *) &async_desc->thread,
                              &pipe->wait_q.writers, K_FOREVER);
-                _reschedule_noyield(key);
+                _reschedule(key);
                 return 0;
         }
 #endif


@@ -333,6 +333,6 @@ int k_poll_signal(struct k_poll_signal *signal, int result)
         int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);
 
-        _reschedule_noyield(key);
+        _reschedule(key);
 
         return rc;
 }


@@ -89,7 +89,7 @@ void k_queue_cancel_wait(struct k_queue *queue)
         handle_poll_events(queue, K_POLL_STATE_NOT_READY);
 #endif /* !CONFIG_POLL */
 
-        _reschedule_noyield(key);
+        _reschedule(key);
 }
 
 void k_queue_insert(struct k_queue *queue, void *prev, void *data)
@@ -102,7 +102,7 @@ void k_queue_insert(struct k_queue *queue, void *prev, void *data)
 
         if (first_pending_thread) {
                 prepare_thread_to_run(first_pending_thread, data);
-                _reschedule_noyield(key);
+                _reschedule(key);
                 return;
         }
 #endif /* !CONFIG_POLL */
@@ -113,7 +113,7 @@ void k_queue_insert(struct k_queue *queue, void *prev, void *data)
         handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
 #endif /* CONFIG_POLL */
 
-        _reschedule_noyield(key);
+        _reschedule(key);
 }
 
 void k_queue_append(struct k_queue *queue, void *data)
@@ -148,7 +148,7 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
         handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
 #endif /* !CONFIG_POLL */
 
-        _reschedule_noyield(key);
+        _reschedule(key);
 }
 
 void k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)


@@ -140,12 +140,12 @@ void _remove_thread_from_ready_q(struct k_thread *thread)
  * Does not swap away from a thread at a cooperative (unpreemptible)
  * priority unless "yield" is true.
  */
-static int resched(int key, int yield)
+int _reschedule(int key)
 {
         K_DEBUG("rescheduling threads\n");
 
         if (!_is_in_isr() &&
-            (yield || _is_preempt(_current)) &&
+            _is_preempt(_current) &&
             _is_prio_higher(_get_highest_ready_prio(), _current->base.prio)) {
                 K_DEBUG("context-switching out %p\n", _current);
                 return _Swap(key);
@@ -155,16 +155,6 @@ static int resched(int key, int yield)
         }
 }
 
-int _reschedule_noyield(int key)
-{
-        return resched(key, 0);
-}
-
-int _reschedule_yield(int key)
-{
-        return resched(key, 1);
-}
-
 void k_sched_lock(void)
 {
         _sched_lock();
@@ -185,7 +175,7 @@ void k_sched_unlock(void)
 
         K_DEBUG("scheduler unlocked (%p:%d)\n",
                 _current, _current->base.sched_locked);
 
-        _reschedule_noyield(key);
+        _reschedule(key);
 #endif
 }
@@ -274,7 +264,7 @@ void _impl_k_thread_priority_set(k_tid_t tid, int prio)
         int key = irq_lock();
 
         _thread_priority_set(thread, prio);
-        _reschedule_noyield(key);
+        _reschedule(key);
 }
 
 #ifdef CONFIG_USERSPACE
@@ -408,7 +398,7 @@ void _impl_k_wakeup(k_tid_t thread)
 
         if (_is_in_isr()) {
                 irq_unlock(key);
         } else {
-                _reschedule_noyield(key);
+                _reschedule(key);
         }
 }
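
Every call site converted in this commit follows the same idiom: take
the irq_lock() key, mutate scheduler state, then hand the key to
_reschedule(), with an explicit _is_in_isr() guard where the caller can
run from interrupt context (compare the k_wakeup() hunk above and the
k_timer_stop() hunk below). A minimal sketch of that idiom follows;
my_obj_give() is a hypothetical primitive, not Zephyr API:

/* Hypothetical primitive illustrating the caller-side pattern. */
void my_obj_give(struct k_thread *waiter)
{
        int key = irq_lock();

        _ready_thread(waiter);          /* may ready a higher-prio thread */

        if (_is_in_isr()) {
                irq_unlock(key);        /* never swap from an ISR */
        } else {
                _reschedule(key);       /* swap or just unlock */
        }
}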


@@ -135,7 +135,7 @@ void _impl_k_sem_give(struct k_sem *sem)
         unsigned int key = irq_lock();
 
         do_sem_give(sem);
-        _reschedule_noyield(key);
+        _reschedule(key);
 }
 
 #ifdef CONFIG_USERSPACE


@@ -228,7 +228,7 @@ void _impl_k_thread_start(struct k_thread *thread)
 
         _mark_thread_as_started(thread);
         _ready_thread(thread);
-        _reschedule_noyield(key);
+        _reschedule(key);
 }
 
 #ifdef CONFIG_USERSPACE
@@ -484,7 +484,7 @@ void _impl_k_thread_resume(struct k_thread *thread)
 
         _k_thread_single_resume(thread);
 
-        _reschedule_noyield(key);
+        _reschedule(key);
 }
 
 #ifdef CONFIG_USERSPACE


@@ -46,7 +46,7 @@ void _impl_k_thread_abort(k_tid_t thread)
                 }
 
                 /* The abort handler might have altered the ready queue. */
-                _reschedule_noyield(key);
+                _reschedule(key);
         }
 }
 #endif


@@ -172,7 +172,7 @@ void _impl_k_timer_stop(struct k_timer *timer)
 
         if (_is_in_isr()) {
                 irq_unlock(key);
         } else {
-                _reschedule_noyield(key);
+                _reschedule(key);
         }
 }


@@ -23,7 +23,7 @@ int pthread_barrier_wait(pthread_barrier_t *b)
                 while (!sys_dlist_is_empty(&b->wait_q)) {
                         ready_one_thread(&b->wait_q);
                 }
-                return _reschedule_noyield(key);
+                return _reschedule(key);
         } else {
                 return _pend_current_thread(key, &b->wait_q, K_FOREVER);
         }
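
Note that pthread_barrier_wait() returns _reschedule()'s result
directly: per the scheduler hunk above, that is _Swap()'s return value
when a context switch occurred, or 0 when the call degenerated to
irq_unlock(). A sketch of the same pattern; wake_all() is hypothetical,
though ready_one_thread() is the helper the barrier code itself uses:

/* Hypothetical helper: wake every waiter, then let the scheduler decide
 * whether to swap, propagating the swap status to the caller.
 */
static int wake_all(sys_dlist_t *wait_q, int key)
{
        while (!sys_dlist_is_empty(wait_q)) {
                ready_one_thread(wait_q);
        }
        return _reschedule(key);        /* _Swap() result, or 0 if no switch */
}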