kernel: Scheduler refactoring: use _reschedule_*() always
There was a somewhat promiscuous pattern in the kernel where IPC mechanisms would do something that might affect the current thread choice, then check _must_switch_threads() (or occasionally __must_switch_threads -- don't ask, the distinction is being replaced by real English words), sometimes _is_in_isr() (but not always, even in contexts where that looks like it would be a mistake), and then call _Swap() if everything was OK, otherwise releasing the irq_lock(). Sometimes this was done directly, sometimes via the inverted test, sometimes (poll, heh) by doing the test when the thread state was modified and then needlessly passing the result up the call stack to the point of the _Swap(). And some places were just calling _reschedule_threads(), which did all this already.

Unify all this madness. The old _reschedule_threads() function has been split into two variants: _reschedule_yield() and _reschedule_noyield(). The latter is the "normal" one that respects the cooperative priority of the current thread (i.e. it won't switch out even if there is a higher-priority thread ready -- the current thread has to pend itself first); the former is used in the handful of places where the code was doing a swap unconditionally, just to preserve precise behavior across the refactor. I'm not at all convinced it should exist...

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
parent b481d0a045
commit 8606fabf74
19 changed files with 78 additions and 196 deletions
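Before the diff, a minimal sketch of the calling pattern being unified (not code from the tree: some_ipc_give() is a made-up example; the underscored helpers are the kernel internals named in the message above):

/* Old shape: each IPC primitive open-coded the "maybe swap" dance. */
void some_ipc_give_old(void)
{
	unsigned int key = irq_lock();

	/* ... make some waiting thread ready ... */

	if (!_is_in_isr() && _must_switch_threads()) {
		_Swap(key);		/* switch to the better thread */
	} else {
		irq_unlock(key);	/* nothing better to run */
	}
}

/* New shape: one helper owns the ISR/preemption checks and the unlock. */
void some_ipc_give_new(void)
{
	unsigned int key = irq_lock();

	/* ... make some waiting thread ready ... */

	_reschedule_noyield(key);	/* swaps only if allowed and useful */
}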
@@ -49,5 +49,5 @@ void _impl_k_thread_abort(k_tid_t thread)
 	}
 
 	/* The abort handler might have altered the ready queue. */
-	_reschedule_threads(key);
+	_reschedule_noyield(key);
 }

@@ -532,7 +532,7 @@ void _impl_k_thread_abort(k_tid_t thread)
 	}
 
 	/* The abort handler might have altered the ready queue. */
-	_reschedule_threads(key);
+	_reschedule_noyield(key);
 }
 #endif
 

@@ -3948,7 +3948,7 @@ extern int k_poll_signal(struct k_poll_signal *signal, int result);
 /**
  * @internal
  */
-extern int _handle_obj_poll_events(sys_dlist_t *events, u32_t state);
+extern void _handle_obj_poll_events(sys_dlist_t *events, u32_t state);
 
 /** @} */
 

@@ -21,13 +21,13 @@ extern k_tid_t const _idle_thread;
 
 extern void _add_thread_to_ready_q(struct k_thread *thread);
 extern void _remove_thread_from_ready_q(struct k_thread *thread);
-extern void _reschedule_threads(int key);
+extern int _reschedule_noyield(int key);
+extern int _reschedule_yield(int key);
 extern void k_sched_unlock(void);
 extern void _pend_thread(struct k_thread *thread,
 			 _wait_q_t *wait_q, s32_t timeout);
 extern void _pend_current_thread(_wait_q_t *wait_q, s32_t timeout);
 extern void _move_thread_to_end_of_prio_q(struct k_thread *thread);
-extern int __must_switch_threads(void);
 extern int _is_thread_time_slicing(struct k_thread *thread);
 extern void _update_time_slice_before_swap(void);
 #ifdef _NON_OPTIMIZED_TICKS_PER_SEC

@@ -262,15 +262,6 @@ static inline int _get_highest_ready_prio(void)
 }
 #endif
 
-/*
- * Checks if current thread must be context-switched out. The caller must
- * already know that the execution context is a thread.
- */
-static inline int _must_switch_threads(void)
-{
-	return _is_preempt(_current) && __must_switch_threads();
-}
-
 /*
  * Called directly by other internal kernel code.
  * Exposed to applications via k_sched_lock(), which just calls this
@@ -219,7 +219,7 @@ static void mbox_message_dispose(struct k_mbox_msg *rx_msg)
 	_set_thread_return_value(sending_thread, 0);
 	_mark_thread_as_not_pending(sending_thread);
 	_ready_thread(sending_thread);
-	_reschedule_threads(key);
+	_reschedule_noyield(key);
 }
 
 /**

@@ -276,7 +276,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 	 * until the receiver consumes the message
 	 */
 	if (sending_thread->base.thread_state & _THREAD_DUMMY) {
-		_reschedule_threads(key);
+		_reschedule_noyield(key);
 		return 0;
 	}
 #endif
@@ -123,15 +123,11 @@ void k_mem_slab_free(struct k_mem_slab *slab, void **mem)
 		_set_thread_return_value_with_data(pending_thread, 0, *mem);
 		_abort_thread_timeout(pending_thread);
 		_ready_thread(pending_thread);
-		if (_must_switch_threads()) {
-			_Swap(key);
-			return;
-		}
 	} else {
 		**(char ***)mem = slab->free_list;
 		slab->free_list = *(char **)mem;
 		slab->num_used--;
 	}
 
-	irq_unlock(key);
+	_reschedule_noyield(key);
 }
@@ -112,7 +112,7 @@ void k_mem_pool_free_id(struct k_mem_block_id *id)
 	}
 
 	if (need_sched && !_is_in_isr()) {
-		_reschedule_threads(key);
+		_reschedule_noyield(key);
 	} else {
 		irq_unlock(key);
 	}
@@ -95,10 +95,8 @@ int _impl_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
 			_set_thread_return_value(pending_thread, 0);
 			_abort_thread_timeout(pending_thread);
 			_ready_thread(pending_thread);
-			if (!_is_in_isr() && _must_switch_threads()) {
-				_Swap(key);
-				return 0;
-			}
+			_reschedule_noyield(key);
+			return 0;
 		} else {
 			/* put message in queue */
 			memcpy(q->write_ptr, data, q->msg_size);

@@ -116,7 +114,7 @@ int _impl_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
 		/* wait for put message success, failure, or timeout */
 		_pend_current_thread(&q->wait_q, timeout);
 		_current->base.swap_data = data;
-		return _Swap(key);
+		return _reschedule_yield(key);
 	}
 
 	irq_unlock(key);

@@ -188,10 +186,8 @@ int _impl_k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
 			_set_thread_return_value(pending_thread, 0);
 			_abort_thread_timeout(pending_thread);
 			_ready_thread(pending_thread);
-			if (!_is_in_isr() && _must_switch_threads()) {
-				_Swap(key);
-				return 0;
-			}
+			_reschedule_noyield(key);
+			return 0;
 		}
 		result = 0;
 	} else if (timeout == K_NO_WAIT) {

@@ -236,7 +232,7 @@ void _impl_k_msgq_purge(struct k_msgq *q)
 	q->used_msgs = 0;
 	q->read_ptr = q->write_ptr;
 
-	_reschedule_threads(key);
+	_reschedule_noyield(key);
 }
 
 #ifdef CONFIG_USERSPACE
@@ -511,7 +511,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 		_sched_unlock_no_reschedule();
 		_pend_thread((struct k_thread *) &async_desc->thread,
 			     &pipe->wait_q.writers, K_FOREVER);
-		_reschedule_threads(key);
+		_reschedule_noyield(key);
 		return 0;
 	}
 #endif
@@ -267,11 +267,8 @@ int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 }
 
 /* must be called with interrupts locked */
-static int signal_poll_event(struct k_poll_event *event, u32_t state,
-			     int *must_reschedule)
+static int signal_poll_event(struct k_poll_event *event, u32_t state)
 {
-	*must_reschedule = 0;
-
 	if (!event->poller) {
 		goto ready_event;
 	}

@@ -300,26 +297,20 @@ static int signal_poll_event(struct k_poll_event *event, u32_t state,
 	}
 
 	_ready_thread(thread);
-	*must_reschedule = !_is_in_isr() && _must_switch_threads();
 
 ready_event:
 	set_event_ready(event, state);
 	return 0;
 }
 
-/* returns 1 if a reschedule must take place, 0 otherwise */
-int _handle_obj_poll_events(sys_dlist_t *events, u32_t state)
+void _handle_obj_poll_events(sys_dlist_t *events, u32_t state)
 {
 	struct k_poll_event *poll_event;
-	int must_reschedule;
 
 	poll_event = (struct k_poll_event *)sys_dlist_get(events);
-	if (!poll_event) {
-		return 0;
+	if (poll_event) {
+		(void) signal_poll_event(poll_event, state);
 	}
-
-	(void) signal_poll_event(poll_event, state, &must_reschedule);
-	return must_reschedule;
 }
 
 void k_poll_signal_init(struct k_poll_signal *signal)

@@ -333,7 +324,6 @@ int k_poll_signal(struct k_poll_signal *signal, int result)
 {
 	unsigned int key = irq_lock();
 	struct k_poll_event *poll_event;
-	int must_reschedule;
 
 	signal->result = result;
 	signal->signaled = 1;

@@ -344,14 +334,8 @@ int k_poll_signal(struct k_poll_signal *signal, int result)
 		return 0;
 	}
 
-	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED,
-				   &must_reschedule);
-
-	if (must_reschedule) {
-		(void)_Swap(key);
-	} else {
-		irq_unlock(key);
-	}
+	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);
+
+	_reschedule_noyield(key);
 	return rc;
 }
@@ -68,13 +68,10 @@ static void prepare_thread_to_run(struct k_thread *thread, void *data)
 }
 #endif /* CONFIG_POLL */
 
-/* returns 1 if a reschedule must take place, 0 otherwise */
-static inline int handle_poll_events(struct k_queue *queue, u32_t state)
+static inline void handle_poll_events(struct k_queue *queue, u32_t state)
 {
 #ifdef CONFIG_POLL
-	return _handle_obj_poll_events(&queue->poll_events, state);
-#else
-	return 0;
+	_handle_obj_poll_events(&queue->poll_events, state);
 #endif
 }
 

@@ -88,19 +85,12 @@ void k_queue_cancel_wait(struct k_queue *queue)
 
 	if (first_pending_thread) {
 		prepare_thread_to_run(first_pending_thread, NULL);
-		if (!_is_in_isr() && _must_switch_threads()) {
-			(void)_Swap(key);
-			return;
-		}
 	}
 #else
-	if (handle_poll_events(queue, K_POLL_STATE_NOT_READY)) {
-		(void)_Swap(key);
-		return;
-	}
+	handle_poll_events(queue, K_POLL_STATE_NOT_READY);
 #endif /* !CONFIG_POLL */
 
-	irq_unlock(key);
+	_reschedule_noyield(key);
 }
 
 void k_queue_insert(struct k_queue *queue, void *prev, void *data)

@@ -113,11 +103,7 @@ void k_queue_insert(struct k_queue *queue, void *prev, void *data)
 
 	if (first_pending_thread) {
 		prepare_thread_to_run(first_pending_thread, data);
-		if (!_is_in_isr() && _must_switch_threads()) {
-			(void)_Swap(key);
-			return;
-		}
-		irq_unlock(key);
+		_reschedule_noyield(key);
 		return;
 	}
 #endif /* !CONFIG_POLL */

@@ -125,13 +111,10 @@ void k_queue_insert(struct k_queue *queue, void *prev, void *data)
 	sys_slist_insert(&queue->data_q, prev, data);
 
 #if defined(CONFIG_POLL)
-	if (handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE)) {
-		(void)_Swap(key);
-		return;
-	}
+	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
 #endif /* CONFIG_POLL */
 
-	irq_unlock(key);
+	_reschedule_noyield(key);
 }
 
 void k_queue_append(struct k_queue *queue, void *data)

@@ -148,7 +131,6 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
 {
 	__ASSERT(head && tail, "invalid head or tail");
 
-	int need_sched = 0;
 	unsigned int key = irq_lock();
 #if !defined(CONFIG_POLL)
 	struct k_thread *thread;

@@ -156,7 +138,6 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
 	while (head && ((thread = _unpend_first_thread(&queue->wait_q)))) {
 		prepare_thread_to_run(thread, head);
 		head = *(void **)head;
-		need_sched = 1;
 	}
 
 	if (head) {

@@ -165,18 +146,10 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
 
 #else
 	sys_slist_append_list(&queue->data_q, head, tail);
-	if (handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE)) {
-		(void)_Swap(key);
-		return;
-	}
+	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
 #endif /* !CONFIG_POLL */
 
-	if (need_sched) {
-		_reschedule_threads(key);
-		return;
-	} else {
-		irq_unlock(key);
-	}
+	_reschedule_noyield(key);
 }
 
 void k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
@@ -135,23 +135,34 @@ void _remove_thread_from_ready_q(struct k_thread *thread)
 #endif
 }
 
-/* reschedule threads if the scheduler is not locked */
-/* not callable from ISR */
-/* must be called with interrupts locked */
-void _reschedule_threads(int key)
+/* Releases the irq_lock and swaps to a higher priority thread if one
+ * is available, returning the _Swap() return value, otherwise zero.
+ * Does not swap away from a thread at a cooperative (unpreemptible)
+ * priority unless "yield" is true.
+ */
+static int resched(int key, int yield)
 {
-#ifdef CONFIG_PREEMPT_ENABLED
 	K_DEBUG("rescheduling threads\n");
 
-	if (_must_switch_threads()) {
+	if (!_is_in_isr() &&
+	    (yield || _is_preempt(_current)) &&
+	    _is_prio_higher(_get_highest_ready_prio(), _current->base.prio)) {
 		K_DEBUG("context-switching out %p\n", _current);
-		_Swap(key);
+		return _Swap(key);
 	} else {
 		irq_unlock(key);
+		return 0;
 	}
-#else
-	irq_unlock(key);
-#endif
 }
 
+int _reschedule_noyield(int key)
+{
+	return resched(key, 0);
+}
+
+int _reschedule_yield(int key)
+{
+	return resched(key, 1);
+}
+
 void k_sched_lock(void)
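The split above is just the yield flag: resched() releases the lock and swaps only when that is allowed and useful, and the two exported wrappers differ in whether a cooperative-priority current thread may be switched out. A minimal sketch of how a blocking path is expected to call the yield variant (my_obj_take() is hypothetical; the kernel-internal calls are the ones in the surrounding hunks):

/* Sketch only: my_obj_take() is hypothetical, but the call shape mirrors
 * the msg_q and pthread_cond changes in this commit.  The "noyield"
 * variant (used almost everywhere else) has the same signature but will
 * not switch away from a cooperative-priority current thread.
 */
struct my_obj {
	_wait_q_t wait_q;
};

int my_obj_take(struct my_obj *obj, s32_t timeout)
{
	unsigned int key = irq_lock();

	/* ... fast path elided ... */

	_pend_current_thread(&obj->wait_q, timeout);

	/* The current thread just pended itself, so this is one of the
	 * places that previously called _Swap() unconditionally: use the
	 * "yield" variant and hand back its return value (whatever
	 * _Swap() returns, e.g. 0 on success or -EAGAIN on timeout).
	 */
	return _reschedule_yield(key);
}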
@@ -174,7 +185,7 @@ void k_sched_unlock(void)
 	K_DEBUG("scheduler unlocked (%p:%d)\n",
 		_current, _current->base.sched_locked);
 
-	_reschedule_threads(key);
+	_reschedule_noyield(key);
 #endif
 }
 

@@ -237,43 +248,6 @@ void _pend_current_thread(_wait_q_t *wait_q, s32_t timeout)
 	_pend_thread(_current, wait_q, timeout);
 }
 
-#if defined(CONFIG_PREEMPT_ENABLED) && defined(CONFIG_KERNEL_DEBUG)
-/* debug aid */
-static void dump_ready_q(void)
-{
-	K_DEBUG("bitmaps: ");
-	for (int bitmap = 0; bitmap < K_NUM_PRIO_BITMAPS; bitmap++) {
-		K_DEBUG("%x", _ready_q.prio_bmap[bitmap]);
-	}
-	K_DEBUG("\n");
-	for (int prio = 0; prio < K_NUM_PRIORITIES; prio++) {
-		K_DEBUG("prio: %d, head: %p\n",
-			prio - _NUM_COOP_PRIO,
-			sys_dlist_peek_head(&_ready_q.q[prio]));
-	}
-}
-#endif /* CONFIG_PREEMPT_ENABLED && CONFIG_KERNEL_DEBUG */
-
-/*
- * Check if there is a thread of higher prio than the current one. Should only
- * be called if we already know that the current thread is preemptible.
- */
-int __must_switch_threads(void)
-{
-#ifdef CONFIG_PREEMPT_ENABLED
-	K_DEBUG("current prio: %d, highest prio: %d\n",
-		_current->base.prio, _get_highest_ready_prio());
-
-#ifdef CONFIG_KERNEL_DEBUG
-	dump_ready_q();
-#endif /* CONFIG_KERNEL_DEBUG */
-
-	return _is_prio_higher(_get_highest_ready_prio(), _current->base.prio);
-#else
-	return 0;
-#endif
-}
-
 int _impl_k_thread_priority_get(k_tid_t thread)
 {
 	return thread->base.prio;

@@ -297,7 +271,7 @@ void _impl_k_thread_priority_set(k_tid_t tid, int prio)
 	int key = irq_lock();
 
 	_thread_priority_set(thread, prio);
-	_reschedule_threads(key);
+	_reschedule_noyield(key);
 }
 
 #ifdef CONFIG_USERSPACE

@@ -431,7 +405,7 @@ void _impl_k_wakeup(k_tid_t thread)
 	if (_is_in_isr()) {
 		irq_unlock(key);
 	} else {
-		_reschedule_threads(key);
+		_reschedule_noyield(key);
 	}
 }
 
kernel/sem.c (36 changes)
@@ -82,15 +82,10 @@ _SYSCALL_HANDLER(k_sem_init, sem, initial_count, limit)
 }
 #endif
 
-/* returns 1 if a reschedule must take place, 0 otherwise */
-static inline int handle_poll_events(struct k_sem *sem)
+static inline void handle_poll_events(struct k_sem *sem)
 {
 #ifdef CONFIG_POLL
-	u32_t state = K_POLL_STATE_SEM_AVAILABLE;
-
-	return _handle_obj_poll_events(&sem->poll_events, state);
-#else
-	return 0;
+	_handle_obj_poll_events(&sem->poll_events, K_POLL_STATE_SEM_AVAILABLE);
 #endif
 }
 

@@ -99,20 +94,18 @@ static inline void increment_count_up_to_limit(struct k_sem *sem)
 	sem->count += (sem->count != sem->limit);
 }
 
-/* returns 1 if _Swap() will need to be invoked, 0 otherwise */
-static int do_sem_give(struct k_sem *sem)
+static void do_sem_give(struct k_sem *sem)
 {
 	struct k_thread *thread = _unpend_first_thread(&sem->wait_q);
 
-	if (!thread) {
+	if (thread) {
+		(void)_abort_thread_timeout(thread);
+		_ready_thread(thread);
+		_set_thread_return_value(thread, 0);
+	} else {
 		increment_count_up_to_limit(sem);
-		return handle_poll_events(sem);
+		handle_poll_events(sem);
 	}
-	(void)_abort_thread_timeout(thread);
-	_ready_thread(thread);
-	_set_thread_return_value(thread, 0);
-
-	return !_is_in_isr() && _must_switch_threads();
 }
 
 /*

@@ -140,15 +133,10 @@ void _sem_give_non_preemptible(struct k_sem *sem)
 
 void _impl_k_sem_give(struct k_sem *sem)
 {
-	unsigned int key;
+	unsigned int key = irq_lock();
 
-	key = irq_lock();
-
-	if (do_sem_give(sem)) {
-		_Swap(key);
-	} else {
-		irq_unlock(key);
-	}
+	do_sem_give(sem);
+	_reschedule_noyield(key);
 }
 
 #ifdef CONFIG_USERSPACE
@@ -86,17 +86,14 @@ void _impl_k_stack_push(struct k_stack *stack, u32_t data)
 
 		_set_thread_return_value_with_data(first_pending_thread,
 						   0, (void *)data);
 
-		if (!_is_in_isr() && _must_switch_threads()) {
-			(void)_Swap(key);
-			return;
-		}
+		_reschedule_noyield(key);
+		return;
 	} else {
 		*(stack->next) = data;
 		stack->next++;
+		irq_unlock(key);
 	}
-
-	irq_unlock(key);
 }
 
 #ifdef CONFIG_USERSPACE
@@ -228,7 +228,7 @@ void _impl_k_thread_start(struct k_thread *thread)
 
 	_mark_thread_as_started(thread);
 	_ready_thread(thread);
-	_reschedule_threads(key);
+	_reschedule_noyield(key);
 }
 
 #ifdef CONFIG_USERSPACE

@@ -484,7 +484,7 @@ void _impl_k_thread_resume(struct k_thread *thread)
 
 	_k_thread_single_resume(thread);
 
-	_reschedule_threads(key);
+	_reschedule_noyield(key);
 }
 
 #ifdef CONFIG_USERSPACE
@@ -46,7 +46,7 @@ void _impl_k_thread_abort(k_tid_t thread)
 	}
 
 		/* The abort handler might have altered the ready queue. */
-		_reschedule_threads(key);
+		_reschedule_noyield(key);
 	}
 }
 #endif
@@ -173,7 +173,7 @@ void _impl_k_timer_stop(struct k_timer *timer)
 	if (_is_in_isr()) {
 		irq_unlock(key);
 	} else {
-		_reschedule_threads(key);
+		_reschedule_noyield(key);
 	}
 }
 
@@ -24,14 +24,9 @@ int pthread_barrier_wait(pthread_barrier_t *b)
 		while (!sys_dlist_is_empty(&b->wait_q)) {
 			ready_one_thread(&b->wait_q);
 		}
-
-		if (!__must_switch_threads()) {
-			irq_unlock(key);
-			return 0;
-		}
 	} else {
 		_pend_current_thread(&b->wait_q, K_FOREVER);
 	}
 
-	return _Swap(key);
+	return _reschedule_noyield(key);
 }
@@ -22,7 +22,7 @@ static int cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut, int timeout)
 	ready_one_thread(&mut->sem->wait_q);
 	_pend_current_thread(&cv->wait_q, timeout);
 
-	ret = _Swap(key);
+	ret = _reschedule_yield(key);
 
 	/* FIXME: this extra lock (and the potential context switch it
 	 * can cause) could be optimized out. At the point of the

@@ -47,25 +47,13 @@ static int cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut, int timeout)
  *
  * https://blog.mozilla.org/nfroyd/2017/03/29/on-mutex-performance-part-1/
  */
-static void swap_or_unlock(int key)
-{
-	/* API madness: use __ not _ here. The latter checks for our
-	 * preemption state, but we want to do a switch here even if
-	 * we can be preempted.
-	 */
-	if (!_is_in_isr() && __must_switch_threads()) {
-		_Swap(key);
-	} else {
-		irq_unlock(key);
-	}
-}
 
 int pthread_cond_signal(pthread_cond_t *cv)
 {
 	int key = irq_lock();
 
 	ready_one_thread(&cv->wait_q);
-	swap_or_unlock(key);
+	_reschedule_yield(key);
 
 	return 0;
 }

@@ -78,7 +66,7 @@ int pthread_cond_broadcast(pthread_cond_t *cv)
 		ready_one_thread(&cv->wait_q);
 	}
 
-	swap_or_unlock(key);
+	_reschedule_yield(key);
 
 	return 0;
 }