diff --git a/arch/arm/core/thread_abort.c b/arch/arm/core/thread_abort.c
index 619e1f8ff6a..eeb0aa607f8 100644
--- a/arch/arm/core/thread_abort.c
+++ b/arch/arm/core/thread_abort.c
@@ -49,5 +49,5 @@ void _impl_k_thread_abort(k_tid_t thread)
 	}
 
 	/* The abort handler might have altered the ready queue. */
-	_reschedule(key);
+	_reschedule_irqlock(key);
 }
diff --git a/arch/posix/core/posix_core.c b/arch/posix/core/posix_core.c
index 977d2c9ae8f..5f14e936fc2 100644
--- a/arch/posix/core/posix_core.c
+++ b/arch/posix/core/posix_core.c
@@ -531,7 +531,7 @@ void _impl_k_thread_abort(k_tid_t thread)
 	}
 
 	/* The abort handler might have altered the ready queue. */
-	_reschedule(key);
+	_reschedule_irqlock(key);
 }
 
 #endif
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index bad96aaefe9..a72072a3417 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -38,9 +38,12 @@ void _move_thread_to_end_of_prio_q(struct k_thread *thread);
 void _remove_thread_from_ready_q(struct k_thread *thread);
 int _is_thread_time_slicing(struct k_thread *thread);
 void _unpend_thread_no_timeout(struct k_thread *thread);
-int _pend_current_thread(u32_t key, _wait_q_t *wait_q, s32_t timeout);
+int _pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
+	       _wait_q_t *wait_q, s32_t timeout);
+int _pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout);
 void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout);
-void _reschedule(u32_t key);
+void _reschedule(struct k_spinlock *lock, k_spinlock_key_t key);
+void _reschedule_irqlock(u32_t key);
 struct k_thread *_unpend_first_thread(_wait_q_t *wait_q);
 void _unpend_thread(struct k_thread *thread);
 int _unpend_all(_wait_q_t *wait_q);
@@ -62,7 +65,6 @@ static ALWAYS_INLINE struct k_thread *_get_next_ready_thread(void)
 }
 #endif
 
-
 static inline bool _is_idle_thread(void *entry_point)
 {
 	return entry_point == idle;
diff --git a/kernel/mailbox.c b/kernel/mailbox.c
index 890558cf1db..fe14a48f7b7 100644
--- a/kernel/mailbox.c
+++ b/kernel/mailbox.c
@@ -220,7 +220,7 @@ static void mbox_message_dispose(struct k_mbox_msg *rx_msg)
 	_set_thread_return_value(sending_thread, 0);
 	_mark_thread_as_not_pending(sending_thread);
 	_ready_thread(sending_thread);
-	_reschedule(key);
+	_reschedule_irqlock(key);
 }
 
 /**
@@ -276,7 +276,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 		 */
 		if ((sending_thread->base.thread_state & _THREAD_DUMMY)
 		    != 0) {
-			_reschedule(key);
+			_reschedule_irqlock(key);
 			return 0;
 		}
 #endif
@@ -285,7 +285,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 		 * synchronous send: pend current thread (unqueued)
 		 * until the receiver consumes the message
 		 */
-		return _pend_current_thread(key, NULL, K_FOREVER);
+		return _pend_curr_irqlock(key, NULL, K_FOREVER);
 		}
 	}
 
@@ -306,7 +306,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 #endif
 
 	/* synchronous send: sender waits on tx queue for receiver or timeout */
-	return _pend_current_thread(key, &mbox->tx_msg_queue, timeout);
+	return _pend_curr_irqlock(key, &mbox->tx_msg_queue, timeout);
 }
 
 int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, s32_t timeout)
@@ -458,7 +458,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
 
 	/* wait until a matching sender appears or a timeout occurs */
 	_current->base.swap_data = rx_msg;
-	result = _pend_current_thread(key, &mbox->rx_msg_queue, timeout);
+	result = _pend_curr_irqlock(key, &mbox->rx_msg_queue, timeout);
 
 	/* consume message data immediately, if needed */
 	if (result == 0) {
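Every converted call site so far shares one shape: take irq_lock(), examine the object, then either wake a thread and _reschedule_irqlock(), or block via _pend_curr_irqlock(), which consumes the irq_lock key and swaps away atomically. For reference, a minimal sketch of the consumer side of that pattern; my_obj and my_obj_take are hypothetical names, not part of this patch:

    #include <kernel.h>
    #include <ksched.h>	/* kernel-internal scheduler API, in-tree code only */

    struct my_obj {
    	_wait_q_t wait_q;	/* assumed initialized by the owner */
    	int count;
    };

    int my_obj_take(struct my_obj *obj, s32_t timeout)
    {
    	unsigned int key = irq_lock();

    	if (obj->count > 0) {
    		obj->count--;
    		irq_unlock(key);
    		return 0;
    	}

    	/* Pends _current on the wait queue, releases the irq_lock key
    	 * and swaps in one atomic step; returns 0 when woken by a
    	 * giver, -EAGAIN if the timeout expires first.
    	 */
    	return _pend_curr_irqlock(key, &obj->wait_q, timeout);
    }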
diff --git a/kernel/mem_slab.c b/kernel/mem_slab.c
index f23ba5f3f22..2b5088a4b92 100644
--- a/kernel/mem_slab.c
+++ b/kernel/mem_slab.c
@@ -109,7 +109,7 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, s32_t timeout)
 		result = -ENOMEM;
 	} else {
 		/* wait for a free block or timeout */
-		result = _pend_current_thread(key, &slab->wait_q, timeout);
+		result = _pend_curr_irqlock(key, &slab->wait_q, timeout);
 		if (result == 0) {
 			*mem = _current->base.swap_data;
 		}
@@ -129,7 +129,7 @@ void k_mem_slab_free(struct k_mem_slab *slab, void **mem)
 	if (pending_thread != NULL) {
 		_set_thread_return_value_with_data(pending_thread, 0, *mem);
 		_ready_thread(pending_thread);
-		_reschedule(key);
+		_reschedule_irqlock(key);
 	} else {
 		**(char ***)mem = slab->free_list;
 		slab->free_list = *(char **)mem;
diff --git a/kernel/mempool.c b/kernel/mempool.c
index 2f839f784c2..9279540e6b6 100644
--- a/kernel/mempool.c
+++ b/kernel/mempool.c
@@ -90,7 +90,7 @@ int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
 			return ret;
 		}
 
-		(void)_pend_current_thread(irq_lock(), &p->wait_q, timeout);
+		(void)_pend_curr_irqlock(irq_lock(), &p->wait_q, timeout);
 
 		if (timeout != K_FOREVER) {
 			timeout = end - z_tick_get();
@@ -119,7 +119,7 @@ void k_mem_pool_free_id(struct k_mem_block_id *id)
 	need_sched = _unpend_all(&p->wait_q);
 
 	if (need_sched && !_is_in_isr()) {
-		_reschedule(key);
+		_reschedule_irqlock(key);
 	} else {
 		irq_unlock(key);
 	}
diff --git a/kernel/msg_q.c b/kernel/msg_q.c
index 0a71959a7d1..cd594b64070 100644
--- a/kernel/msg_q.c
+++ b/kernel/msg_q.c
@@ -127,7 +127,7 @@ int _impl_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
 			/* wake up waiting thread */
 			_set_thread_return_value(pending_thread, 0);
 			_ready_thread(pending_thread);
-			_reschedule(key);
+			_reschedule_irqlock(key);
 			return 0;
 		} else {
 			/* put message in queue */
@@ -145,7 +145,7 @@ int _impl_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
 	} else {
 		/* wait for put message success, failure, or timeout */
 		_current->base.swap_data = data;
-		return _pend_current_thread(key, &q->wait_q, timeout);
+		return _pend_curr_irqlock(key, &q->wait_q, timeout);
 	}
 
 	irq_unlock(key);
@@ -216,7 +216,7 @@ int _impl_k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
 			/* wake up waiting thread */
 			_set_thread_return_value(pending_thread, 0);
 			_ready_thread(pending_thread);
-			_reschedule(key);
+			_reschedule_irqlock(key);
 			return 0;
 		}
 		result = 0;
@@ -226,7 +226,7 @@ int _impl_k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
 	} else {
 		/* wait for get message success or timeout */
 		_current->base.swap_data = data;
-		return _pend_current_thread(key, &q->wait_q, timeout);
+		return _pend_curr_irqlock(key, &q->wait_q, timeout);
 	}
 
 	irq_unlock(key);
@@ -291,7 +291,7 @@ void _impl_k_msgq_purge(struct k_msgq *q)
 	q->used_msgs = 0;
 	q->read_ptr = q->write_ptr;
 
-	_reschedule(key);
+	_reschedule_irqlock(key);
 }
 
 #ifdef CONFIG_USERSPACE
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 711f623aec2..f14f68d5f2b 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -162,7 +162,7 @@ int _impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
 		adjust_owner_prio(mutex, new_prio);
 	}
 
-	s32_t got_mutex = _pend_current_thread(key, &mutex->wait_q, timeout);
+	s32_t got_mutex = _pend_curr_irqlock(key, &mutex->wait_q, timeout);
 
 	K_DEBUG("on mutex %p got_mutex value: %d\n", mutex, got_mutex);
 
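The producer side of the same pattern shows up in k_mem_slab_free() and _impl_k_msgq_put() above: hand the payload straight to the first waiter if there is one, otherwise store it in the object. A hedged sketch, reusing the hypothetical my_obj from the earlier note:

    void my_obj_give(struct my_obj *obj, void *payload)
    {
    	unsigned int key = irq_lock();
    	struct k_thread *waiter = _unpend_first_thread(&obj->wait_q);

    	if (waiter != NULL) {
    		/* Deliver the payload and a 0 return value directly to
    		 * the waiter's _pend_curr_irqlock() call, then make it
    		 * runnable again.
    		 */
    		_set_thread_return_value_with_data(waiter, 0, payload);
    		_ready_thread(waiter);
    		_reschedule_irqlock(key);	/* swaps, or just unlocks */
    	} else {
    		/* a real object would store the payload here, too */
    		obj->count++;
    		irq_unlock(key);
    	}
    }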
diff --git a/kernel/pipes.c b/kernel/pipes.c
index 0d84c85235a..2d530dd3cf0 100644
--- a/kernel/pipes.c
+++ b/kernel/pipes.c
@@ -548,7 +548,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 		_pend_thread((struct k_thread *) &async_desc->thread,
 			     &pipe->wait_q.writers, K_FOREVER);
-		_reschedule(key);
+		_reschedule_irqlock(key);
 		return 0;
 	}
 #endif
 
@@ -566,7 +566,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 		 */
 		key = irq_lock();
 		_sched_unlock_no_reschedule();
-		(void)_pend_current_thread(key, &pipe->wait_q.writers, timeout);
+		(void)_pend_curr_irqlock(key, &pipe->wait_q.writers, timeout);
 	} else {
 		k_sched_unlock();
 	}
@@ -708,7 +708,7 @@ int _impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
 		_current->base.swap_data = &pipe_desc;
 		key = irq_lock();
 		_sched_unlock_no_reschedule();
-		(void)_pend_current_thread(key, &pipe->wait_q.readers, timeout);
+		(void)_pend_curr_irqlock(key, &pipe->wait_q.readers, timeout);
 	} else {
 		k_sched_unlock();
 	}
diff --git a/kernel/poll.c b/kernel/poll.c
index ab260ef99c8..95dd2bc9ff3 100644
--- a/kernel/poll.c
+++ b/kernel/poll.c
@@ -231,7 +231,7 @@ int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 
 	_wait_q_t wait_q = _WAIT_Q_INIT(&wait_q);
 
-	int swap_rc = _pend_current_thread(key, &wait_q, timeout);
+	int swap_rc = _pend_curr_irqlock(key, &wait_q, timeout);
 
 	/*
 	 * Clear all event registrations. If events happen while we're in this
@@ -424,7 +424,7 @@ int _impl_k_poll_signal_raise(struct k_poll_signal *signal, int result)
 
 	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);
 
-	_reschedule(key);
+	_reschedule_irqlock(key);
 	return rc;
 }
 
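_impl_k_poll() above is the one caller that pends on a wait queue living on its own stack; that is safe because nothing can touch the queue after the pended thread is woken and the swap returns. A sketch of that one-shot shape, where wait_for_event and the registration step are hypothetical:

    int wait_for_event(s32_t timeout)
    {
    	_wait_q_t wait_q = _WAIT_Q_INIT(&wait_q);	/* on-stack queue */
    	unsigned int key = irq_lock();

    	/* ... publish &wait_q wherever the waker will look for it ... */

    	/* 0 if a waker readied this thread, -EAGAIN on timeout */
    	return _pend_curr_irqlock(key, &wait_q, timeout);
    }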
diff --git a/kernel/queue.c b/kernel/queue.c
index 62d06f38070..9178ba33991 100644
--- a/kernel/queue.c
+++ b/kernel/queue.c
@@ -135,7 +135,7 @@ void _impl_k_queue_cancel_wait(struct k_queue *queue)
 	handle_poll_events(queue, K_POLL_STATE_CANCELLED);
 #endif /* !CONFIG_POLL */
 
-	_reschedule(key);
+	_reschedule_irqlock(key);
 }
 
 #ifdef CONFIG_USERSPACE
@@ -154,7 +154,7 @@ static s32_t queue_insert(struct k_queue *queue, void *prev, void *data,
 
 	if (first_pending_thread != NULL) {
 		prepare_thread_to_run(first_pending_thread, data);
-		_reschedule(key);
+		_reschedule_irqlock(key);
 		return 0;
 	}
 #endif /* !CONFIG_POLL */
@@ -179,7 +179,7 @@ static s32_t queue_insert(struct k_queue *queue, void *prev, void *data,
 	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
 #endif /* CONFIG_POLL */
 
-	_reschedule(key);
+	_reschedule_irqlock(key);
 
 	return 0;
 }
@@ -257,7 +257,7 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
 	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
 #endif /* !CONFIG_POLL */
 
-	_reschedule(key);
+	_reschedule_irqlock(key);
 }
 
 void k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
@@ -346,7 +346,7 @@ void *_impl_k_queue_get(struct k_queue *queue, s32_t timeout)
 	return k_queue_poll(queue, timeout);
 
 #else
-	int ret = _pend_current_thread(key, &queue->wait_q, timeout);
+	int ret = _pend_curr_irqlock(key, &queue->wait_q, timeout);
 
 	return (ret != 0) ? NULL : _current->base.swap_data;
 #endif /* CONFIG_POLL */
diff --git a/kernel/sched.c b/kernel/sched.c
index 6bed158be4a..e52f1ece8e3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -428,7 +428,7 @@ void z_thread_timeout(struct _timeout *to)
 }
 #endif
 
-int _pend_current_thread(u32_t key, _wait_q_t *wait_q, s32_t timeout)
+int _pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout)
 {
 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
 	pending_current = _current;
@@ -437,6 +437,16 @@ int _pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout)
 	return _Swap_irqlock(key);
 }
 
+int _pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
+	       _wait_q_t *wait_q, s32_t timeout)
+{
+#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
+	pending_current = _current;
+#endif
+	pend(_current, wait_q, timeout);
+	return _Swap(lock, key);
+}
+
 struct k_thread *_unpend_first_thread(_wait_q_t *wait_q)
 {
 	struct k_thread *t = _unpend1_no_timeout(wait_q);
@@ -480,29 +490,38 @@ void _thread_priority_set(struct k_thread *thread, int prio)
 	sys_trace_thread_priority_set(thread);
 
 	if (need_sched) {
-		_reschedule(irq_lock());
+		_reschedule_irqlock(irq_lock());
 	}
 }
 
-void _reschedule(u32_t key)
+static inline int resched(void)
 {
 #ifdef CONFIG_SMP
 	if (!_current_cpu->swap_ok) {
-		goto noswap;
+		return 0;
 	}
-
 	_current_cpu->swap_ok = 0;
 #endif
 
-	if (_is_in_isr()) {
-		goto noswap;
+	return !_is_in_isr();
+}
+
+void _reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
+{
+	if (resched()) {
+		_Swap(lock, key);
+	} else {
+		k_spin_unlock(lock, key);
 	}
+}
 
-	(void)_Swap_irqlock(key);
-	return;
-
- noswap:
-	irq_unlock(key);
+void _reschedule_irqlock(u32_t key)
+{
+	if (resched()) {
+		_Swap_irqlock(key);
+	} else {
+		irq_unlock(key);
+	}
 }
 
 void k_sched_lock(void)
@@ -526,7 +545,7 @@ void k_sched_unlock(void)
 	K_DEBUG("scheduler unlocked (%p:%d)\n",
 		_current, _current->base.sched_locked);
 
-	_reschedule(irq_lock());
+	_reschedule_irqlock(irq_lock());
 #endif
 }
 
@@ -922,7 +941,7 @@ void _impl_k_wakeup(k_tid_t thread)
 	if (_is_in_isr()) {
 		irq_unlock(key);
 	} else {
-		_reschedule(key);
+		_reschedule_irqlock(key);
 	}
 }
 
diff --git a/kernel/sem.c b/kernel/sem.c
index 0207fd0f6db..7280497253b 100644
--- a/kernel/sem.c
+++ b/kernel/sem.c
@@ -119,7 +119,7 @@ void _impl_k_sem_give(struct k_sem *sem)
 	sys_trace_void(SYS_TRACE_ID_SEMA_GIVE);
 	do_sem_give(sem);
 	sys_trace_end_call(SYS_TRACE_ID_SEMA_GIVE);
-	_reschedule(key);
+	_reschedule_irqlock(key);
 }
 
 #ifdef CONFIG_USERSPACE
@@ -148,7 +148,7 @@ int _impl_k_sem_take(struct k_sem *sem, s32_t timeout)
 
 	sys_trace_end_call(SYS_TRACE_ID_SEMA_TAKE);
 
-	return _pend_current_thread(key, &sem->wait_q, timeout);
+	return _pend_curr_irqlock(key, &sem->wait_q, timeout);
 }
 
 #ifdef CONFIG_USERSPACE
diff --git a/kernel/stack.c b/kernel/stack.c
index af16e5995d8..2c95b0beced 100644
--- a/kernel/stack.c
+++ b/kernel/stack.c
@@ -112,7 +112,7 @@ void _impl_k_stack_push(struct k_stack *stack, u32_t data)
 
 		_set_thread_return_value_with_data(first_pending_thread,
 						   0, (void *)data);
-		_reschedule(key);
+		_reschedule_irqlock(key);
 		return;
 	} else {
 		*(stack->next) = data;
@@ -155,7 +155,7 @@ int _impl_k_stack_pop(struct k_stack *stack, u32_t *data, s32_t timeout)
 		return -EBUSY;
 	}
 
-	result = _pend_current_thread(key, &stack->wait_q, timeout);
+	result = _pend_curr_irqlock(key, &stack->wait_q, timeout);
 	if (result == -EAGAIN) {
 		return -EAGAIN;
 	}
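kernel/sched.c above is where the real split happens: resched() centralizes the old goto-based checks (the SMP swap_ok latch and the in-ISR test), and each wrapper pairs the resched() decision with its matching unlock primitive, so the only difference between the two families is which lock the key came from. A sketch of how a subsystem might use the new spinlock entry points once converted off irq_lock(); my_lock, my_wait_q and both functions are hypothetical, since this patch converts no subsystem yet:

    static struct k_spinlock my_lock;
    static _wait_q_t my_wait_q = _WAIT_Q_INIT(&my_wait_q);

    int my_wait(s32_t timeout)
    {
    	k_spinlock_key_t key = k_spin_lock(&my_lock);

    	/* _pend_curr() hands both lock and key to _Swap(), which
    	 * releases them as it switches away.
    	 */
    	return _pend_curr(&my_lock, key, &my_wait_q, timeout);
    }

    void my_wake_one(void)
    {
    	k_spinlock_key_t key = k_spin_lock(&my_lock);
    	struct k_thread *th = _unpend_first_thread(&my_wait_q);

    	if (th != NULL) {
    		_set_thread_return_value(th, 0);
    		_ready_thread(th);
    	}

    	/* Swaps only when resched() allows it (not in an ISR, and
    	 * swap_ok on SMP); otherwise it just k_spin_unlock()s.
    	 */
    	_reschedule(&my_lock, key);
    }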
diff --git a/kernel/thread.c b/kernel/thread.c
index 80c391508ca..e124e13fef7 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -271,7 +271,7 @@ void _impl_k_thread_start(struct k_thread *thread)
 
 	_mark_thread_as_started(thread);
 	_ready_thread(thread);
-	_reschedule(key);
+	_reschedule_irqlock(key);
 }
 
 #ifdef CONFIG_USERSPACE
@@ -541,7 +541,7 @@ void _impl_k_thread_suspend(struct k_thread *thread)
 	sys_trace_thread_suspend(thread);
 
 	if (thread == _current) {
-		_reschedule(key);
+		_reschedule_irqlock(key);
 	} else {
 		irq_unlock(key);
 	}
@@ -564,7 +564,7 @@ void _impl_k_thread_resume(struct k_thread *thread)
 	_k_thread_single_resume(thread);
 
 	sys_trace_thread_resume(thread);
-	_reschedule(key);
+	_reschedule_irqlock(key);
 }
 
 #ifdef CONFIG_USERSPACE
diff --git a/kernel/thread_abort.c b/kernel/thread_abort.c
index 7286cf814f0..f739171ce43 100644
--- a/kernel/thread_abort.c
+++ b/kernel/thread_abort.c
@@ -37,7 +37,7 @@ void _impl_k_thread_abort(k_tid_t thread)
 	_k_thread_single_abort(thread);
 	_thread_monitor_exit(thread);
 
-	_reschedule(key);
+	_reschedule_irqlock(key);
 }
 
 #endif
diff --git a/kernel/timer.c b/kernel/timer.c
index ce49859b732..e523d7d5096 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -172,7 +172,7 @@ void _impl_k_timer_stop(struct k_timer *timer)
 	if (_is_in_isr()) {
 		irq_unlock(key);
 	} else {
-		_reschedule(key);
+		_reschedule_irqlock(key);
 	}
 }
 
@@ -205,7 +205,7 @@ u32_t _impl_k_timer_status_sync(struct k_timer *timer)
 	if (result == 0) {
 		if (!_is_inactive_timeout(&timer->timeout)) {
 			/* wait for timer to expire or stop */
-			(void)_pend_current_thread(key, &timer->wait_q, K_FOREVER);
+			(void)_pend_curr_irqlock(key, &timer->wait_q, K_FOREVER);
 
 			/* get updated timer status */
 			key = irq_lock();
diff --git a/lib/posix/pthread_barrier.c b/lib/posix/pthread_barrier.c
index e75cb8e6400..e968031554d 100644
--- a/lib/posix/pthread_barrier.c
+++ b/lib/posix/pthread_barrier.c
@@ -24,9 +24,9 @@ int pthread_barrier_wait(pthread_barrier_t *b)
 		while (_waitq_head(&b->wait_q)) {
 			_ready_one_thread(&b->wait_q);
 		}
-		_reschedule(key);
+		_reschedule_irqlock(key);
 		return 0;
 	} else {
-		return _pend_current_thread(key, &b->wait_q, K_FOREVER);
+		return _pend_curr_irqlock(key, &b->wait_q, K_FOREVER);
 	}
 }
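pthread_barrier_wait() above shows the wake-all variant of the pattern: the last arriver readies every waiter while still holding the interrupt lock, then pays for at most one reschedule. The same shape in isolation (my_flush is a hypothetical name):

    void my_flush(_wait_q_t *wait_q)
    {
    	unsigned int key = irq_lock();

    	while (_waitq_head(wait_q) != NULL) {
    		_ready_one_thread(wait_q);
    	}

    	/* one swap at most, after every waiter is already runnable */
    	_reschedule_irqlock(key);
    }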
diff --git a/lib/posix/pthread_cond.c b/lib/posix/pthread_cond.c
index 5c433c32732..4b193ac9935 100644
--- a/lib/posix/pthread_cond.c
+++ b/lib/posix/pthread_cond.c
@@ -18,7 +18,7 @@ static int cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut, int timeout)
 	mut->lock_count = 0;
 	mut->owner = NULL;
 	_ready_one_thread(&mut->wait_q);
-	ret = _pend_current_thread(key, &cv->wait_q, timeout);
+	ret = _pend_curr_irqlock(key, &cv->wait_q, timeout);
 
 	/* FIXME: this extra lock (and the potential context switch it
 	 * can cause) could be optimized out.  At the point of the
@@ -49,7 +49,7 @@ int pthread_cond_signal(pthread_cond_t *cv)
 	int key = irq_lock();
 
 	_ready_one_thread(&cv->wait_q);
-	_reschedule(key);
+	_reschedule_irqlock(key);
 
 	return 0;
 }
@@ -62,7 +62,7 @@ int pthread_cond_broadcast(pthread_cond_t *cv)
 		_ready_one_thread(&cv->wait_q);
 	}
 
-	_reschedule(key);
+	_reschedule_irqlock(key);
 
 	return 0;
 }
diff --git a/lib/posix/pthread_mutex.c b/lib/posix/pthread_mutex.c
index 9c785a366d3..39a9e4491da 100644
--- a/lib/posix/pthread_mutex.c
+++ b/lib/posix/pthread_mutex.c
@@ -48,7 +48,7 @@ static int acquire_mutex(pthread_mutex_t *m, int timeout)
 		return EINVAL;
 	}
 
-	rc = _pend_current_thread(key, &m->wait_q, timeout);
+	rc = _pend_curr_irqlock(key, &m->wait_q, timeout);
 	if (rc != 0) {
 		rc = ETIMEDOUT;
 	}
@@ -141,7 +141,7 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
 			m->lock_count++;
 			_ready_thread(thread);
 			_set_thread_return_value(thread, 0);
-			_reschedule(key);
+			_reschedule_irqlock(key);
 			return 0;
 		}
 		m->owner = NULL;
diff --git a/tests/benchmarks/sched/README.rst b/tests/benchmarks/sched/README.rst
index 928659cd18c..89164b061be 100644
--- a/tests/benchmarks/sched/README.rst
+++ b/tests/benchmarks/sched/README.rst
@@ -6,13 +6,13 @@ latencies (not scaling performance) of specific low level scheduling
 primitives independent of overhead from application or API
 abstractions.  It works very simply: a main thread creates a
 "partner" thread at a higher priority, the partner then sleeps using
-_pend_current_thread().  From this initial state:
+_pend_curr_irqlock().  From this initial state:
 
 1. The main thread calls _unpend_first_thread()
 2. The main thread calls _ready_thread()
 3. The main thread calls k_yield()
    (the kernel switches to the partner thread)
-4. The partner thread then runs and calls _pend_current_thread() again
+4. The partner thread then runs and calls _pend_curr_irqlock() again
    (the kernel switches to the main thread)
 5. The main thread returns from k_yield()
 
diff --git a/tests/benchmarks/sched/src/main.c b/tests/benchmarks/sched/src/main.c
index 225575a6866..2c5946463d9 100644
--- a/tests/benchmarks/sched/src/main.c
+++ b/tests/benchmarks/sched/src/main.c
@@ -13,14 +13,14 @@
  * of specific low level scheduling primitives independent of overhead
  * from application or API abstractions.  It works very simply: a main
  * thread creates a "partner" thread at a higher priority, the partner
- * then sleeps using _pend_current_thread().  From this initial
+ * then sleeps using _pend_curr_irqlock().  From this initial
  * state:
 *
 * 1. The main thread calls _unpend_first_thread()
 * 2. The main thread calls _ready_thread()
 * 3. The main thread calls k_yield()
 *    (the kernel switches to the partner thread)
- * 4. The partner thread then runs and calls _pend_current_thread() again
+ * 4. The partner thread then runs and calls _pend_curr_irqlock() again
 *    (the kernel switches to the main thread)
 * 5. The main thread returns from k_yield()
 *
@@ -90,7 +90,7 @@ static void partner_fn(void *arg1, void *arg2, void *arg3)
 	while (true) {
 		unsigned int key = irq_lock();
 
-		_pend_current_thread(key, &waitq, K_FOREVER);
+		_pend_curr_irqlock(key, &waitq, K_FOREVER);
 		stamp(PARTNER_AWAKE_PENDING);
 	}
 }
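For reference, the five README steps map onto code roughly as below; waitq, stamp() and the thread setup are assumed to exist as in tests/benchmarks/sched/src/main.c, and one_cycle is a hypothetical name:

    static _wait_q_t waitq = _WAIT_Q_INIT(&waitq);

    static void one_cycle(void)
    {
    	struct k_thread *th = _unpend_first_thread(&waitq);	/* step 1 */

    	_ready_thread(th);	/* step 2 */
    	k_yield();		/* step 3: the higher-priority partner runs;
    				 * step 4 happens inside partner_fn();
    				 * step 5: k_yield() returns here
    				 */
    }

    static void partner_fn(void *arg1, void *arg2, void *arg3)
    {
    	while (true) {
    		unsigned int key = irq_lock();

    		/* step 4: pend again, swapping back to the main thread */
    		_pend_curr_irqlock(key, &waitq, K_FOREVER);
    	}
    }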