kernel: Apply 'unlikely' attribute

Applies the 'unlikely' attribute to various kernel objects that
use z_unpend_first_thread() to optimize for the non-blocking path.

This boosts the thread_metric synchronization benchmark numbers
on the frdm_k64f board by about 10%.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
Authored by Peter Mitsis on 2024-10-09 15:36:48 -07:00; committed by Anas Nashif
commit cc415bc139
8 changed files with 10 additions and 10 deletions
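For context: despite the wording of the commit title, unlikely() in Zephyr is a branch-prediction hint macro rather than a C attribute. The sketch below shows how such a hint is commonly defined for GCC/Clang and how it is used; it is illustrative only, and Zephyr's real definition lives in its toolchain headers and may differ in detail.

/* Illustrative sketch only: classic GCC/Clang-style branch-prediction hints.
 * Zephyr provides its own likely()/unlikely() in its toolchain headers.
 */
#include <stdio.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

static int increment_checked(const int *item)
{
	/* The hint marks the error branch as cold, letting the compiler keep
	 * the common case in the straight-line (fall-through) position.
	 */
	if (unlikely(item == NULL)) {
		return -1;
	}

	return *item + 1;
}

int main(void)
{
	int value = 41;

	printf("%d\n", increment_checked(&value));

	return 0;
}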

kernel/condvar.c

@@ -49,7 +49,7 @@ int z_impl_k_condvar_signal(struct k_condvar *condvar)
 	struct k_thread *thread = z_unpend_first_thread(&condvar->wait_q);
-	if (thread != NULL) {
+	if (unlikely(thread != NULL)) {
 		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_condvar, signal, condvar, K_FOREVER);
 		arch_thread_return_value_set(thread, 0);

kernel/mem_slab.c

@@ -275,7 +275,7 @@ void k_mem_slab_free(struct k_mem_slab *slab, void *mem)
 	if ((slab->free_list == NULL) && IS_ENABLED(CONFIG_MULTITHREADING)) {
 		struct k_thread *pending_thread = z_unpend_first_thread(&slab->wait_q);
-		if (pending_thread != NULL) {
+		if (unlikely(pending_thread != NULL)) {
 			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, free, slab);
 			z_thread_return_value_set_with_data(pending_thread, 0, mem);

kernel/msgq.c

@@ -136,7 +136,7 @@ int z_impl_k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout
 	if (msgq->used_msgs < msgq->max_msgs) {
 		/* message queue isn't full */
 		pending_thread = z_unpend_first_thread(&msgq->wait_q);
-		if (pending_thread != NULL) {
+		if (unlikely(pending_thread != NULL)) {
 			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, put, msgq, timeout, 0);
 			/* give message to waiting thread */
@@ -236,7 +236,7 @@ int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout)
 		/* handle first thread waiting to write (if any) */
 		pending_thread = z_unpend_first_thread(&msgq->wait_q);
-		if (pending_thread != NULL) {
+		if (unlikely(pending_thread != NULL)) {
 			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_msgq, get, msgq, timeout);
 			/* add thread's message to queue */

kernel/mutex.c

@@ -261,7 +261,7 @@ int z_impl_k_mutex_unlock(struct k_mutex *mutex)
 	LOG_DBG("new owner of mutex %p: %p (prio: %d)",
 		mutex, new_owner, new_owner ? new_owner->base.prio : -1000);
-	if (new_owner != NULL) {
+	if (unlikely(new_owner != NULL)) {
 		/*
 		 * new owner is already of higher or equal prio than first
 		 * waiter since the wait queue is priority-based: no need to

kernel/queue.c

@@ -133,7 +133,7 @@ static int32_t queue_insert(struct k_queue *queue, void *prev, void *data,
 	}
 	first_pending_thread = z_unpend_first_thread(&queue->wait_q);
-	if (first_pending_thread != NULL) {
+	if (unlikely(first_pending_thread != NULL)) {
 		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, queue_insert, queue, alloc, K_FOREVER);
 		prepare_thread_to_run(first_pending_thread, data);

kernel/sched.c

@@ -718,7 +718,7 @@ struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
 	K_SPINLOCK(&_sched_spinlock) {
 		thread = _priq_wait_best(&wait_q->waitq);
-		if (thread != NULL) {
+		if (unlikely(thread != NULL)) {
 			unpend_thread_no_timeout(thread);
 			(void)z_abort_thread_timeout(thread);
 		}

kernel/sem.c

@@ -103,7 +103,7 @@ void z_impl_k_sem_give(struct k_sem *sem)
 	thread = z_unpend_first_thread(&sem->wait_q);
-	if (thread != NULL) {
+	if (unlikely(thread != NULL)) {
 		arch_thread_return_value_set(thread, 0);
 		z_ready_thread(thread);
 	} else {
@@ -111,7 +111,7 @@ void z_impl_k_sem_give(struct k_sem *sem)
 		resched = handle_poll_events(sem);
 	}
-	if (resched) {
+	if (unlikely(resched)) {
 		z_reschedule(&lock, key);
 	} else {
 		k_spin_unlock(&lock, key);

kernel/stack.c

@@ -113,7 +113,7 @@ int z_impl_k_stack_push(struct k_stack *stack, stack_data_t data)
 	first_pending_thread = z_unpend_first_thread(&stack->wait_q);
-	if (first_pending_thread != NULL) {
+	if (unlikely(first_pending_thread != NULL)) {
 		z_thread_return_value_set_with_data(first_pending_thread,
 						    0, (void *)data);
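The thread_metric figures cited in the commit message come from Zephyr's in-tree port of the thread_metric benchmark suite (under tests/benchmarks). As a rough, hedged illustration of why the no-waiter path dominates there, the loop below mimics the kind of give/take pattern a synchronization benchmark runs; it is not the actual benchmark source, and the names used are illustrative only.

/* Rough sketch, not the real benchmark: a tight semaphore give/take loop.
 * With no other thread ever pending on the semaphore, every k_sem_give()
 * finds an empty wait queue, i.e. the branch this commit marks 'unlikely'
 * is never taken on the hot path.
 */
#include <zephyr/kernel.h>

K_SEM_DEFINE(bench_sem, 1, 1);

void bench_loop(void)
{
	for (;;) {
		(void)k_sem_take(&bench_sem, K_FOREVER);
		k_sem_give(&bench_sem);
	}
}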