kernel: semaphore: optimize code

Remove static helper functions that are called only once and fold
them into their callers.
In k_sem_take, replace the early returns with a single return at the
end of the function.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
commit 5076a83ef5
Author: Anas Nashif <anas.nashif@intel.com>
Date:   2019-11-26 08:56:25 -05:00


@@ -70,7 +70,6 @@ int z_impl_k_sem_init(struct k_sem *sem, unsigned int initial_count,
 		return -EINVAL;
 	}
-
 	sys_trace_void(SYS_TRACE_ID_SEMA_INIT);
 	sem->count = initial_count;
 	sem->limit = limit;
@@ -106,30 +105,21 @@ static inline void handle_poll_events(struct k_sem *sem)
 #endif
 }
 
-static inline void increment_count_up_to_limit(struct k_sem *sem)
-{
-	sem->count += (sem->count != sem->limit) ? 1U : 0U;
-}
-
-static void do_sem_give(struct k_sem *sem)
+void z_impl_k_sem_give(struct k_sem *sem)
 {
+	k_spinlock_key_t key = k_spin_lock(&lock);
 	struct k_thread *thread = z_unpend_first_thread(&sem->wait_q);
 
+	sys_trace_void(SYS_TRACE_ID_SEMA_GIVE);
 	if (thread != NULL) {
 		z_ready_thread(thread);
 		arch_thread_return_value_set(thread, 0);
 	} else {
-		increment_count_up_to_limit(sem);
+		sem->count += (sem->count != sem->limit) ? 1U : 0U;
 		handle_poll_events(sem);
 	}
-}
-
-void z_impl_k_sem_give(struct k_sem *sem)
-{
-	k_spinlock_key_t key = k_spin_lock(&lock);
-
-	sys_trace_void(SYS_TRACE_ID_SEMA_GIVE);
-	do_sem_give(sem);
+
 	sys_trace_end_call(SYS_TRACE_ID_SEMA_GIVE);
 	z_reschedule(&lock, key);
 }
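
The refactoring pattern in the hunk above, shown in isolation: a static
helper with exactly one call site is folded into its caller, removing a
function definition without changing behavior. The following is a
minimal, self-contained sketch of that step; the names (struct counter,
bump_up_to_limit, give) are hypothetical stand-ins, not Zephyr APIs.

#include <stdio.h>

struct counter {
	unsigned int count;
	unsigned int limit;
};

/*
 * Before the refactor there would be a helper with a single caller:
 *
 *	static inline void bump_up_to_limit(struct counter *c)
 *	{
 *		c->count += (c->count != c->limit) ? 1U : 0U;
 *	}
 *
 * After, the expression lives directly in the only caller.
 */
static void give(struct counter *c)
{
	/* Saturating increment: stop counting at the limit. */
	c->count += (c->count != c->limit) ? 1U : 0U;
}

int main(void)
{
	struct counter c = { .count = 1U, .limit = 2U };

	give(&c);	/* count -> 2 */
	give(&c);	/* already at the limit, stays 2 */
	printf("%u\n", c.count);
	return 0;
}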
@@ -145,6 +135,8 @@ static inline void z_vrfy_k_sem_give(struct k_sem *sem)
 int z_impl_k_sem_take(struct k_sem *sem, s32_t timeout)
 {
+	int ret = 0;
+
 	__ASSERT(((arch_is_in_isr() == false) || (timeout == K_NO_WAIT)), "");
 
 	sys_trace_void(SYS_TRACE_ID_SEMA_TAKE);
@@ -153,19 +145,20 @@ int z_impl_k_sem_take(struct k_sem *sem, s32_t timeout)
 	if (likely(sem->count > 0U)) {
 		sem->count--;
 		k_spin_unlock(&lock, key);
-		sys_trace_end_call(SYS_TRACE_ID_SEMA_TAKE);
-		return 0;
+		ret = 0;
+		goto out;
 	}
 
 	if (timeout == K_NO_WAIT) {
 		k_spin_unlock(&lock, key);
-		sys_trace_end_call(SYS_TRACE_ID_SEMA_TAKE);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto out;
 	}
 
-	sys_trace_end_call(SYS_TRACE_ID_SEMA_TAKE);
-	int ret = z_pend_curr(&lock, key, &sem->wait_q, timeout);
+	ret = z_pend_curr(&lock, key, &sem->wait_q, timeout);
 
+out:
+	sys_trace_end_call(SYS_TRACE_ID_SEMA_TAKE);
 	return ret;
 }
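
The k_sem_take change in isolation: each early "return x;" becomes
"ret = x; goto out;" so the cleanup code at the single exit point runs
exactly once on every path. A minimal sketch of that pattern follows,
under hypothetical names (take_sketch, trace_end, pend, SKETCH_EBUSY);
it illustrates the technique, it is not the Zephyr implementation.

#include <stdio.h>

#define SKETCH_EBUSY 16	/* stand-in for the errno value */

static void trace_end(void)
{
	puts("trace end");	/* runs exactly once per call */
}

static int pend(void)
{
	return 0;	/* pretend the wait succeeded */
}

/* Single-exit variant: every path funnels through "out", so the
 * trace call is written once instead of before each return. */
static int take_sketch(unsigned int count, int no_wait)
{
	int ret = 0;

	if (count > 0U) {
		ret = 0;
		goto out;
	}

	if (no_wait) {
		ret = -SKETCH_EBUSY;
		goto out;
	}

	ret = pend();
out:
	trace_end();
	return ret;
}

int main(void)
{
	printf("%d\n", take_sketch(1U, 0));	/* 0: count available */
	printf("%d\n", take_sketch(0U, 1));	/* -16: would block   */
	printf("%d\n", take_sketch(0U, 0));	/* 0: pended, woke up */
	return 0;
}

The payoff is visible in the hunk above: the three copies of
sys_trace_end_call() collapse into one at the shared exit label.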