kernel: rename thread return value functions

z_set_thread_return_value is part of the core kernel -> arch
interface and has been renamed to z_arch_thread_return_value_set.

z_set_thread_return_value_with_data renamed to
z_thread_return_value_set_with_data for consistency.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
Andrew Boie 2019-09-21 16:25:56 -07:00 committed by Anas Nashif
commit 4ad9f687df
22 changed files with 31 additions and 31 deletions

View file

@@ -42,7 +42,7 @@ extern const int _k_neg_eagain;
  * as BASEPRI is not available.
  *
  * @return -EAGAIN, or a return value set by a call to
- * z_set_thread_return_value()
+ * z_arch_thread_return_value_set()
  *
  */
 int __swap(int key)

View file

@@ -136,7 +136,7 @@ z_arch_switch_to_main_thread(struct k_thread *main_thread,
 }
 static ALWAYS_INLINE void
-z_set_thread_return_value(struct k_thread *thread, unsigned int value)
+z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 {
 	thread->arch.swap_return_value = value;
 }

View file

@@ -120,7 +120,7 @@ SECTION_FUNC(exception.other, __swap)
 	/*
 	 * Load return value into r2 (return value register). -EAGAIN unless
-	 * someone previously called z_set_thread_return_value(). Do this before
+	 * someone previously called z_arch_thread_return_value_set(). Do this before
 	 * we potentially unlock interrupts.
 	 */
 	ldw r2, _thread_offset_to_retval(r2)

View file

@@ -33,7 +33,7 @@ static ALWAYS_INLINE void kernel_arch_init(void)
 }
 static ALWAYS_INLINE void
-z_set_thread_return_value(struct k_thread *thread, unsigned int value)
+z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 {
	thread->callee_saved.retval = value;
 }

View file

@@ -30,7 +30,7 @@
  *
  *
  * @return -EAGAIN, or a return value set by a call to
- * z_set_thread_return_value()
+ * z_arch_thread_return_value_set()
  *
  */
@@ -48,7 +48,7 @@ int __swap(unsigned int key)
 	 */
 	_kernel.current->callee_saved.key = key;
 	_kernel.current->callee_saved.retval = -EAGAIN;
-	/* retval may be modified with a call to z_set_thread_return_value() */
+	/* retval may be modified with a call to z_arch_thread_return_value_set() */
 	posix_thread_status_t *ready_thread_ptr =
		(posix_thread_status_t *)

View file

@@ -44,7 +44,7 @@ static inline void kernel_arch_init(void)
 static ALWAYS_INLINE void
-z_set_thread_return_value(struct k_thread *thread, unsigned int value)
+z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 {
	thread->callee_saved.retval = value;
 }

View file

@@ -78,7 +78,7 @@ SECTION_FUNC(exception.other, __swap)
 	 * Prior to unlocking irq, load return value of
 	 * __swap to temp register t2 (from
 	 * _thread_offset_to_swap_return_value). Normally, it should be -EAGAIN,
-	 * unless someone has previously called z_set_thread_return_value(..).
+	 * unless someone has previously called z_arch_thread_return_value_set(..).
 	 */
 	la t0, _kernel

View file

@@ -29,7 +29,7 @@ static ALWAYS_INLINE void kernel_arch_init(void)
 }
 static ALWAYS_INLINE void
-z_set_thread_return_value(struct k_thread *thread, unsigned int value)
+z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 {
	thread->arch.swap_return_value = value;
 }

View file

@@ -68,7 +68,7 @@
  * potential security leaks.
  *
  * @return -EAGAIN, or a return value set by a call to
- * z_set_thread_return_value()
+ * z_arch_thread_return_value_set()
  *
  * C function prototype:
  *
@@ -117,7 +117,7 @@ SECTION_FUNC(TEXT, __swap)
	 * Carve space for the return value. Setting it to a default of
	 * -EAGAIN eliminates the need for the timeout code to set it.
	 * If another value is ever needed, it can be modified with
-	 * z_set_thread_return_value().
+	 * z_arch_thread_return_value_set().
	 */
	pushl _k_neg_eagain
@@ -342,7 +342,7 @@ CROHandlingDone:
	movl _thread_offset_to_esp(%eax), %esp
-	/* load return value from a possible z_set_thread_return_value() */
+	/* load return value from a possible z_arch_thread_return_value_set() */
	popl %eax
@@ -357,7 +357,7 @@ CROHandlingDone:
	 * %eax may contain one of these values:
	 *
	 * - the return value for __swap() that was set up by a call to
-	 *   z_set_thread_return_value()
+	 *   z_arch_thread_return_value_set()
	 * - -EINVAL
	 */

View file

@@ -60,7 +60,7 @@ static inline void kernel_arch_init(void)
  * @return N/A
  */
 static ALWAYS_INLINE void
-z_set_thread_return_value(struct k_thread *thread, unsigned int value)
+z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 {
	/* write into 'eax' slot created in z_swap() entry */

View file

@@ -42,7 +42,7 @@ int z_impl_k_futex_wake(struct k_futex *futex, bool wake_all)
		thread = z_unpend_first_thread(&futex_data->wait_q);
		if (thread) {
			z_ready_thread(thread);
-			z_set_thread_return_value(thread, 0);
+			z_arch_thread_return_value_set(thread, 0);
			woken++;
		}
	} while (thread && wake_all);

View file

@@ -195,18 +195,18 @@ extern struct z_kernel _kernel;
  * z_swap() is in use it's a simple inline provided by the kernel.
  */
 static ALWAYS_INLINE void
-z_set_thread_return_value(struct k_thread *thread, unsigned int value)
+z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 {
	thread->swap_retval = value;
 }
 #endif
 static ALWAYS_INLINE void
-z_set_thread_return_value_with_data(struct k_thread *thread,
+z_thread_return_value_set_with_data(struct k_thread *thread,
				    unsigned int value,
				    void *data)
 {
-	z_set_thread_return_value(thread, value);
+	z_arch_thread_return_value_set(thread, value);
	thread->base.swap_data = data;
 }

View file

@@ -211,7 +211,7 @@ static void mbox_message_dispose(struct k_mbox_msg *rx_msg)
 #endif
	/* synchronous send: wake up sending thread */
-	z_set_thread_return_value(sending_thread, 0);
+	z_arch_thread_return_value_set(sending_thread, 0);
	z_mark_thread_as_not_pending(sending_thread);
	z_ready_thread(sending_thread);
	z_reschedule_unlocked();
@@ -257,7 +257,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
		z_unpend_thread(receiving_thread);
		/* ready receiver for execution */
-		z_set_thread_return_value(receiving_thread, 0);
+		z_arch_thread_return_value_set(receiving_thread, 0);
		z_ready_thread(receiving_thread);
 #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)

View file

@@ -119,7 +119,7 @@ void k_mem_slab_free(struct k_mem_slab *slab, void **mem)
	struct k_thread *pending_thread = z_unpend_first_thread(&slab->wait_q);
	if (pending_thread != NULL) {
-		z_set_thread_return_value_with_data(pending_thread, 0, *mem);
+		z_thread_return_value_set_with_data(pending_thread, 0, *mem);
		z_ready_thread(pending_thread);
		z_reschedule(&lock, key);
	} else {

View file

@@ -126,7 +126,7 @@ int z_impl_k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout)
			(void)memcpy(pending_thread->base.swap_data, data,
			       msgq->msg_size);
			/* wake up waiting thread */
-			z_set_thread_return_value(pending_thread, 0);
+			z_arch_thread_return_value_set(pending_thread, 0);
			z_ready_thread(pending_thread);
			z_reschedule(&msgq->lock, key);
			return 0;
@@ -215,7 +215,7 @@ int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, s32_t timeout)
		msgq->used_msgs++;
		/* wake up waiting thread */
-		z_set_thread_return_value(pending_thread, 0);
+		z_arch_thread_return_value_set(pending_thread, 0);
		z_ready_thread(pending_thread);
		z_reschedule(&msgq->lock, key);
		return 0;
@@ -287,7 +287,7 @@ void z_impl_k_msgq_purge(struct k_msgq *msgq)
	/* wake up any threads that are waiting to write */
	while ((pending_thread = z_unpend_first_thread(&msgq->wait_q)) != NULL) {
-		z_set_thread_return_value(pending_thread, -ENOMSG);
+		z_arch_thread_return_value_set(pending_thread, -ENOMSG);
		z_ready_thread(pending_thread);
	}

View file

@@ -236,7 +236,7 @@ void z_impl_k_mutex_unlock(struct k_mutex *mutex)
		k_spin_unlock(&lock, key);
-		z_set_thread_return_value(new_owner, 0);
+		z_arch_thread_return_value_set(new_owner, 0);
		/*
		 * new owner is already of higher or equal prio than first

View file

@@ -358,7 +358,7 @@ static int signal_poll_event(struct k_poll_event *event, u32_t state)
	}
	z_unpend_thread(thread);
-	z_set_thread_return_value(thread,
+	z_arch_thread_return_value_set(thread,
		state == K_POLL_STATE_CANCELLED ? -EINTR : 0);
	if (!z_is_thread_ready(thread)) {

View file

@@ -103,7 +103,7 @@ static inline void z_vrfy_k_queue_init(struct k_queue *queue)
 static void prepare_thread_to_run(struct k_thread *thread, void *data)
 {
	z_ready_thread(thread);
-	z_set_thread_return_value_with_data(thread, 0, data);
+	z_thread_return_value_set_with_data(thread, 0, data);
 }
 #endif /* CONFIG_POLL */

View file

@@ -110,7 +110,7 @@ static void do_sem_give(struct k_sem *sem)
	if (thread != NULL) {
		z_ready_thread(thread);
-		z_set_thread_return_value(thread, 0);
+		z_arch_thread_return_value_set(thread, 0);
	} else {
		increment_count_up_to_limit(sem);
		handle_poll_events(sem);

View file

@@ -106,7 +106,7 @@ void z_impl_k_stack_push(struct k_stack *stack, stack_data_t data)
	if (first_pending_thread != NULL) {
		z_ready_thread(first_pending_thread);
-		z_set_thread_return_value_with_data(first_pending_thread,
+		z_thread_return_value_set_with_data(first_pending_thread,
						   0, (void *)data);
		z_reschedule(&stack->lock, key);
		return;

View file

@@ -82,7 +82,7 @@ void z_timer_expiration_handler(struct _timeout *t)
	z_ready_thread(thread);
-	z_set_thread_return_value(thread, 0);
+	z_arch_thread_return_value_set(thread, 0);
 }

View file

@@ -143,7 +143,7 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
		m->owner = (pthread_t)thread;
		m->lock_count++;
		z_ready_thread(thread);
-		z_set_thread_return_value(thread, 0);
+		z_arch_thread_return_value_set(thread, 0);
		z_reschedule_irqlock(key);
		return 0;
	}