kernel: use 'thread' for thread variable consistently

We have been using thread, th and t for thread variables, which makes the
code less readable, especially since t is also used for timeouts and other
time-related variables. Just use thread where possible and keep things
consistent.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Anas Nashif 2019-12-19 08:19:45 -05:00
commit 9e3e7f6dda
6 changed files with 107 additions and 101 deletions
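As a quick editorial illustration of the clash the message describes (this is
not part of the commit; it is condensed from the z_thread_timeout() hunk in
the scheduler diff below, with the function bodies elided):

    /* Before: "to" and "th" leave the reader to work out which short
     * name is the timeout and which is the thread.
     */
    void z_thread_timeout(struct _timeout *to)
    {
    	struct k_thread *th = CONTAINER_OF(to, struct k_thread, base.timeout);
    	/* ... */
    }

    /* After: each variable is named for what it holds. */
    void z_thread_timeout(struct _timeout *timeout)
    {
    	struct k_thread *thread = CONTAINER_OF(timeout,
    					       struct k_thread, base.timeout);
    	/* ... */
    }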

@@ -146,7 +146,7 @@ static inline bool size_mul_overflow(size_t a, size_t b, size_t *result)
  * return NULL;
  * }
  *
- * struct k_thread *t = NULL;
+ * struct k_thread *thread = NULL;
  * sys_dlist_t *l =
  *	&pq->queues[u32_count_trailing_zeros(pq->bitmask)];
  *

@@ -255,10 +255,10 @@ static ALWAYS_INLINE void z_ready_thread(struct k_thread *thread)
 static inline void _ready_one_thread(_wait_q_t *wq)
 {
-	struct k_thread *th = z_unpend_first_thread(wq);
-	if (th != NULL) {
-		z_ready_thread(th);
+	struct k_thread *thread = z_unpend_first_thread(wq);
+	if (thread != NULL) {
+		z_ready_thread(thread);
 	}
 }

@@ -296,15 +296,15 @@ void __weak main(void)
 /* LCOV_EXCL_STOP */
 #if defined(CONFIG_MULTITHREADING)
-static void init_idle_thread(struct k_thread *thr, k_thread_stack_t *stack)
+static void init_idle_thread(struct k_thread *thread, k_thread_stack_t *stack)
 {
-	z_setup_new_thread(thr, stack,
+	z_setup_new_thread(thread, stack,
 			   CONFIG_IDLE_STACK_SIZE, idle, NULL, NULL, NULL,
 			   K_LOWEST_THREAD_PRIO, K_ESSENTIAL, IDLE_THREAD_NAME);
-	z_mark_thread_as_started(thr);
+	z_mark_thread_as_started(thread);
 #ifdef CONFIG_SMP
-	thr->base.is_idle = 1U;
+	thread->base.is_idle = 1U;
 #endif
 }
 #endif /* CONFIG_MULTITHREADING */

@@ -80,9 +80,10 @@ static inline bool is_thread_dummy(struct k_thread *thread)
 }
 #endif
-bool z_is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2)
+bool z_is_t1_higher_prio_than_t2(struct k_thread *thread_1,
+				 struct k_thread *thread_2)
 {
-	if (t1->base.prio < t2->base.prio) {
+	if (thread_1->base.prio < thread_2->base.prio) {
 		return true;
 	}
@@ -93,10 +94,10 @@ bool z_is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2)
 	 * deadlines when the job is complete. Letting the deadlines
 	 * go negative is fine and in fact prevents aliasing bugs.
 	 */
-	if (t1->base.prio == t2->base.prio) {
+	if (thread_1->base.prio == thread_2->base.prio) {
 		int now = (int) k_cycle_get_32();
-		int dt1 = t1->base.prio_deadline - now;
-		int dt2 = t2->base.prio_deadline - now;
+		int dt1 = thread_1->base.prio_deadline - now;
+		int dt2 = thread_2->base.prio_deadline - now;
 		return dt1 < dt2;
 	}
@@ -105,7 +106,8 @@ bool z_is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2)
 	return false;
 }
-static ALWAYS_INLINE bool should_preempt(struct k_thread *th, int preempt_ok)
+static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
+					 int preempt_ok)
 {
 	/* Preemption is OK if it's being explicitly allowed by
 	 * software state (e.g. the thread called k_yield())
@@ -127,14 +129,14 @@ static ALWAYS_INLINE bool should_preempt(struct k_thread *th, int preempt_ok)
 	 * hit this.
 	 */
 	if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
-	    && z_is_thread_timeout_active(th)) {
+	    && z_is_thread_timeout_active(thread)) {
 		return true;
 	}
 	/* Otherwise we have to be running a preemptible thread or
 	 * switching to a metairq
 	 */
-	if (is_preempt(_current) || is_metairq(th)) {
+	if (is_preempt(_current) || is_metairq(thread)) {
 		return true;
 	}
@@ -156,11 +158,11 @@ static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
 	/* With masks enabled we need to be prepared to walk the list
 	 * looking for one we can run
 	 */
-	struct k_thread *t;
-	SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
-		if ((t->base.cpu_mask & BIT(_current_cpu->id)) != 0) {
-			return t;
+	struct k_thread *thread;
+	SYS_DLIST_FOR_EACH_CONTAINER(pq, thread, base.qnode_dlist) {
+		if ((thread->base.cpu_mask & BIT(_current_cpu->id)) != 0) {
+			return thread;
 		}
 	}
 	return NULL;
@@ -169,7 +171,7 @@ static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
 static ALWAYS_INLINE struct k_thread *next_up(void)
 {
-	struct k_thread *th = _priq_run_best(&_kernel.ready_q.runq);
+	struct k_thread *thread = _priq_run_best(&_kernel.ready_q.runq);
 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
 	/* MetaIRQs must always attempt to return back to a
@@ -179,9 +181,9 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
 	 */
 	struct k_thread *mirqp = _current_cpu->metairq_preempted;
-	if (mirqp != NULL && (th == NULL || !is_metairq(th))) {
+	if (mirqp != NULL && (thread == NULL || !is_metairq(thread))) {
 		if (!z_is_thread_prevented_from_running(mirqp)) {
-			th = mirqp;
+			thread = mirqp;
 		} else {
 			_current_cpu->metairq_preempted = NULL;
 		}
@@ -195,7 +197,7 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
 	 * responsible for putting it back in z_swap and ISR return!),
 	 * which makes this choice simple.
 	 */
-	return th ? th : _current_cpu->idle_thread;
+	return thread ? thread : _current_cpu->idle_thread;
 #else
 	/* Under SMP, the "cache" mechanism for selecting the next
 	 * thread doesn't work, so we have more work to do to test
@@ -211,35 +213,35 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
 	int queued = z_is_thread_queued(_current);
 	int active = !z_is_thread_prevented_from_running(_current);
-	if (th == NULL) {
-		th = _current_cpu->idle_thread;
+	if (thread == NULL) {
+		thread = _current_cpu->idle_thread;
 	}
 	if (active) {
 		if (!queued &&
-		    !z_is_t1_higher_prio_than_t2(th, _current)) {
-			th = _current;
+		    !z_is_t1_higher_prio_than_t2(thread, _current)) {
+			thread = _current;
 		}
-		if (!should_preempt(th, _current_cpu->swap_ok)) {
-			th = _current;
+		if (!should_preempt(thread, _current_cpu->swap_ok)) {
+			thread = _current;
 		}
 	}
 	/* Put _current back into the queue */
-	if (th != _current && active && !z_is_idle_thread_object(_current) &&
-	    !queued) {
+	if (thread != _current && active &&
+	    !z_is_idle_thread_object(_current) && !queued) {
 		_priq_run_add(&_kernel.ready_q.runq, _current);
 		z_mark_thread_as_queued(_current);
 	}
 	/* Take the new _current out of the queue */
-	if (z_is_thread_queued(th)) {
-		_priq_run_remove(&_kernel.ready_q.runq, th);
+	if (z_is_thread_queued(thread)) {
+		_priq_run_remove(&_kernel.ready_q.runq, thread);
 	}
-	z_mark_thread_as_not_queued(th);
-	return th;
+	z_mark_thread_as_not_queued(thread);
+	return thread;
 #endif
 }
@@ -279,12 +281,12 @@ void k_sched_time_slice_set(s32_t slice, int prio)
 	}
 }
-static inline int sliceable(struct k_thread *t)
+static inline int sliceable(struct k_thread *thread)
 {
-	return is_preempt(t)
-		&& !z_is_prio_higher(t->base.prio, slice_max_prio)
-		&& !z_is_idle_thread_object(t)
-		&& !z_is_thread_timeout_active(t);
+	return is_preempt(thread)
+		&& !z_is_prio_higher(thread->base.prio, slice_max_prio)
+		&& !z_is_idle_thread_object(thread)
+		&& !z_is_thread_timeout_active(thread);
 }
 /* Called out of each timer interrupt */
@@ -315,13 +317,14 @@ void z_time_slice(int ticks)
  * them specifically. Called at the moment a new thread has been
  * selected to run.
  */
-static void update_metairq_preempt(struct k_thread *th)
+static void update_metairq_preempt(struct k_thread *thread)
 {
 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
-	if (is_metairq(th) && !is_metairq(_current) && !is_preempt(_current)) {
+	if (is_metairq(thread) && !is_metairq(_current) &&
+	    !is_preempt(_current)) {
 		/* Record new preemption */
 		_current_cpu->metairq_preempted = _current;
-	} else if (!is_metairq(th)) {
+	} else if (!is_metairq(thread)) {
 		/* Returning from existing preemption */
 		_current_cpu->metairq_preempted = NULL;
 	}
@@ -331,16 +334,16 @@ static void update_metairq_preempt(struct k_thread *th)
 static void update_cache(int preempt_ok)
 {
 #ifndef CONFIG_SMP
-	struct k_thread *th = next_up();
-	if (should_preempt(th, preempt_ok)) {
+	struct k_thread *thread = next_up();
+	if (should_preempt(thread, preempt_ok)) {
 #ifdef CONFIG_TIMESLICING
-		if (th != _current) {
+		if (thread != _current) {
 			z_reset_time_slice();
 		}
 #endif
-		update_metairq_preempt(th);
-		_kernel.ready_q.cache = th;
+		update_metairq_preempt(thread);
+		_kernel.ready_q.cache = thread;
 	} else {
 		_kernel.ready_q.cache = _current;
 	}
@@ -458,16 +461,17 @@ ALWAYS_INLINE void z_unpend_thread_no_timeout(struct k_thread *thread)
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 /* Timeout handler for *_thread_timeout() APIs */
-void z_thread_timeout(struct _timeout *to)
+void z_thread_timeout(struct _timeout *timeout)
 {
-	struct k_thread *th = CONTAINER_OF(to, struct k_thread, base.timeout);
-	if (th->base.pended_on != NULL) {
-		z_unpend_thread_no_timeout(th);
+	struct k_thread *thread = CONTAINER_OF(timeout,
+					       struct k_thread, base.timeout);
+	if (thread->base.pended_on != NULL) {
+		z_unpend_thread_no_timeout(thread);
 	}
-	z_mark_thread_as_started(th);
-	z_mark_thread_as_not_suspended(th);
-	z_ready_thread(th);
+	z_mark_thread_as_started(thread);
+	z_mark_thread_as_not_suspended(thread);
+	z_ready_thread(thread);
 }
 #endif
@@ -502,13 +506,13 @@ int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
 struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
 {
-	struct k_thread *t = z_unpend1_no_timeout(wait_q);
-	if (t != NULL) {
-		(void)z_abort_thread_timeout(t);
+	struct k_thread *thread = z_unpend1_no_timeout(wait_q);
+	if (thread != NULL) {
+		(void)z_abort_thread_timeout(thread);
 	}
-	return t;
+	return thread;
 }
 void z_unpend_thread(struct k_thread *thread)
@@ -640,16 +644,16 @@ void *z_get_next_switch_handle(void *interrupted)
 #ifdef CONFIG_SMP
 	LOCKED(&sched_spinlock) {
-		struct k_thread *th = next_up();
-		if (_current != th) {
-			update_metairq_preempt(th);
+		struct k_thread *thread = next_up();
+		if (_current != thread) {
+			update_metairq_preempt(thread);
 #ifdef CONFIG_TIMESLICING
 			z_reset_time_slice();
 #endif
 			_current_cpu->swap_ok = 0;
-			set_current(th);
+			set_current(thread);
 #ifdef CONFIG_SPIN_VALIDATE
 			/* Changed _current! Update the spinlock
 			 * bookeeping so the validation doesn't get
@@ -709,28 +713,29 @@ void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
 struct k_thread *z_priq_dumb_best(sys_dlist_t *pq)
 {
-	struct k_thread *t = NULL;
+	struct k_thread *thread = NULL;
 	sys_dnode_t *n = sys_dlist_peek_head(pq);
 	if (n != NULL) {
-		t = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
+		thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
 	}
-	return t;
+	return thread;
 }
 bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b)
 {
-	struct k_thread *ta, *tb;
-	ta = CONTAINER_OF(a, struct k_thread, base.qnode_rb);
-	tb = CONTAINER_OF(b, struct k_thread, base.qnode_rb);
-	if (z_is_t1_higher_prio_than_t2(ta, tb)) {
+	struct k_thread *thread_a, *thread_b;
+	thread_a = CONTAINER_OF(a, struct k_thread, base.qnode_rb);
+	thread_b = CONTAINER_OF(b, struct k_thread, base.qnode_rb);
+	if (z_is_t1_higher_prio_than_t2(thread_a, thread_b)) {
 		return true;
-	} else if (z_is_t1_higher_prio_than_t2(tb, ta)) {
+	} else if (z_is_t1_higher_prio_than_t2(thread_b, thread_a)) {
 		return false;
 	} else {
-		return ta->base.order_key < tb->base.order_key ? 1 : 0;
+		return thread_a->base.order_key < thread_b->base.order_key
+			? 1 : 0;
 	}
 }
@@ -776,13 +781,13 @@ void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread)
 struct k_thread *z_priq_rb_best(struct _priq_rb *pq)
 {
-	struct k_thread *t = NULL;
+	struct k_thread *thread = NULL;
 	struct rbnode *n = rb_get_min(&pq->tree);
 	if (n != NULL) {
-		t = CONTAINER_OF(n, struct k_thread, base.qnode_rb);
+		thread = CONTAINER_OF(n, struct k_thread, base.qnode_rb);
 	}
-	return t;
+	return thread;
 }
 #ifdef CONFIG_SCHED_MULTIQ
@@ -821,24 +826,24 @@ struct k_thread *z_priq_mq_best(struct _priq_mq *pq)
 		return NULL;
 	}
-	struct k_thread *t = NULL;
+	struct k_thread *thread = NULL;
 	sys_dlist_t *l = &pq->queues[__builtin_ctz(pq->bitmask)];
 	sys_dnode_t *n = sys_dlist_peek_head(l);
 	if (n != NULL) {
-		t = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
+		thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
 	}
-	return t;
+	return thread;
 }
 int z_unpend_all(_wait_q_t *wait_q)
 {
 	int need_sched = 0;
-	struct k_thread *th;
-	while ((th = z_waitq_head(wait_q)) != NULL) {
-		z_unpend_thread(th);
-		z_ready_thread(th);
+	struct k_thread *thread;
+	while ((thread = z_waitq_head(wait_q)) != NULL) {
+		z_unpend_thread(thread);
+		z_ready_thread(thread);
 		need_sched = 1;
 	}
@@ -917,13 +922,13 @@ static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
 #ifdef CONFIG_SCHED_DEADLINE
 void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
 {
-	struct k_thread *th = tid;
+	struct k_thread *thread = tid;
 	LOCKED(&sched_spinlock) {
-		th->base.prio_deadline = k_cycle_get_32() + deadline;
-		if (z_is_thread_queued(th)) {
-			_priq_run_remove(&_kernel.ready_q.runq, th);
-			_priq_run_add(&_kernel.ready_q.runq, th);
+		thread->base.prio_deadline = k_cycle_get_32() + deadline;
+		if (z_is_thread_queued(thread)) {
+			_priq_run_remove(&_kernel.ready_q.runq, thread);
+			_priq_run_add(&_kernel.ready_q.runq, thread);
 		}
 	}
 }
@@ -931,7 +936,7 @@ void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
 #ifdef CONFIG_USERSPACE
 static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
 {
-	struct k_thread *thread = (struct k_thread *)thread_p;
+	struct k_thread *thread = tid;
 	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
 	Z_OOPS(Z_SYSCALL_VERIFY_MSG(deadline > 0,
@@ -1182,14 +1187,14 @@ BUILD_ASSERT_MSG(CONFIG_MP_NUM_CPUS <= 8, "Too many CPUs for mask word");
 # endif
-static int cpu_mask_mod(k_tid_t t, u32_t enable_mask, u32_t disable_mask)
+static int cpu_mask_mod(k_tid_t thread, u32_t enable_mask, u32_t disable_mask)
 {
 	int ret = 0;
 	LOCKED(&sched_spinlock) {
-		if (z_is_thread_prevented_from_running(t)) {
-			t->base.cpu_mask |= enable_mask;
-			t->base.cpu_mask &= ~disable_mask;
+		if (z_is_thread_prevented_from_running(thread)) {
+			thread->base.cpu_mask |= enable_mask;
+			thread->base.cpu_mask &= ~disable_mask;
 		} else {
 			ret = -EINVAL;
 		}

@@ -299,30 +299,31 @@ const char *k_thread_state_str(k_tid_t thread_id)
 }
 #ifdef CONFIG_USERSPACE
-static inline int z_vrfy_k_thread_name_copy(k_tid_t t, char *buf, size_t size)
+static inline int z_vrfy_k_thread_name_copy(k_tid_t thread,
+					    char *buf, size_t size)
 {
 #ifdef CONFIG_THREAD_NAME
 	size_t len;
-	struct _k_object *ko = z_object_find(t);
+	struct _k_object *ko = z_object_find(thread);
 	/* Special case: we allow reading the names of initialized threads
 	 * even if we don't have permission on them
 	 */
-	if (t == NULL || ko->type != K_OBJ_THREAD ||
+	if (thread == NULL || ko->type != K_OBJ_THREAD ||
 	    (ko->flags & K_OBJ_FLAG_INITIALIZED) == 0) {
 		return -EINVAL;
 	}
 	if (Z_SYSCALL_MEMORY_WRITE(buf, size) != 0) {
 		return -EFAULT;
 	}
-	len = strlen(t->name);
+	len = strlen(thread->name);
 	if (len + 1 > size) {
 		return -ENOSPC;
 	}
-	return z_user_to_copy((void *)buf, t->name, len + 1);
+	return z_user_to_copy((void *)buf, thread->name, len + 1);
 #else
-	ARG_UNUSED(t);
+	ARG_UNUSED(thread);
 	ARG_UNUSED(buf);
 	ARG_UNUSED(size);
 	return -ENOSYS;

@@ -340,11 +340,11 @@ void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
 }
 #endif /* CONFIG_DYNAMIC_OBJECTS */
-static int thread_index_get(struct k_thread *t)
+static int thread_index_get(struct k_thread *thread)
 {
 	struct _k_object *ko;
-	ko = z_object_find(t);
+	ko = z_object_find(thread);
 	if (ko == NULL) {
 		return -1;