commit b7ef0cba5f
Author: Benjamin Walsh <benjamin.walsh@windriver.com>
Date:   2016-10-05 17:32:01 -04:00

    unified: remove last instances of struct tcs

    Change-Id: I956bf0e96266e68ac1743f02a82ffafe77ebb0e8
    Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>

14 changed files with 73 additions and 72 deletions
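The change is mechanical: every remaining use of the legacy thread control structure name, struct tcs, becomes the unified kernel's struct k_thread, and k_tid_t becomes a pointer to the latter. A minimal standalone sketch of the resulting pattern (simplified fields, not the kernel's actual definitions):

#include <stdio.h>

/* unified kernel: one thread structure; the legacy "struct tcs" name is gone */
struct k_thread {
	int prio;
	unsigned int flags;
};

/* thread IDs are now pointers to struct k_thread */
typedef struct k_thread *k_tid_t;

static int thread_priority_get(k_tid_t tid)
{
	return tid->prio;
}

int main(void)
{
	struct k_thread thread = { .prio = 7, .flags = 0 };
	k_tid_t tid = &thread;

	printf("prio = %d\n", thread_priority_get(tid));
	return 0;
}

Because k_tid_t is an opaque pointer typedef, call sites that pass thread IDs are unaffected; only code naming struct tcs directly, as in the hunks below, needs touching.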

View file

@@ -94,7 +94,7 @@ struct k_mem_map;
struct k_mem_pool;
struct k_timer;
-typedef struct tcs *k_tid_t;
+typedef struct k_thread *k_tid_t;
/* threads/scheduler/execution contexts */
@@ -654,7 +654,7 @@ static inline int k_delayed_work_submit(struct k_delayed_work *work,
struct k_mutex {
_wait_q_t wait_q;
-struct tcs *owner;
+struct k_thread *owner;
uint32_t lock_count;
int owner_orig_prio;
#ifdef CONFIG_OBJECT_MONITOR

View file

@@ -65,9 +65,6 @@ extern void _new_thread(char *pStack, unsigned stackSize,
/* context switching and scheduling-related routines */
-extern void _nano_fiber_ready(struct tcs *tcs);
-extern void _nano_fiber_swap(void);
extern unsigned int _Swap(unsigned int);
/* set and clear essential fiber/task flag */

View file

@@ -25,17 +25,17 @@
extern k_tid_t const _main_thread;
extern k_tid_t const _idle_thread;
-extern void _add_thread_to_ready_q(struct tcs *t);
-extern void _remove_thread_from_ready_q(struct tcs *t);
+extern void _add_thread_to_ready_q(struct k_thread *thread);
+extern void _remove_thread_from_ready_q(struct k_thread *thread);
extern void _reschedule_threads(int key);
extern void k_sched_unlock(void);
-extern void _pend_thread(struct tcs *thread,
+extern void _pend_thread(struct k_thread *thread,
_wait_q_t *wait_q, int32_t timeout);
extern void _pend_current_thread(_wait_q_t *wait_q, int32_t timeout);
extern void _move_thread_to_end_of_prio_q(struct k_thread *thread);
-extern struct tcs *_get_next_ready_thread(void);
+extern struct k_thread *_get_next_ready_thread(void);
extern int __must_switch_threads(void);
-extern void k_thread_priority_set(struct tcs *thread, int32_t priority);
+extern void k_thread_priority_set(struct k_thread *thread, int32_t priority);
extern int k_current_priority_get(void);
extern int32_t _ms_to_ticks(int32_t ms);
@@ -62,24 +62,25 @@ static inline int _is_prio_higher(int prio, int test_prio)
return _is_prio1_higher_than_prio2(prio, test_prio);
}
-static inline int _is_t1_higher_prio_than_t2(struct tcs *t1, struct tcs *t2)
+static inline int _is_t1_higher_prio_than_t2(struct k_thread *t1,
+struct k_thread *t2)
{
return _is_prio1_higher_than_prio2(t1->prio, t2->prio);
}
-static inline int _is_higher_prio_than_current(struct tcs *thread)
+static inline int _is_higher_prio_than_current(struct k_thread *thread)
{
return _is_t1_higher_prio_than_t2(thread, _nanokernel.current);
}
/* is thread currenlty cooperative ? */
-static inline int _is_coop(struct tcs *thread)
+static inline int _is_coop(struct k_thread *thread)
{
return thread->prio < 0;
}
/* is thread currently preemptible ? */
-static inline int _is_preempt(struct tcs *thread)
+static inline int _is_preempt(struct k_thread *thread)
{
return !_is_coop(thread) && !atomic_get(&thread->sched_locked);
}
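For reference, the _is_coop()/_is_preempt() pair above encodes the unified priority convention: a negative priority marks a cooperative thread, a non-negative one a preemptible thread, and a preemptible thread can still defer preemption while its scheduler-lock count is nonzero. A standalone sketch under those assumptions (plain int standing in for the kernel's atomic_t sched_locked):

#include <stdio.h>

struct k_thread {
	int prio;         /* < 0: cooperative; >= 0: preemptible */
	int sched_locked; /* stand-in for the atomic scheduler-lock count */
};

static int is_coop(struct k_thread *thread)
{
	return thread->prio < 0;
}

static int is_preempt(struct k_thread *thread)
{
	return !is_coop(thread) && !thread->sched_locked;
}

int main(void)
{
	struct k_thread coop = { .prio = -1, .sched_locked = 0 };
	struct k_thread task = { .prio = 5, .sched_locked = 0 };

	/* prints 0 then 1: cooperative threads are never preemptible */
	printf("%d %d\n", is_preempt(&coop), is_preempt(&task));
	return 0;
}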
@@ -188,60 +189,60 @@ static inline void _reset_thread_states(struct k_thread *thread,
}
/* mark a thread as being suspended */
-static inline void _mark_thread_as_suspended(struct tcs *thread)
+static inline void _mark_thread_as_suspended(struct k_thread *thread)
{
thread->flags |= K_SUSPENDED;
}
/* mark a thread as not being suspended */
-static inline void _mark_thread_as_not_suspended(struct tcs *thread)
+static inline void _mark_thread_as_not_suspended(struct k_thread *thread)
{
thread->flags &= ~K_SUSPENDED;
}
/* mark a thread as being in the timer queue */
-static inline void _mark_thread_as_timing(struct tcs *thread)
+static inline void _mark_thread_as_timing(struct k_thread *thread)
{
thread->flags |= K_TIMING;
}
/* mark a thread as not being in the timer queue */
-static inline void _mark_thread_as_not_timing(struct tcs *thread)
+static inline void _mark_thread_as_not_timing(struct k_thread *thread)
{
thread->flags &= ~K_TIMING;
}
/* check if a thread is on the timer queue */
-static inline int _is_thread_timing(struct tcs *thread)
+static inline int _is_thread_timing(struct k_thread *thread)
{
return !!(thread->flags & K_TIMING);
}
-static inline int _has_thread_started(struct tcs *thread)
+static inline int _has_thread_started(struct k_thread *thread)
{
return !(thread->flags & K_PRESTART);
}
/* check if a thread is ready */
-static inline int _is_thread_ready(struct tcs *thread)
+static inline int _is_thread_ready(struct k_thread *thread)
{
return (thread->flags & K_EXECUTION_MASK) == K_READY;
}
/* mark a thread as pending in its TCS */
-static inline void _mark_thread_as_pending(struct tcs *thread)
+static inline void _mark_thread_as_pending(struct k_thread *thread)
{
thread->flags |= K_PENDING;
}
/* mark a thread as not pending in its TCS */
-static inline void _mark_thread_as_not_pending(struct tcs *thread)
+static inline void _mark_thread_as_not_pending(struct k_thread *thread)
{
thread->flags &= ~K_PENDING;
}
/* check if a thread is pending */
-static inline int _is_thread_pending(struct tcs *thread)
+static inline int _is_thread_pending(struct k_thread *thread)
{
return !!(thread->flags & K_PENDING);
}
@@ -251,7 +252,7 @@ static inline int _is_thread_pending(struct tcs *thread)
* then add it to the ready queue according to its priority.
*/
/* must be called with interrupts locked */
-static inline void _ready_thread(struct tcs *thread)
+static inline void _ready_thread(struct k_thread *thread)
{
__ASSERT(_is_prio_higher(thread->prio, K_LOWEST_THREAD_PRIO) ||
((thread->prio == K_LOWEST_THREAD_PRIO) &&
@@ -278,7 +279,7 @@ static inline void _ready_thread(struct tcs *thread)
*
* This routine must be called with interrupts locked.
*/
-static inline void _mark_thread_as_started(struct tcs *thread)
+static inline void _mark_thread_as_started(struct k_thread *thread)
{
thread->flags &= ~K_PRESTART;
}
@@ -288,7 +289,7 @@ static inline void _mark_thread_as_started(struct tcs *thread)
*
* This routine must be called with interrupts locked.
*/
-static inline void _mark_thread_as_dead(struct tcs *thread)
+static inline void _mark_thread_as_dead(struct k_thread *thread)
{
thread->flags |= K_DEAD;
}
@@ -299,7 +300,7 @@ static inline void _mark_thread_as_dead(struct tcs *thread)
* Get a thread's priority. Note that it might have changed by the time this
* function returns.
*/
-static inline int32_t k_thread_priority_get(struct tcs *thread)
+static inline int32_t k_thread_priority_get(struct k_thread *thread)
{
return thread->prio;
}
@@ -309,7 +310,7 @@ static inline int32_t k_thread_priority_get(struct tcs *thread)
* queue.
*/
/* must be called with interrupts locked */
-static inline void _thread_priority_set(struct tcs *thread, int prio)
+static inline void _thread_priority_set(struct k_thread *thread, int prio)
{
if (_is_thread_ready(thread)) {
_remove_thread_from_ready_q(thread);
@@ -327,7 +328,7 @@ static inline struct k_thread *_peek_first_pending_thread(_wait_q_t *wait_q)
}
/* unpend the first thread from a wait queue */
-static inline struct tcs *_unpend_first_thread(_wait_q_t *wait_q)
+static inline struct k_thread *_unpend_first_thread(_wait_q_t *wait_q)
{
struct k_thread *thread = (struct k_thread *)sys_dlist_get(wait_q);
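_unpend_first_thread() is the primitive the rest of this commit leans on (mutex, msgq, mem_map, timer): with interrupts locked, pop the head waiter from the wait queue, or get NULL when nobody is pending. A simplified standalone model (a singly linked list stands in for sys_dlist_t, and the irq_lock() pair around the real pop is omitted):

#include <stdio.h>
#include <stddef.h>

struct k_thread {
	struct k_thread *next;
	int prio;
};

struct wait_q {
	struct k_thread *head;
};

/* pop the head waiter, or NULL if no thread is pending */
static struct k_thread *unpend_first_thread(struct wait_q *wait_q)
{
	struct k_thread *thread = wait_q->head;

	if (thread != NULL) {
		wait_q->head = thread->next;
	}
	return thread;
}

int main(void)
{
	struct k_thread t2 = { NULL, 7 };
	struct k_thread t1 = { &t2, 3 };
	struct wait_q q = { &t1 };
	struct k_thread *woken = unpend_first_thread(&q);

	printf("woke prio %d\n", woken ? woken->prio : -1);
	return 0;
}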

View file

@@ -35,13 +35,13 @@ extern "C" {
#elif defined(CONFIG_NANO_TIMERS)
#include <timeout_q.h>
-#define _init_thread_timeout(tcs) do { } while ((0))
-#define _abort_thread_timeout(tcs) do { } while ((0))
+#define _init_thread_timeout(thread) do { } while ((0))
+#define _abort_thread_timeout(thread) do { } while ((0))
#define _add_thread_timeout(thread, pq, ticks) do { } while (0)
#else
-#define _init_thread_timeout(tcs) do { } while ((0))
-#define _abort_thread_timeout(tcs) do { } while ((0))
+#define _init_thread_timeout(thread) do { } while ((0))
+#define _abort_thread_timeout(thread) do { } while ((0))
#define _get_next_timeout_expiry() (K_FOREVER)
#define _add_thread_timeout(thread, pq, ticks) do { } while (0)
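When the timeout-related configs are off, the hooks above compile down to statement-safe no-ops; the do { } while (0) wrapper is what keeps them legal as bare if/else bodies. A small self-contained illustration of the same pattern (CONFIG_NANO_TIMEOUTS is used here only as an example gate, and real_init_timeout() is a hypothetical hook, not this header's actual code):

#include <stdio.h>
#include <stddef.h>

#ifdef CONFIG_NANO_TIMEOUTS
/* hypothetical real hook, only referenced when the gate is defined */
#define _init_thread_timeout(thread) real_init_timeout(thread)
#else
/* timeouts configured out: the hook is a statement-safe no-op */
#define _init_thread_timeout(thread) do { } while (0)
#endif

int main(void)
{
	void *thread = NULL;

	if (thread != NULL)
		_init_thread_timeout(thread); /* legal even as a bare if-body */
	else
		printf("timeout hooks compiled out\n");
	return 0;
}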

View file

@@ -173,7 +173,7 @@ static int _mbox_message_match(struct k_mbox_msg *tx_msg,
*/
static void _mbox_message_dispose(struct k_mbox_msg *rx_msg)
{
-struct tcs *sending_thread;
+struct k_thread *sending_thread;
struct k_mbox_msg *tx_msg;
unsigned int key;
@@ -237,8 +237,8 @@ static void _mbox_message_dispose(struct k_mbox_msg *rx_msg)
static int _mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
int32_t timeout)
{
-struct tcs *sending_thread;
-struct tcs *receiving_thread;
+struct k_thread *sending_thread;
+struct k_thread *receiving_thread;
struct k_mbox_msg *rx_msg;
sys_dnode_t *wait_q_item;
unsigned int key;
@@ -254,7 +254,7 @@ static int _mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
key = irq_lock();
SYS_DLIST_FOR_EACH_NODE(&mbox->rx_msg_queue, wait_q_item) {
-receiving_thread = (struct tcs *)wait_q_item;
+receiving_thread = (struct k_thread *)wait_q_item;
rx_msg = (struct k_mbox_msg *)receiving_thread->swap_data;
if (_mbox_message_match(tx_msg, rx_msg) == 0) {
@@ -364,7 +364,7 @@ void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
async->thread.prio = _current->prio;
async->tx_msg = *tx_msg;
-async->tx_msg._syncing_thread = (struct tcs *)&async->thread;
+async->tx_msg._syncing_thread = (struct k_thread *)&async->thread;
async->tx_msg._async_sem = sem;
_mbox_message_put(mbox, &async->tx_msg, K_FOREVER);
@@ -513,7 +513,7 @@ static int _mbox_message_data_check(struct k_mbox_msg *rx_msg, void *buffer)
int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
int32_t timeout)
{
-struct tcs *sending_thread;
+struct k_thread *sending_thread;
struct k_mbox_msg *tx_msg;
sys_dnode_t *wait_q_item;
unsigned int key;
@@ -526,7 +526,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
key = irq_lock();
SYS_DLIST_FOR_EACH_NODE(&mbox->tx_msg_queue, wait_q_item) {
-sending_thread = (struct tcs *)wait_q_item;
+sending_thread = (struct k_thread *)wait_q_item;
tx_msg = (struct k_mbox_msg *)sending_thread->swap_data;
if (_mbox_message_match(tx_msg, rx_msg) == 0) {
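The (struct k_thread *)wait_q_item casts above work only because the wait-queue node sits at the very start of struct k_thread, so the node's address is the thread's address; sched.c's _is_wait_q_insert_point() uses CONTAINER_OF for the offset-independent form of the same recovery. A standalone model of the assumed layout:

#include <stdio.h>
#include <stddef.h>

struct node {
	struct node *next, *prev; /* stand-in for sys_dnode_t */
};

struct k_thread {
	struct node k_q_node; /* must stay at offset 0 for the raw cast */
	void *swap_data;      /* the mailbox stashes the k_mbox_msg here */
};

int main(void)
{
	int msg = 42;
	struct k_thread waiter = { { NULL, NULL }, &msg };
	struct node *wait_q_item = &waiter.k_q_node;

	/* recover the thread, then its message, from the queue node */
	struct k_thread *thread = (struct k_thread *)wait_q_item;

	printf("msg = %d\n", *(int *)thread->swap_data);
	return 0;
}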

View file

@@ -150,7 +150,7 @@ int k_mem_map_alloc(struct k_mem_map *map, void **mem, int32_t timeout)
void k_mem_map_free(struct k_mem_map *map, void **mem)
{
int key = irq_lock();
-struct tcs *pending_thread = _unpend_first_thread(&map->wait_q);
+struct k_thread *pending_thread = _unpend_first_thread(&map->wait_q);
if (pending_thread) {
_set_thread_return_value_with_data(pending_thread, 0, *mem);

View file

@@ -68,7 +68,7 @@ void k_msgq_init(struct k_msgq *q, uint32_t msg_size, uint32_t max_msgs,
int k_msgq_put(struct k_msgq *q, void *data, int32_t timeout)
{
unsigned int key = irq_lock();
-struct tcs *pending_thread;
+struct k_thread *pending_thread;
int result;
if (q->used_msgs < q->max_msgs) {
@@ -124,7 +124,7 @@ int k_msgq_put(struct k_msgq *q, void *data, int32_t timeout)
int k_msgq_get(struct k_msgq *q, void *data, int32_t timeout)
{
unsigned int key = irq_lock();
-struct tcs *pending_thread;
+struct k_thread *pending_thread;
int result;
if (q->used_msgs > 0) {
@@ -186,7 +186,7 @@ int k_msgq_get(struct k_msgq *q, void *data, int32_t timeout)
void k_msgq_purge(struct k_msgq *q)
{
unsigned int key = irq_lock();
-struct tcs *pending_thread;
+struct k_thread *pending_thread;
/* wake up any threads that are waiting to write */
while ((pending_thread = _unpend_first_thread(&q->wait_q)) != NULL) {

View file

@@ -172,7 +172,8 @@ int k_mutex_lock(struct k_mutex *mutex, int32_t timeout)
K_DEBUG("%p timeout on mutex %p\n", _current, mutex);
-struct tcs *waiter = (struct tcs *)sys_dlist_peek_head(&mutex->wait_q);
+struct k_thread *waiter =
+(struct k_thread *)sys_dlist_peek_head(&mutex->wait_q);
new_prio = mutex->owner_orig_prio;
new_prio = waiter ? new_prio_for_inheritance(waiter->prio, new_prio) :
@@ -212,7 +213,7 @@ void k_mutex_unlock(struct k_mutex *mutex)
adjust_owner_prio(mutex, mutex->owner_orig_prio);
-struct tcs *new_owner = _unpend_first_thread(&mutex->wait_q);
+struct k_thread *new_owner = _unpend_first_thread(&mutex->wait_q);
K_DEBUG("new owner of mutex %p: %p (prio: %d)\n",
mutex, new_owner, new_owner ? new_owner->prio : -1000);

View file

@@ -30,7 +30,7 @@ struct offload_work {
struct k_work work_item;
int (*offload_func)();
void *offload_args;
-struct tcs *thread;
+struct k_thread *thread;
};
static struct k_work_q offload_work_q;

View file

@@ -491,7 +491,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
*/
key = irq_lock();
_sched_unlock_no_reschedule();
-_pend_thread((struct tcs *) &async_desc->thread,
+_pend_thread((struct k_thread *) &async_desc->thread,
&pipe->wait_q.writers, K_FOREVER);
_reschedule_threads(key);
return 0;

View file

@@ -48,7 +48,7 @@ static void _clear_ready_q_prio_bit(int prio)
* Interrupts must be locked when calling this function.
*/
-void _add_thread_to_ready_q(struct tcs *thread)
+void _add_thread_to_ready_q(struct k_thread *thread)
{
int q_index = _get_ready_q_q_index(thread->prio);
sys_dlist_t *q = &_nanokernel.ready_q.q[q_index];
@@ -69,7 +69,7 @@ void _add_thread_to_ready_q(struct tcs *thread)
* Interrupts must be locked when calling this function.
*/
-void _remove_thread_from_ready_q(struct tcs *thread)
+void _remove_thread_from_ready_q(struct k_thread *thread)
{
int q_index = _get_ready_q_q_index(thread->prio);
sys_dlist_t *q = &_nanokernel.ready_q.q[q_index];
@@ -121,7 +121,8 @@ void k_sched_unlock(void)
*/
static int _is_wait_q_insert_point(sys_dnode_t *dnode_info, void *insert_prio)
{
-struct tcs *waitq_node = CONTAINER_OF(dnode_info, struct tcs, k_q_node);
+struct k_thread *waitq_node =
+CONTAINER_OF(dnode_info, struct k_thread, k_q_node);
return _is_prio_higher((int)insert_prio, waitq_node->prio);
}
@@ -140,7 +141,7 @@ int32_t _ms_to_ticks(int32_t ms)
/* pend the specified thread: it must *not* be in the ready queue */
/* must be called with interrupts locked */
-void _pend_thread(struct tcs *thread, _wait_q_t *wait_q, int32_t timeout)
+void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, int32_t timeout)
{
sys_dlist_t *dlist = (sys_dlist_t *)wait_q;
@@ -215,7 +216,7 @@ int _is_next_thread_current(void)
}
/* application API: change a thread's priority. Not callable from ISR */
-void k_thread_priority_set(struct tcs *thread, int prio)
+void k_thread_priority_set(struct k_thread *thread, int prio)
{
__ASSERT(!_is_in_isr(), "");
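_pend_thread() and _is_wait_q_insert_point() above cooperate to keep every wait queue sorted by priority, which is what lets _unpend_first_thread() simply take the head. A standalone sketch of that ordered insertion (a linked list in place of sys_dlist_t; lower numeric prio = higher priority, matching _is_prio_higher()):

#include <stdio.h>

struct k_thread {
	struct k_thread *next;
	int prio; /* lower value = higher priority */
};

/* insert before the first waiter with strictly lower priority, so
 * equal-priority threads stay FIFO and the head is always the best
 */
static void pend_thread(struct k_thread **head, struct k_thread *thread)
{
	while (*head && (*head)->prio <= thread->prio) {
		head = &(*head)->next;
	}
	thread->next = *head;
	*head = thread;
}

int main(void)
{
	struct k_thread a = { NULL, 5 }, b = { NULL, -1 }, c = { NULL, 5 };
	struct k_thread *q = NULL;

	pend_thread(&q, &a);
	pend_thread(&q, &b);
	pend_thread(&q, &c);

	for (struct k_thread *t = q; t != NULL; t = t->next) {
		printf("prio %d\n", t->prio); /* -1, then 5, 5 in FIFO order */
	}
	return 0;
}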

View file

@@ -179,7 +179,7 @@ void *k_thread_custom_data_get(void)
*
* @return N/A
*/
-void _thread_exit(struct tcs *thread)
+void _thread_exit(struct k_thread *thread)
{
/*
* Remove thread from the list of threads. This singly linked list of
@@ -190,7 +190,7 @@ void _thread_exit(struct tcs *thread)
if (thread == _nanokernel.threads) {
_nanokernel.threads = _nanokernel.threads->next_thread;
} else {
-struct tcs *prev_thread;
+struct k_thread *prev_thread;
prev_thread = _nanokernel.threads;
while (thread != prev_thread->next_thread) {
@@ -247,7 +247,7 @@ FUNC_NORETURN void _thread_entry(void (*entry)(void *, void *, void *),
CODE_UNREACHABLE;
}
-static void start_thread(struct tcs *thread)
+static void start_thread(struct k_thread *thread)
{
int key = irq_lock(); /* protect kernel queues */
@@ -286,7 +286,7 @@ k_tid_t k_thread_spawn(char *stack, unsigned stack_size,
{
__ASSERT(!_is_in_isr(), "");
-struct tcs *new_thread = (struct tcs *)stack;
+struct k_thread *new_thread = (struct k_thread *)stack;
_new_thread(stack, stack_size, NULL, entry, p1, p2, p3, prio, options);
@@ -297,7 +297,7 @@ k_tid_t k_thread_spawn(char *stack, unsigned stack_size,
int k_thread_cancel(k_tid_t tid)
{
-struct tcs *thread = tid;
+struct k_thread *thread = tid;
int key = irq_lock();
@@ -320,7 +320,7 @@ static inline int is_in_any_group(struct _static_thread_data *thread_data,
return !!(thread_data->init_groups & groups);
}
-void _k_thread_group_op(uint32_t groups, void (*func)(struct tcs *))
+void _k_thread_group_op(uint32_t groups, void (*func)(struct k_thread *))
{
unsigned int key;
@@ -353,7 +353,7 @@ void _k_thread_group_op(uint32_t groups, void (*func)(struct tcs *))
_Swap(key);
}
-void _k_thread_single_start(struct tcs *thread)
+void _k_thread_single_start(struct k_thread *thread)
{
_mark_thread_as_started(thread);
@@ -362,7 +362,7 @@ void _k_thread_single_start(struct tcs *thread)
}
}
-void _k_thread_single_suspend(struct tcs *thread)
+void _k_thread_single_suspend(struct k_thread *thread)
{
if (_is_thread_ready(thread)) {
_remove_thread_from_ready_q(thread);
@@ -371,7 +371,7 @@ void _k_thread_single_suspend(struct tcs *thread)
_mark_thread_as_suspended(thread);
}
-void k_thread_suspend(struct tcs *thread)
+void k_thread_suspend(struct k_thread *thread)
{
unsigned int key = irq_lock();
@@ -384,7 +384,7 @@ void k_thread_suspend(struct tcs *thread)
}
}
-void _k_thread_single_resume(struct tcs *thread)
+void _k_thread_single_resume(struct k_thread *thread)
{
_mark_thread_as_not_suspended(thread);
@@ -393,7 +393,7 @@ void _k_thread_single_resume(struct tcs *thread)
}
}
-void k_thread_resume(struct tcs *thread)
+void k_thread_resume(struct k_thread *thread)
{
unsigned int key = irq_lock();
@@ -402,7 +402,7 @@ void k_thread_resume(struct tcs *thread)
_reschedule_threads(key);
}
-void _k_thread_single_abort(struct tcs *thread)
+void _k_thread_single_abort(struct k_thread *thread)
{
if (thread->fn_abort != NULL) {
thread->fn_abort();
@@ -441,21 +441,21 @@ void _init_static_threads(void)
_k_thread_group_op(K_THREAD_GROUP_EXE, _k_thread_single_start);
}
-uint32_t _k_thread_group_mask_get(struct tcs *thread)
+uint32_t _k_thread_group_mask_get(struct k_thread *thread)
{
struct _static_thread_data *thread_data = thread->init_data;
return thread_data->init_groups;
}
-void _k_thread_group_join(uint32_t groups, struct tcs *thread)
+void _k_thread_group_join(uint32_t groups, struct k_thread *thread)
{
struct _static_thread_data *thread_data = thread->init_data;
thread_data->init_groups |= groups;
}
-void _k_thread_group_leave(uint32_t groups, struct tcs *thread)
+void _k_thread_group_leave(uint32_t groups, struct k_thread *thread)
{
struct _static_thread_data *thread_data = thread->init_data;
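The k_thread_spawn() hunk above also shows where the thread object lives in the unified kernel: the struct k_thread is carved out of the base of the memory passed in as the thread's stack, so no separate allocation is needed. A simplified model of that layout (alignment and the kernel's real stack initialization omitted):

#include <stdio.h>

struct k_thread {
	int prio;
	unsigned int flags;
};

static char my_stack[1024];

int main(void)
{
	/* the thread object occupies the base of the supplied stack area */
	struct k_thread *new_thread = (struct k_thread *)my_stack;
	char *usable = my_stack + sizeof(struct k_thread);

	new_thread->prio = 5;
	new_thread->flags = 0;

	printf("thread object at %p, usable stack at %p (%zu bytes)\n",
	       (void *)new_thread, (void *)usable,
	       sizeof(my_stack) - sizeof(struct k_thread));
	return 0;
}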

View file

@@ -29,7 +29,7 @@
#include <wait_q.h>
#include <sched.h>
-extern void _k_thread_single_abort(struct tcs *thread);
+extern void _k_thread_single_abort(struct k_thread *thread);
#if !defined(CONFIG_ARCH_HAS_NANO_FIBER_ABORT)
void k_thread_abort(k_tid_t thread)

View file

@@ -31,7 +31,8 @@ void timer_expiration_handler(struct _timeout *t)
{
int key = irq_lock();
struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout);
-struct tcs *first_pending_thread = _unpend_first_thread(&timer->wait_q);
+struct k_thread *first_pending_thread =
+_unpend_first_thread(&timer->wait_q);
/* if the time is periodic, start it again */
if (timer->period > 0) {
@@ -239,7 +240,7 @@ void k_timer_stop(struct k_timer *timer)
key = irq_lock();
-struct tcs *pending_thread = _unpend_first_thread(&timer->wait_q);
+struct k_thread *pending_thread = _unpend_first_thread(&timer->wait_q);
if (pending_thread) {
_set_thread_return_value(pending_thread, -ECANCELED);
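k_timer_stop() hands a result to any thread still blocked on the timer: store -ECANCELED in the sleeper before readying it, and the woken thread sees that value as the return of its blocking call. A standalone model of the handoff (swap_retval is an assumed field name standing in for the kernel's per-thread return slot):

#include <stdio.h>
#include <errno.h>

struct k_thread {
	int swap_retval; /* assumed name for the per-thread return slot */
};

/* the waker stores the wait result in the sleeper before readying it */
static void set_thread_return_value(struct k_thread *thread, int value)
{
	thread->swap_retval = value;
}

int main(void)
{
	struct k_thread pending_thread = { 0 };

	set_thread_return_value(&pending_thread, -ECANCELED);
	printf("woken thread sees %d\n", pending_thread.swap_retval);
	return 0;
}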