unified: Rename k_thread_static_init structure
Renames the k_thread_static_init structure to _static_thread_data to better follow Zephyr naming conventions.

Change-Id: I479add2aefa3421ebc0b879e0d04c0c7ffd7f107
Signed-off-by: Peter Mitsis <peter.mitsis@windriver.com>
parent 4ab9d32bd3
commit a04c0d70e1

2 changed files with 28 additions and 28 deletions
@@ -156,7 +156,7 @@ extern void k_thread_abort(k_tid_t thread);
 #define _THREAD_ERRNO_INIT(obj)
 #endif
 
-struct k_thread_static_init {
+struct _static_thread_data {
 	uint32_t init_groups;
 	int init_prio;
 	void (*init_entry)(void *, void *, void *);
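For orientation, only the first three members of the renamed structure appear in this hunk, but the hunks below also reference init_stack, init_stack_size, init_p1..init_p3 and a thread back-pointer. A rough sketch of its shape, reconstructed solely from those references (field order, exact types, and the init_abort member are assumptions, not the verbatim definition at this commit):

#include <stdint.h>

struct tcs;	/* thread control structure, defined elsewhere in the kernel */

/* Sketch only: fields inferred from the accesses elsewhere in this diff. */
struct _static_thread_data {
	char *init_stack;                            /* stack buffer passed to _new_thread() */
	unsigned int init_stack_size;
	void (*init_entry)(void *, void *, void *);  /* thread entry point */
	void *init_p1;
	void *init_p2;
	void *init_p3;                               /* entry-point arguments */
	void (*init_abort)(void);                    /* assumed: matches the macro's 'abort' argument */
	int init_prio;
	uint32_t init_groups;                        /* thread group bit mask */
	struct tcs *thread;                          /* back-pointer filled in by _init_static_threads() */
};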
@@ -192,14 +192,14 @@ struct k_thread_static_init {
  * in array and thus should not have gaps between them.
  * On x86 by default compiler aligns them by 32 byte boundary. To prevent
  * this 32-bit alignment in specified here.
- * k_thread_static_init structure sise needs to be kept 32-bit aligned as well
+ * _static_thread_data structure sise needs to be kept 32-bit aligned as well
  */
 #define K_THREAD_OBJ_DEFINE(name, stack_size, \
 			    entry, p1, p2, p3, \
 			    abort, prio, groups) \
 	extern void entry(void *, void *, void *); \
 	char __noinit __stack _k_thread_obj_##name[stack_size]; \
-	struct k_thread_static_init _k_thread_init_##name __aligned(4) \
+	struct _static_thread_data _k_thread_data_##name __aligned(4) \
 		__in_section(_k_task_list, private, task) = \
 		K_THREAD_INITIALIZER(_k_thread_obj_##name, stack_size, \
 				entry, p1, p2, p3, abort, prio, groups)
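For context, K_THREAD_OBJ_DEFINE is the macro through which the renamed symbols are instantiated. A minimal usage sketch based only on the parameter list visible above; the thread name, entry function, stack size, priority, and NULL abort handler are illustrative assumptions, not taken from the tree:

/* Illustrative only: a statically-defined thread in the EXE startup group. */
void my_entry(void *p1, void *p2, void *p3)
{
	/* thread body; p1/p2/p3 arrive as given in the definition below */
}

K_THREAD_OBJ_DEFINE(my_thread, 1024,              /* object name, stack size */
		    my_entry, NULL, NULL, NULL,   /* entry point and its three arguments */
		    NULL, 5, K_THREAD_GROUP_EXE); /* abort fn, priority, startup group(s) */

This expands to the extern declaration of my_entry, the _k_thread_obj_my_thread stack array, and a struct _static_thread_data instance named _k_thread_data_my_thread placed in the _k_task_list section, which is what _init_static_threads() later walks.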
@@ -34,12 +34,12 @@
 #include <sched.h>
 #include <wait_q.h>
 
-extern struct k_thread_static_init _k_task_list_start[];
-extern struct k_thread_static_init _k_task_list_end[];
+extern struct _static_thread_data _k_task_list_start[];
+extern struct _static_thread_data _k_task_list_end[];
 
-#define _FOREACH_STATIC_THREAD(thread_init) \
-	for (struct k_thread_static_init *thread_init = _k_task_list_start; \
-	     thread_init < _k_task_list_end; thread_init++)
+#define _FOREACH_STATIC_THREAD(thread_data) \
+	for (struct _static_thread_data *thread_data = _k_task_list_start; \
+	     thread_data < _k_task_list_end; thread_data++)
 
 
 /* Legacy API */
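The macro parameter rename here is purely lexical: thread_data is just the name of the loop variable the macro declares while walking the _k_task_list section bounds. A hypothetical fragment showing it in use (the counting helper itself is not part of this commit):

/* Hypothetical helper: counts the statically-defined threads. */
static int static_thread_count(void)
{
	int count = 0;

	_FOREACH_STATIC_THREAD(thread_data) {
		(void)thread_data;	/* only the iteration itself matters here */
		count++;
	}

	return count;
}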
@@ -314,10 +314,10 @@ int k_thread_cancel(k_tid_t tid)
 	return 0;
 }
 
-static inline int is_in_any_group(struct k_thread_static_init *thread_init,
+static inline int is_in_any_group(struct _static_thread_data *thread_data,
 				  uint32_t groups)
 {
-	return !!(thread_init->init_groups & groups);
+	return !!(thread_data->init_groups & groups);
 }
 
 void _k_thread_group_op(uint32_t groups, void (*func)(struct tcs *))
@@ -330,10 +330,10 @@ void _k_thread_group_op(uint32_t groups, void (*func)(struct tcs *))
 
 	/* Invoke func() on each static thread in the specified group set. */
 
-	_FOREACH_STATIC_THREAD(thread_init) {
-		if (is_in_any_group(thread_init, groups)) {
+	_FOREACH_STATIC_THREAD(thread_data) {
+		if (is_in_any_group(thread_data, groups)) {
 			key = irq_lock();
-			func(thread_init->thread);
+			func(thread_data->thread);
 			irq_unlock(key);
 		}
 	}
@@ -424,42 +424,42 @@ void _k_thread_single_abort(struct tcs *thread)
 
 void _init_static_threads(void)
 {
-	_FOREACH_STATIC_THREAD(thread_init) {
+	_FOREACH_STATIC_THREAD(thread_data) {
 		_new_thread(
-			thread_init->init_stack,
-			thread_init->init_stack_size,
+			thread_data->init_stack,
+			thread_data->init_stack_size,
 			NULL,
-			thread_init->init_entry,
-			thread_init->init_p1,
-			thread_init->init_p2,
-			thread_init->init_p3,
-			thread_init->init_prio,
+			thread_data->init_entry,
+			thread_data->init_p1,
+			thread_data->init_p2,
+			thread_data->init_p3,
+			thread_data->init_prio,
 			0);
 
-		thread_init->thread->init_data = thread_init;
+		thread_data->thread->init_data = thread_data;
 	}
 	_k_thread_group_op(K_THREAD_GROUP_EXE, _k_thread_single_start);
 }
 
 uint32_t _k_thread_group_mask_get(struct tcs *thread)
 {
-	struct k_thread_static_init *thread_init = thread->init_data;
+	struct _static_thread_data *thread_data = thread->init_data;
 
-	return thread_init->init_groups;
+	return thread_data->init_groups;
 }
 
 void _k_thread_group_join(uint32_t groups, struct tcs *thread)
 {
-	struct k_thread_static_init *thread_init = thread->init_data;
+	struct _static_thread_data *thread_data = thread->init_data;
 
-	thread_init->init_groups |= groups;
+	thread_data->init_groups |= groups;
 }
 
 void _k_thread_group_leave(uint32_t groups, struct tcs *thread)
 {
-	struct k_thread_static_init *thread_init = thread->init_data;
+	struct _static_thread_data *thread_data = thread->init_data;
 
-	thread_init->init_groups &= groups;
+	thread_data->init_groups &= groups;
 }
 
 /* legacy API */