kernel: threads: remove thread groups
We removed these features when we moved to the unified kernel. These functions existed only to support migration from the old kernel and can be removed now. Signed-off-by: Anas Nashif <anas.nashif@intel.com>
This commit is contained in:
parent
5efb6a1d94
commit
fb4eecaf5f
2 changed files with 0 additions and 62 deletions
|
@ -738,7 +738,6 @@ struct _static_thread_data {
|
|||
u32_t init_options;
|
||||
s32_t init_delay;
|
||||
void (*init_abort)(void);
|
||||
u32_t init_groups;
|
||||
};
|
||||
|
||||
#define _THREAD_INITIALIZER(thread, stack, stack_size, \
|
||||
|
@ -756,7 +755,6 @@ struct _static_thread_data {
|
|||
.init_options = (options), \
|
||||
.init_delay = (delay), \
|
||||
.init_abort = (abort), \
|
||||
.init_groups = (groups), \
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -403,45 +403,6 @@ int _impl_k_thread_cancel(k_tid_t tid)
|
|||
_SYSCALL_HANDLER1_SIMPLE(k_thread_cancel, K_OBJ_THREAD, struct k_thread *);
|
||||
#endif
|
||||
|
||||
static inline int is_in_any_group(struct _static_thread_data *thread_data,
|
||||
u32_t groups)
|
||||
{
|
||||
return !!(thread_data->init_groups & groups);
|
||||
}
|
||||
|
||||
/* Apply @func to every static thread that is a member of any group in
 * @groups, then reschedule if the operation left the current thread
 * non-ready (e.g. the group operation suspended or aborted it).
 *
 * Must not be called from ISR context (asserted below).
 */
void _k_thread_group_op(u32_t groups, void (*func)(struct k_thread *))
{
	unsigned int key;

	__ASSERT(!_is_in_isr(), "");

	/* Hold the scheduler lock so no reschedule happens mid-iteration;
	 * individual func() invocations are additionally irq-protected.
	 */
	_sched_lock();

	/* Invoke func() on each static thread in the specified group set. */

	_FOREACH_STATIC_THREAD(thread_data) {
		if (is_in_any_group(thread_data, groups)) {
			/* NOTE(review): interrupts are locked per-thread rather
			 * than around the whole loop, presumably to bound
			 * interrupt latency — confirm func() tolerates this.
			 */
			key = irq_lock();
			func(thread_data->init_thread);
			irq_unlock(key);
		}
	}

	/*
	 * If the current thread is still in a ready state, then let the
	 * "unlock scheduler" code determine if any rescheduling is needed.
	 */
	if (_is_thread_ready(_current)) {
		k_sched_unlock();
		return;
	}

	/* The current thread is no longer in a ready state--reschedule. */
	key = irq_lock();
	/* Drop the scheduler lock without triggering its own reschedule,
	 * then context-switch explicitly via _Swap() with interrupts locked.
	 */
	_sched_unlock_no_reschedule();
	_Swap(key);
}
|
||||
|
||||
void _k_thread_single_start(struct k_thread *thread)
|
||||
{
|
||||
_mark_thread_as_started(thread);
|
||||
|
@ -615,27 +576,6 @@ void _init_thread_base(struct _thread_base *thread_base, int priority,
|
|||
_init_thread_timeout(thread_base);
|
||||
}
|
||||
|
||||
u32_t _k_thread_group_mask_get(struct k_thread *thread)
|
||||
{
|
||||
struct _static_thread_data *thread_data = thread->init_data;
|
||||
|
||||
return thread_data->init_groups;
|
||||
}
|
||||
|
||||
/* Add the thread to every group whose bit is set in @groups by OR-ing
 * the bits into its static-thread init data.
 */
void _k_thread_group_join(u32_t groups, struct k_thread *thread)
{
	struct _static_thread_data *sdata = thread->init_data;

	sdata->init_groups |= groups;
}
|
||||
|
||||
/* Remove the thread from every group whose bit is set in @groups by
 * clearing those bits in its static-thread init data.
 */
void _k_thread_group_leave(u32_t groups, struct k_thread *thread)
{
	struct _static_thread_data *sdata = thread->init_data;

	sdata->init_groups &= ~groups;
}
|
||||
|
||||
void k_thread_access_grant(struct k_thread *thread, ...)
|
||||
{
|
||||
#ifdef CONFIG_USERSPACE
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue