kernel: make order_key field in thread conditional
The 'order_key' field in the thread structure '_thread_base' is only required when CONFIG_SCHED_SCALABLE and/or CONFIG_WAITQ_SCALABLE is enabled (neither of which is a default setting). Making this field conditional slightly reduces the size of the k_thread structure when neither of those Kconfig options is selected.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
parent 4338122248
commit ec4df64dc7
3 changed files with 11 additions and 3 deletions
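
As a quick illustration of the technique (a minimal standalone sketch, assuming nothing about the real '_thread_base' layout — the struct and field names below are hypothetical stand-ins), wrapping a field in a Kconfig guard removes both the field and any alignment padding it drags in; building with and without -DCONFIG_SCHED_SCALABLE makes the saving visible:

    /* sketch_thread_base is a hypothetical stand-in, not the real
     * _thread_base; it only demonstrates the conditional-field pattern
     * this commit applies.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct sketch_thread_base {
        uint8_t thread_state;
        int prio;
    #if defined(CONFIG_SCHED_SCALABLE) || defined(CONFIG_WAITQ_SCALABLE)
        uint32_t order_key; /* only the red/black-tree backends read this */
    #endif
    };

    int main(void)
    {
        /* commonly prints 8 when the guard is not taken and 12 when it is */
        printf("sizeof = %zu\n", sizeof(struct sketch_thread_base));
        return 0;
    }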
@@ -105,7 +105,9 @@ struct _thread_base {
 	int prio_deadline;
 #endif /* CONFIG_SCHED_DEADLINE */
 
+#if defined(CONFIG_SCHED_SCALABLE) || defined(CONFIG_WAITQ_SCALABLE)
 	uint32_t order_key;
+#endif
 
 #ifdef CONFIG_SMP
 	/* True for the per-CPU idle threads */
@@ -80,10 +80,14 @@ list(APPEND kernel_files
   system_work_q.c
   work.c
   condvar.c
-  priority_queues.c
   thread.c
   sched.c
 )
 
+if (CONFIG_SCHED_SCALABLE OR CONFIG_WAITQ_SCALABLE)
+  list(APPEND kernel_files priority_queues.c)
+endif()
+
 # FIXME: Once the prior pipe implementation is removed, this should be included in the above list
 if(NOT CONFIG_PIPES)
   list(APPEND kernel_files pipe.c)
@@ -10,8 +10,6 @@
 #include <zephyr/sys/math_extras.h>
 #include <zephyr/sys/dlist.h>
 
-bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
-
 /* Dumb Scheduling */
 #if defined(CONFIG_SCHED_DUMB)
 #define _priq_run_init z_priq_dumb_init
@@ -185,8 +183,11 @@ static ALWAYS_INLINE struct k_thread *z_priq_dumb_mask_best(sys_dlist_t *pq)
 }
 #endif /* CONFIG_SCHED_CPU_MASK */
 
+#if defined(CONFIG_SCHED_SCALABLE) || defined(CONFIG_WAITQ_SCALABLE)
 static ALWAYS_INLINE void z_priq_rb_init(struct _priq_rb *pq)
 {
+	bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
+
 	*pq = (struct _priq_rb) {
 		.tree = {
 			.lessthan_fn = z_priq_rb_lessthan,
@@ -244,6 +245,7 @@ static ALWAYS_INLINE struct k_thread *z_priq_rb_best(struct _priq_rb *pq)
 	}
 	return thread;
 }
+#endif
 
 struct prio_info {
 	uint8_t offset_prio;
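
Why only the scalable backends need the field: both CONFIG_SCHED_SCALABLE and CONFIG_WAITQ_SCALABLE keep threads in a red/black tree, and the tree's lessthan_fn must impose a total order, so threads of equal priority are ordered by when they were queued. Below is a simplified sketch of a comparator in the spirit of z_priq_rb_lessthan() (illustrative only — the struct, field access, and function names here are assumptions, not the verbatim kernel source):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-in for the two thread fields the comparator reads. */
    struct sketch_thread {
        int prio;           /* numerically lower value = higher priority */
        uint32_t order_key; /* assigned from a running counter at enqueue */
    };

    /* Sort by priority first; among equal priorities, fall back to
     * order_key so the queue stays FIFO.  This tiebreak is the only
     * consumer of order_key, which is why the field can be compiled
     * out when neither tree-based backend is selected.
     */
    bool sketch_lessthan(const struct sketch_thread *a,
                         const struct sketch_thread *b)
    {
        if (a->prio != b->prio) {
            return a->prio < b->prio;
        }
        return a->order_key < b->order_key;
    }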