kernel: SMP-aware scheduler
The scheduler needs a few tweaks to work in SMP mode:

1. The "cache" field just doesn't work. With more than one CPU, caching the highest-priority thread isn't useful, as you may need N of them at any given time before another thread is returned to the scheduler. You could recalculate it at every change, but that provides no performance benefit. Remove it.

2. The "bitmask" designed to prevent the need to check priorities individually is likewise dropped. This could work, but in fact on our only current SMP system and with current K_NUM_PRIORITIES values it provides no real benefit.

3. Individual threads now have a "current cpu" and an "active" flag so that the choice of the next thread to run can correctly skip threads that are active on other CPUs (see the sketch after this message).

The upshot is that a decent amount of code gets #if'd out, and the new SMP implementations of _get_highest_ready_prio() and _get_next_ready_thread() are simpler and smaller, at the expense of dropping the older optimizations.

Note that scheduler synchronization is unchanged: all scheduler APIs used to require that an irq_lock() be held, which means that they now require the global spinlock via the same API. This should be a very early candidate for lock-granularity attention!

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
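A minimal sketch (editor's illustration, not code from this commit) of the selection walk that point 3 implies: with the cache and bitmask gone, the scheduler can scan the per-priority ready queues in order and skip any thread already active on another CPU. All names here (struct thread, ready_q, get_next_ready_thread, NUM_PRIORITIES) are placeholders, not Zephyr's actual identifiers.

#include <stdbool.h>
#include <stddef.h>

/* Illustrative stand-ins for the real scheduler structures. */
struct thread {
	struct thread *next;  /* link within its priority queue */
	int cpu;              /* CPU the thread is running on, if active */
	bool active;          /* true while running on some CPU */
};

#define NUM_PRIORITIES 16                      /* placeholder value */
static struct thread *ready_q[NUM_PRIORITIES]; /* index 0 = highest prio */

/* Must be called with the global scheduler lock held. */
static struct thread *get_next_ready_thread(int this_cpu)
{
	for (int prio = 0; prio < NUM_PRIORITIES; prio++) {
		for (struct thread *t = ready_q[prio]; t != NULL; t = t->next) {
			/* Skip threads currently active on another CPU. */
			if (t->active && t->cpu != this_cpu) {
				continue;
			}
			return t; /* first runnable thread at highest prio */
		}
	}
	return NULL; /* the real kernel falls back to an idle thread */
}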
parent 364cbae412
commit 2724fd11cb
7 changed files with 99 additions and 6 deletions
@@ -14,7 +14,10 @@
 #endif /* CONFIG_KERNEL_EVENT_LOGGER */
 
 extern k_tid_t const _main_thread;
+
+#ifndef CONFIG_SMP
 extern k_tid_t const _idle_thread;
+#endif
 
 extern void _add_thread_to_ready_q(struct k_thread *thread);
 extern void _remove_thread_from_ready_q(struct k_thread *thread);
@@ -34,10 +37,14 @@ extern void idle(void *, void *, void *);
 
 /* find which one is the next thread to run */
 /* must be called with interrupts locked */
+#ifdef CONFIG_SMP
+extern struct k_thread *_get_next_ready_thread(void);
+#else
 static ALWAYS_INLINE struct k_thread *_get_next_ready_thread(void)
 {
 	return _ready_q.cache;
 }
+#endif
 
 static inline int _is_idle_thread(void *entry_point)
 {
@@ -46,7 +53,11 @@ static inline int _is_idle_thread(void *entry_point)
 
 static inline int _is_idle_thread_ptr(k_tid_t thread)
 {
+#ifdef CONFIG_SMP
+	return thread->base.is_idle;
+#else
 	return thread == _idle_thread;
+#endif
 }
 
 #ifdef CONFIG_MULTITHREADING
@@ -203,6 +214,9 @@ static inline int _get_ready_q_q_index(int prio)
 
 /* find out the currently highest priority where a thread is ready to run */
 /* interrupts must be locked */
+#ifdef CONFIG_SMP
+int _get_highest_ready_prio(void);
+#else
 static inline int _get_highest_ready_prio(void)
 {
 	int bitmap = 0;
@@ -228,6 +242,7 @@ static inline int _get_highest_ready_prio(void)
 
 	return abs_prio - _NUM_COOP_PRIO;
 }
+#endif
 
 /*
  * Checks if current thread must be context-switched out. The caller must
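The last two hunks above replace the inline, bitmap-based _get_highest_ready_prio() with a bare extern declaration for SMP; its body lives elsewhere in the commit and is not shown here. A plausible shape for it, given the commit message's point that the bitmask is dropped, is a plain linear scan over the ready queues. This is an editor's sketch with placeholder storage (ready_count, NUM_PRIORITIES), and it ignores the cooperative/preemptive priority offset that the non-SMP version applies via _NUM_COOP_PRIO.

/* Editor's sketch, not the commit's actual implementation. */
#define NUM_PRIORITIES 16               /* placeholder, not K_NUM_PRIORITIES */
static int ready_count[NUM_PRIORITIES]; /* ready threads at each priority */

int _get_highest_ready_prio(void)
{
	/* With the bitmask gone, just walk priorities from highest (0)
	 * to lowest until a non-empty queue turns up.
	 */
	for (int prio = 0; prio < NUM_PRIORITIES; prio++) {
		if (ready_count[prio] > 0) {
			return prio;
		}
	}
	return -1; /* no ready thread; real code can assume this won't happen */
}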
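On the synchronization note in the commit message: the old contract that scheduler APIs run under irq_lock() now funnels every caller through one global spinlock behind the same API. Below is a self-contained illustration of that "one big lock" model, using C11 atomics rather than Zephyr's actual primitives (a real kernel lock would also mask interrupts on the local CPU); all names are placeholders.

#include <stdatomic.h>

/* Editor's sketch of a single coarse scheduler lock (placeholder names). */
static atomic_flag sched_global_lock = ATOMIC_FLAG_INIT;

static inline void sched_lock(void)
{
	/* Spin until we acquire the flag (acquire ordering on success). */
	while (atomic_flag_test_and_set_explicit(&sched_global_lock,
						 memory_order_acquire)) {
		/* busy-wait */
	}
}

static inline void sched_unlock(void)
{
	atomic_flag_clear_explicit(&sched_global_lock,
				   memory_order_release);
}

Every scheduler path contending on this single flag is exactly why the commit flags lock granularity as an early candidate for attention.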