kernel: Scheduler rewrite

This replaces the existing scheduler (but not priority handling)
implementation with a somewhat simpler one.  Thread selection behavior
does not change.  New features:

+ Unifies the SMP and uniprocessor selection code (with the sole
  exception that the "cache" trick is not possible in SMP).

+ The old static multi-queue implementation is gone, replaced by a
  build-time choice between a "dumb" list implementation (faster and
  significantly smaller for apps with only a few threads) and a
  balanced tree queue that scales well to arbitrary numbers of threads
  and priority levels.  This is controlled via the CONFIG_SCHED_DUMB
  kconfig variable; a conceptual sketch of the two backends follows
  this list.

+ The balanced tree implementation can be used, identically, to back
  the wait_q abstraction, fixing a scalability glitch Zephyr had when
  many threads were waiting on a single object.  This can be selected
  via CONFIG_WAITQ_FAST.
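
A minimal, self-contained sketch of the build-time backend choice described
in the second bullet.  Everything here is hypothetical: the sketch_thread
struct, the SCHED_DUMB_SKETCH switch, and the queue_add()/queue_best() names
are placeholders, not this commit's internal API.

/* Conceptual sketch only -- not Zephyr code.  Mirrors the idea behind
 * CONFIG_SCHED_DUMB: one add/pick-best interface, backed either by an
 * ordered singly-linked list or by a balanced tree.
 */
struct sketch_thread {
	int prio;                    /* lower value = higher priority */
	struct sketch_thread *next;  /* linkage for the list backend */
};

#ifdef SCHED_DUMB_SKETCH
/* List backend: O(n) ordered insert, O(1) pick-best.  Smallest code and
 * fastest when only a handful of threads are runnable.
 */
static struct sketch_thread *ready_list;

static void queue_add(struct sketch_thread *t)
{
	struct sketch_thread **cur = &ready_list;

	/* Walk past equal-or-better priorities so equal-priority threads
	 * stay in FIFO order.
	 */
	while (*cur != NULL && (*cur)->prio <= t->prio) {
		cur = &(*cur)->next;
	}
	t->next = *cur;
	*cur = t;
}

static struct sketch_thread *queue_best(void)
{
	return ready_list;           /* the head is always the best thread */
}
#else
/* Tree backend: the same two operations over a balanced tree keyed on
 * priority (plus an ordering tiebreak), giving O(log n) insert/remove for
 * arbitrary thread counts and priority levels; body omitted for brevity.
 */
#endif

The tree case is the same code that CONFIG_WAITQ_FAST (third bullet) reuses
for wait queues, which is why many threads pended on one object no longer
degrade into a long linear scan.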

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Authored by Andy Ross on 2018-05-03 14:51:49 -07:00; committed by Anas Nashif.
Commit 1acd8c2996
10 changed files with 600 additions and 484 deletions


@@ -33,6 +33,7 @@
 #define _ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "")
 #endif
 
+void _sched_init(void);
 void _add_thread_to_ready_q(struct k_thread *thread);
 void _move_thread_to_end_of_prio_q(struct k_thread *thread);
 void _remove_thread_from_ready_q(struct k_thread *thread);
@@ -111,6 +112,11 @@ static inline int _is_thread_polling(struct k_thread *thread)
 	return _is_thread_state_set(thread, _THREAD_POLLING);
 }
 
+static inline int _is_thread_queued(struct k_thread *thread)
+{
+	return _is_thread_state_set(thread, _THREAD_QUEUED);
+}
+
 static inline void _mark_thread_as_suspended(struct k_thread *thread)
 {
 	thread->base.thread_state |= _THREAD_SUSPENDED;
@@ -126,6 +132,11 @@ static inline void _mark_thread_as_started(struct k_thread *thread)
 	thread->base.thread_state &= ~_THREAD_PRESTART;
 }
 
+static inline void _mark_thread_as_pending(struct k_thread *thread)
+{
+	thread->base.thread_state |= _THREAD_PENDING;
+}
+
 static inline void _mark_thread_as_not_pending(struct k_thread *thread)
 {
 	thread->base.thread_state &= ~_THREAD_PENDING;
@@ -152,6 +163,16 @@ static inline void _mark_thread_as_not_polling(struct k_thread *thread)
 	_reset_thread_states(thread, _THREAD_POLLING);
 }
 
+static inline void _mark_thread_as_queued(struct k_thread *thread)
+{
+	_set_thread_states(thread, _THREAD_QUEUED);
+}
+
+static inline void _mark_thread_as_not_queued(struct k_thread *thread)
+{
+	_reset_thread_states(thread, _THREAD_QUEUED);
+}
+
 static inline int _is_under_prio_ceiling(int prio)
 {
 	return prio >= CONFIG_PRIORITY_CEILING;
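
The hunks above also introduce a _THREAD_QUEUED state bit, which records
whether a thread is currently sitting in the ready queue.  As a hedged
sketch of how such helpers are typically paired with the queue operations
(the *_sketch() wrappers below are illustrative; the real call sites live
in kernel/sched.c and may order things differently):

/* Illustrative wrappers only.  _is_thread_queued(), _mark_thread_as_queued(),
 * _mark_thread_as_not_queued(), _add_thread_to_ready_q() and
 * _remove_thread_from_ready_q() are from the diff above; the *_sketch()
 * functions are not part of the commit.
 */
static void ready_thread_sketch(struct k_thread *thread)
{
	if (!_is_thread_queued(thread)) {
		_mark_thread_as_queued(thread);
		_add_thread_to_ready_q(thread);
	}
}

static void unready_thread_sketch(struct k_thread *thread)
{
	if (_is_thread_queued(thread)) {
		_remove_thread_from_ready_q(thread);
		_mark_thread_as_not_queued(thread);
	}
}

Tracking the bit on the thread itself makes enqueue/dequeue idempotent and
lets either queue backend answer "is this thread currently queued?" without
searching the queue.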