kernel: Scheduler refactoring: use _reschedule_*() always
There was a somewhat promiscuous pattern in the kernel where IPC mechanisms would do something that might affect the current thread choice, then check _must_switch_threads() (or occasionally __must_switch_threads() -- don't ask, the distinction is being replaced by real English words), sometimes _is_in_isr() (but not always, even in contexts where skipping it looks like a mistake), and then call _Swap() if everything checked out, otherwise releasing the irq_lock(). Sometimes this was done directly, sometimes via the inverted test, and sometimes (poll, heh) by doing the test when the thread state was modified and then needlessly passing the result up the call stack to the point of the _Swap().

And some places were just calling _reschedule_threads(), which did all of this already.

Unify all this madness. The old _reschedule_threads() function has been split into two variants: _reschedule_yield() and _reschedule_noyield(). The latter is the "normal" one that respects the cooperative priority of the current thread (i.e. it won't switch out even if a higher-priority thread is ready -- the current thread has to pend itself first); the former is used in the handful of places where code was swapping unconditionally, just to preserve the precise behavior across the refactor. I'm not at all convinced it should exist...

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
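For context, a minimal before/after sketch of the pattern being unified. The kernel internals named here (_is_in_isr(), _must_switch_threads(), _Swap(), irq_unlock(), _reschedule_noyield()) are the ones the commit message refers to; the surrounding some_ipc_give_*() wrappers are hypothetical illustrations, not code from this commit:

/* Hypothetical illustration -- not code from this commit. */

/* Before: each IPC path open-coded the test-and-swap. */
static void some_ipc_give_old(int key)
{
	if (!_is_in_isr() && _must_switch_threads()) {
		_Swap(key);        /* switch to the better thread */
	} else {
		irq_unlock(key);   /* nothing better to run; just unlock */
	}
}

/* After: one call owns the whole decision. */
static void some_ipc_give_new(int key)
{
	_reschedule_noyield(key);
}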
parent b481d0a045
commit 8606fabf74

19 changed files with 78 additions and 196 deletions
@@ -21,13 +21,13 @@ extern k_tid_t const _idle_thread;
 
 extern void _add_thread_to_ready_q(struct k_thread *thread);
 extern void _remove_thread_from_ready_q(struct k_thread *thread);
-extern void _reschedule_threads(int key);
+extern int _reschedule_noyield(int key);
+extern int _reschedule_yield(int key);
 extern void k_sched_unlock(void);
 extern void _pend_thread(struct k_thread *thread,
 			 _wait_q_t *wait_q, s32_t timeout);
 extern void _pend_current_thread(_wait_q_t *wait_q, s32_t timeout);
 extern void _move_thread_to_end_of_prio_q(struct k_thread *thread);
-extern int __must_switch_threads(void);
 extern int _is_thread_time_slicing(struct k_thread *thread);
 extern void _update_time_slice_before_swap(void);
 #ifdef _NON_OPTIMIZED_TICKS_PER_SEC
@@ -262,15 +262,6 @@ static inline int _get_highest_ready_prio(void)
 }
 #endif
 
-/*
- * Checks if current thread must be context-switched out. The caller must
- * already know that the execution context is a thread.
- */
-static inline int _must_switch_threads(void)
-{
-	return _is_preempt(_current) && __must_switch_threads();
-}
-
 /*
  * Called directly by other internal kernel code.
  * Exposed to applications via k_sched_lock(), which just calls this
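To make the new split concrete, here is a plausible shape for the two entry points, inferred purely from the semantics the commit message describes. Both return an int, matching the declarations in the first hunk above (presumably the _Swap() return value, or 0 when no swap happens). The real bodies are not shown on this page and may differ:

/* Hypothetical sketch of the two variants, inferred from the commit
 * message; the actual implementations may differ.
 */
int _reschedule_noyield(int key)
{
	/* Respect cooperative priority: swap only if we are in thread
	 * context, the current thread is preemptible, and a higher-priority
	 * thread is ready to run.
	 */
	if (!_is_in_isr() && _is_preempt(_current) && __must_switch_threads()) {
		return _Swap(key);
	}

	irq_unlock(key);
	return 0;
}

int _reschedule_yield(int key)
{
	/* Swap unconditionally (outside ISRs), preserving the behavior of
	 * call sites that previously called _Swap() directly.
	 */
	if (!_is_in_isr()) {
		return _Swap(key);
	}

	irq_unlock(key);
	return 0;
}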
|
Loading…
Add table
Add a link
Reference in a new issue