kernel: Split reschedule & pend into irq/spin lock versions

Just like with _Swap(), we need two variants of these utilities which
can atomically release a lock and context switch.  The naming shifts
(for byte count reasons) to _reschedule/_pend_curr, and both have an
_irqlock variant which takes the traditional locking.

Just refactoring.  No logic changes.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
Andy Ross 2018-07-24 13:37:59 -07:00 committed by Anas Nashif
commit ec554f44d9
22 changed files with 87 additions and 66 deletions

View file

@@ -38,9 +38,12 @@ void _move_thread_to_end_of_prio_q(struct k_thread *thread);
void _remove_thread_from_ready_q(struct k_thread *thread);
int _is_thread_time_slicing(struct k_thread *thread);
void _unpend_thread_no_timeout(struct k_thread *thread);
int _pend_current_thread(u32_t key, _wait_q_t *wait_q, s32_t timeout);
int _pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
_wait_q_t *wait_q, s32_t timeout);
int _pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout);
void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout);
void _reschedule(u32_t key);
void _reschedule(struct k_spinlock *lock, k_spinlock_key_t key);
void _reschedule_irqlock(u32_t key);
struct k_thread *_unpend_first_thread(_wait_q_t *wait_q);
void _unpend_thread(struct k_thread *thread);
int _unpend_all(_wait_q_t *wait_q);
@@ -62,7 +65,6 @@ static ALWAYS_INLINE struct k_thread *_get_next_ready_thread(void)
}
#endif
static inline bool _is_idle_thread(void *entry_point)
{
return entry_point == idle;