kernel/sched: Add missing lock around waitq unpend calls
The two calls to unpend a thread from a wait queue were inexplicably*
unsynchronized, as James Harris discovered. Rework them to call the
lowest level primitives so we can wrap the process inside the scheduler
lock.

Fixes #32136

* I took a brief look. What seems to have happened here is that these
  were originally synchronized via an implicit lock taken by an outer
  caller (remember the original uniprocessor irq_lock() API is a
  recursive lock), and they were mostly implemented in terms of
  middle-level calls that were themselves locked. So those got ported
  over to the newer spinlock, but the outer wrapper layer got
  forgotten.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
commit 604f0f44b6
parent b365ab06a7

2 changed files with 24 additions and 14 deletions
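For context before the diff: the "scheduler lock" named in the message is
sched_spinlock, private to kernel/sched.c, and the LOCKED() helper that
appears in the new code is a one-iteration for-loop wrapper around it. A
minimal sketch of the idiom, assuming the macro roughly as it stood in
kernel/sched.c at the time (the internals here are paraphrased from
memory, not quoted from the tree):

#define LOCKED(lck) for (k_spinlock_key_t __i = {},	\
			 __key = k_spin_lock(lck);	\
			 !__i.key;			\
			 k_spin_unlock(lck, __key), __i.key = 1)

/* Usage: the braced body runs with sched_spinlock held, and the
 * k_spin_unlock() fires automatically when control leaves the block.
 */
LOCKED(&sched_spinlock) {
	/* pick a thread and unpend it atomically here */
}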
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -41,6 +41,7 @@ void z_move_thread_to_end_of_prio_q(struct k_thread *thread);
 void z_remove_thread_from_ready_q(struct k_thread *thread);
 int z_is_thread_time_slicing(struct k_thread *thread);
 void z_unpend_thread_no_timeout(struct k_thread *thread);
+struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q);
 int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
 		_wait_q_t *wait_q, k_timeout_t timeout);
 int z_pend_curr_irqlock(uint32_t key, _wait_q_t *wait_q, k_timeout_t timeout);
@@ -302,15 +303,4 @@ static ALWAYS_INLINE bool z_is_thread_timeout_expired(struct k_thread *thread)
 #endif
 }
 
-static inline struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
-{
-	struct k_thread *thread = z_find_first_thread_to_unpend(wait_q, NULL);
-
-	if (thread != NULL) {
-		z_unpend_thread_no_timeout(thread);
-	}
-
-	return thread;
-}
-
 #endif /* ZEPHYR_KERNEL_INCLUDE_KSCHED_H_ */
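Why the removed inline helper needed replacing rather than a caller-side
fix: each of its two sub-calls synchronized internally (the "middle-level
calls that were themselves locked" from the footnote), but nothing held
the scheduler lock across the pair, so on SMP two CPUs could both "find"
the same thread before either unpended it. A hypothetical interleaving
(illustration only, not part of the commit):

/* CPU 0                                        CPU 1
 *
 * t = z_find_first_thread_to_unpend(q, NULL);
 *                                              t = z_find_first_thread_to_unpend(q, NULL);
 * z_unpend_thread_no_timeout(t);
 *                                              z_unpend_thread_no_timeout(t);  <- same thread
 *                                                                                 unpended twice
 */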
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -766,12 +766,32 @@ int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
 	return z_swap(lock, key);
 }
 
+struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
+{
+	struct k_thread *thread = NULL;
+
+	LOCKED(&sched_spinlock) {
+		thread = _priq_wait_best(&wait_q->waitq);
+
+		if (thread != NULL) {
+			unpend_thread_no_timeout(thread);
+		}
+	}
+
+	return thread;
+}
+
 struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
 {
-	struct k_thread *thread = z_unpend1_no_timeout(wait_q);
+	struct k_thread *thread = NULL;
 
-	if (thread != NULL) {
-		(void)z_abort_thread_timeout(thread);
+	LOCKED(&sched_spinlock) {
+		thread = _priq_wait_best(&wait_q->waitq);
+
+		if (thread != NULL) {
+			unpend_thread_no_timeout(thread);
+			(void)z_abort_thread_timeout(thread);
+		}
 	}
 
 	return thread;
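For a sense of how the now-atomic unpend is consumed, a hypothetical
caller sketch (signal_one_waiter is an invented name; it assumes
arch_thread_return_value_set() and z_ready_thread(), the kernel helpers
an IPC "give" path typically calls after unpending a waiter):

/* Sketch only: wake exactly one thread blocked on wait_q. The
 * pick-and-unpend inside z_unpend_first_thread() is now a single
 * critical section, so no other CPU can grab the same waiter.
 */
static void signal_one_waiter(_wait_q_t *wait_q)
{
	struct k_thread *thread = z_unpend_first_thread(wait_q);

	if (thread != NULL) {
		arch_thread_return_value_set(thread, 0);
		z_ready_thread(thread);
	}
}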