kernel/sched: Remove remaining irq_lock use
The k_sleep() locking was actually there to protect the _current state from preemption before the context switch, so document that and replace it with a spinlock. Should probably unify this with the rather cleaner logic in pend_curr(), but right now "sleeping" and "pended" are needlessly distinct states.

And we can remove the locking entirely from k_wakeup(). There's no reason for any of that to need to be synchronized. Even if we're racing with other thread modifications, the state on exit will be a runnable thread without a timeout, or whatever timeout/pend state the other side was requesting (i.e. it's a bug, but not one solved by synchronization).

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
parent be03dbd4c7
commit d27d4e6af2
1 changed file with 10 additions and 12 deletions
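For context before the diff, here is a minimal usage sketch of the sleeper/waker pairing the commit message reasons about. It is not part of the commit; the thread, stack, and timing values are hypothetical, and it assumes the millisecond-based k_sleep()/k_wakeup() API of this Zephyr era.

#include <zephyr.h>
#include <misc/printk.h>

/* Hypothetical sleeper thread: parks itself for up to one second. */
K_THREAD_STACK_DEFINE(sleeper_stack, 1024);
static struct k_thread sleeper_data;

static void sleeper(void *p1, void *p2, void *p3)
{
        /* k_sleep() returns the milliseconds remaining if woken early */
        s32_t remaining = k_sleep(1000);

        printk("sleeper woke with %d ms remaining\n", remaining);
}

void main(void)
{
        k_tid_t tid = k_thread_create(&sleeper_data, sleeper_stack,
                                      K_THREAD_STACK_SIZEOF(sleeper_stack),
                                      sleeper, NULL, NULL, NULL,
                                      K_PRIO_PREEMPT(1), 0, K_NO_WAIT);

        k_sleep(100);           /* give the sleeper time to pend its timeout */
        k_wakeup(tid);          /* no caller-side locking needed, per this commit */
}

Even if the k_wakeup() here races with the k_sleep() call, the sleeper simply ends up runnable with its timeout aborted, which is the benign outcome the message describes.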
@@ -871,7 +871,6 @@ s32_t _impl_k_sleep(s32_t duration)
 #ifdef CONFIG_MULTITHREADING
        u32_t expected_wakeup_time;
        s32_t ticks;
-       unsigned int key;

        __ASSERT(!_is_in_isr(), "");
        __ASSERT(duration != K_FOREVER, "");
@@ -886,12 +885,18 @@ s32_t _impl_k_sleep(s32_t duration)

        ticks = _TICK_ALIGN + _ms_to_ticks(duration);
        expected_wakeup_time = ticks + z_tick_get_32();
-       key = irq_lock();
+
+       /* Spinlock purely for local interrupt locking to prevent us
+        * from being interrupted while _current is in an intermediate
+        * state. Should unify this implementation with pend().
+        */
+       struct k_spinlock local_lock = {};
+       k_spinlock_key_t key = k_spin_lock(&local_lock);

        _remove_thread_from_ready_q(_current);
        _add_thread_timeout(_current, ticks);

-       (void)_Swap_irqlock(key);
+       (void)_Swap(&local_lock, key);

        ticks = expected_wakeup_time - z_tick_get_32();
        if (ticks > 0) {
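The comment in the hunk above describes using a spinlock purely as a local interrupt lock. As a standalone illustration of that pattern (hypothetical helper name; outside the context-switch path, where _Swap() consumes the lock instead of k_spin_unlock()), a zero-initialized, function-local k_spinlock masks interrupts on the local CPU much like irq_lock(), while remaining SMP-safe:

#include <spinlock.h>

/* Hypothetical helper: guard a short critical section with a
 * throwaway local spinlock rather than a raw irq_lock().
 */
static void touch_scheduler_state(void)
{
        struct k_spinlock lock = {};
        k_spinlock_key_t key = k_spin_lock(&lock);

        /* ... manipulate _current / per-CPU state here ... */

        k_spin_unlock(&lock, key);
}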
@@ -917,25 +922,18 @@ Z_SYSCALL_HANDLER(k_sleep, duration)

 void _impl_k_wakeup(k_tid_t thread)
 {
-       unsigned int key = irq_lock();
-
        /* verify first if thread is not waiting on an object */
        if (_is_thread_pending(thread)) {
-               irq_unlock(key);
                return;
        }

        if (_abort_thread_timeout(thread) < 0) {
-               irq_unlock(key);
                return;
        }

        _ready_thread(thread);

-       if (_is_in_isr()) {
-               irq_unlock(key);
-       } else {
-               _reschedule_irqlock(key);
+       if (!_is_in_isr()) {
+               _reschedule_unlocked();
        }
 }