kernel/timeout: Cleanup/speedup parallel announce logic
Commit b1182bf83b ("kernel/timeout: Serialize handler callbacks on
SMP") introduced an important fix to timeout handling on
multiprocessor systems, but it did it in a clumsy way by holding a
spinlock across the entire timeout process on all cores (everything
would have to spin until one core finished the list). The lock also
delays any nested interrupts that might otherwise be delivered, which
breaks our nested_irq_offload case on xtensa+SMP (where contra x86,
the "synchronous" interrupt is sensitive to mask state).
Doing this right turns out not to be so hard: take the timeout lock,
check to see if someone is already iterating
(i.e. "announce_remaining" is non-zero), and if so just increment the
ticks to announce and exit. The original cpu will then complete the
full timeout list without blocking any others longer than needed to
check the timeout state.
Fixes #44758
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
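For readers who want the pattern in isolation, here is a minimal standalone
sketch of the idea described above. It is not the Zephyr code: announce(),
run_one_callback() and the pthread mutex are illustrative stand-ins for
sys_clock_announce(), the expired-timeout handlers and timeout_lock. The key
property is that only the first caller ever runs handlers; later callers just
fold their ticks into announce_remaining under the lock and return.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int announce_remaining;	/* nonzero while one caller is draining */

/* Stand-in for running one expired timeout handler with the lock dropped */
static void run_one_callback(void)
{
	printf("handling one tick\n");
}

static void announce(int ticks)
{
	pthread_mutex_lock(&lock);

	/* Another caller is already inside the drain loop below: hand our
	 * ticks to it and leave, so handlers never run in parallel.
	 */
	if (announce_remaining != 0) {
		announce_remaining += ticks;
		pthread_mutex_unlock(&lock);
		return;
	}

	announce_remaining = ticks;

	while (announce_remaining > 0) {
		/* Drop the lock around the handler; other threads calling
		 * announce() here take the early-return path above and only
		 * bump announce_remaining, which we notice and drain.
		 */
		pthread_mutex_unlock(&lock);
		run_one_callback();
		pthread_mutex_lock(&lock);

		announce_remaining--;
	}

	pthread_mutex_unlock(&lock);
}

int main(void)
{
	announce(3);	/* drains three ticks' worth of handlers */
	return 0;
}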
parent 70e1f97ca4
commit 0b2ed3818d

1 changed file with 12 additions and 19 deletions
kernel/timeout.c

@@ -18,17 +18,6 @@ static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);
 
 static struct k_spinlock timeout_lock;
 
-/* On multiprocessor setups, it's possible to have multiple
- * sys_clock_announce() calls arrive in parallel (the latest to exit
- * the driver will generally be announcing zero ticks). But we want
- * the list of timeouts to be executed serially, so as not to confuse
- * application code. This lock wraps the announce loop, external to
- * the nested timeout_lock.
- */
-#ifdef CONFIG_SMP
-static struct k_spinlock ann_lock;
-#endif
-
 #define MAX_WAIT (IS_ENABLED(CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE) \
 		  ? K_TICKS_FOREVER : INT_MAX)
 
@@ -253,12 +242,20 @@ void sys_clock_announce(int32_t ticks)
 	z_time_slice(ticks);
 #endif
 
-#ifdef CONFIG_SMP
-	k_spinlock_key_t ann_key = k_spin_lock(&ann_lock);
-#endif
-
 	k_spinlock_key_t key = k_spin_lock(&timeout_lock);
 
+	/* We release the lock around the callbacks below, so on SMP
+	 * systems someone might be already running the loop. Don't
+	 * race (which will cause paralllel execution of "sequential"
+	 * timeouts and confuse apps), just increment the tick count
+	 * and return.
+	 */
+	if (IS_ENABLED(CONFIG_SMP) && announce_remaining != 0) {
+		announce_remaining += ticks;
+		k_spin_unlock(&timeout_lock, key);
+		return;
+	}
+
 	announce_remaining = ticks;
 
 	while (first() != NULL && first()->dticks <= announce_remaining) {
@@ -285,10 +282,6 @@ void sys_clock_announce(int32_t ticks)
 	sys_clock_set_timeout(next_timeout(), false);
 
 	k_spin_unlock(&timeout_lock, key);
-
-#ifdef CONFIG_SMP
-	k_spin_unlock(&ann_lock, ann_key);
-#endif
 }
 
 int64_t sys_clock_tick_get(void)