kernel/timeout: Fix recursive spinlock in z_set_timeout_expiry()
The z_set_timeout_expiry() function was added in part to simplify the locking strategy, but it missed a case where a function it was calling was re-locking the same spinlock. It "works"[1] in uniprocessor environments, but can be a deadlock in SMP.

Fix this by moving the meat of the function to an unlocked utility, using that locally, and turning the entry point into one that does the locking. Actually this only gets called from idle now, which is a use case that will go away when TICKLESS_IDLE is removed as a separate feature (once you know all timeouts are set tickless, you don't need to set it from the idle entry at all).

Discovered via lucky inspection.

[1] It doesn't work. It releases the lock prematurely at the end of the inner block. But in practice this wasn't discovered.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
parent 7f956a9353
commit e664c78b82
1 changed file with 18 additions and 13 deletions
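For readers not familiar with the helper involved: kernel/timeout.c wraps its critical sections in LOCKED(&timeout_lock) { ... } blocks, visible in the diff below. The macro's exact definition is not part of this diff, so the snippet that follows is only an approximate sketch of its shape using stand-in names (demo_lock, demo_state, DEMO_LOCKED are illustrative, not the kernel's). The point is that the spinlock is taken when the block is entered and dropped when the block scope exits, and that Zephyr spinlocks are not recursive, which is why calling a LOCKED()-using function from inside another LOCKED() block on the same lock is the bug described above.

#include <spinlock.h>

static struct k_spinlock demo_lock;	/* stand-in for timeout_lock */
static int demo_state;

/* Approximation of a LOCKED()-style wrapper: hold the spinlock for
 * exactly the duration of the attached block.
 */
#define DEMO_LOCKED(lck) for (k_spinlock_key_t __i = {}, \
			      __key = k_spin_lock(lck); \
			      !__i.key; \
			      k_spin_unlock(lck, __key), __i.key = 1)

int demo_read(void)
{
	int ret = 0;

	DEMO_LOCKED(&demo_lock) {
		ret = demo_state;
	}
	/* The lock is dropped here.  If demo_read() were called from
	 * inside another DEMO_LOCKED(&demo_lock) block, the inner
	 * k_spin_lock() could never succeed under SMP, and -- as
	 * footnote [1] notes for the real code -- the inner unlock
	 * releases the lock while the outer block still expects to
	 * hold it.
	 */
	return ret;
}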
kernel/timeout.c

@@ -62,6 +62,20 @@ static s32_t elapsed(void)
 	return announce_remaining == 0 ? z_clock_elapsed() : 0;
 }
 
+static s32_t next_timeout(void)
+{
+	int maxw = can_wait_forever ? K_FOREVER : INT_MAX;
+	struct _timeout *to = first();
+	s32_t ret = to == NULL ? maxw : max(0, to->dticks - elapsed());
+
+#ifdef CONFIG_TIMESLICING
+	if (_current_cpu->slice_ticks && _current_cpu->slice_ticks < ret) {
+		ret = _current_cpu->slice_ticks;
+	}
+#endif
+	return ret;
+}
+
 void _add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks)
 {
 	__ASSERT(to->dticks < 0, "");
@@ -89,7 +103,7 @@ void _add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks)
 		}
 
 		if (to == first()) {
-			z_clock_set_timeout(_get_next_timeout_expiry(), false);
+			z_clock_set_timeout(next_timeout(), false);
 		}
 	}
 }
@@ -130,27 +144,18 @@ s32_t z_timeout_remaining(struct _timeout *timeout)
 
 s32_t _get_next_timeout_expiry(void)
 {
-	s32_t ret = 0;
-	int maxw = can_wait_forever ? K_FOREVER : INT_MAX;
+	s32_t ret = K_FOREVER;
 
 	LOCKED(&timeout_lock) {
-		struct _timeout *to = first();
-
-		ret = to == NULL ? maxw : max(0, to->dticks - elapsed());
+		ret = next_timeout();
 	}
 
-#ifdef CONFIG_TIMESLICING
-	if (_current_cpu->slice_ticks && _current_cpu->slice_ticks < ret) {
-		ret = _current_cpu->slice_ticks;
-	}
-#endif
 	return ret;
 }
 
 void z_set_timeout_expiry(s32_t ticks, bool idle)
 {
 	LOCKED(&timeout_lock) {
-		int next = _get_next_timeout_expiry();
+		int next = next_timeout();
 		bool sooner = (next == K_FOREVER) || (ticks < next);
 		bool imminent = next <= 1;
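Reduced to a minimal sketch with hypothetical names (the my_* identifiers below are illustrations, not Zephyr API), the structure the diff moves to is: an unlocked helper that assumes the caller holds the lock, locked paths that call the helper directly, and the old entry point kept as a thin wrapper that takes the lock exactly once on behalf of callers that do not hold it.

#include <spinlock.h>

static struct k_spinlock my_lock;	/* plays the role of timeout_lock */
static int my_state;

/* Unlocked utility: the caller must already hold my_lock
 * (the role next_timeout() takes in the diff above).
 */
static int my_next_value(void)
{
	return my_state + 1;
}

/* Public wrapper: the only place this query path takes the lock
 * (the role _get_next_timeout_expiry() keeps).
 */
int my_get_next_value(void)
{
	k_spinlock_key_t key = k_spin_lock(&my_lock);
	int ret = my_next_value();

	k_spin_unlock(&my_lock, key);
	return ret;
}

/* A path that already holds the lock calls the helper, not the
 * wrapper, so the spinlock is never acquired twice on one CPU
 * (the role _add_timeout() and z_set_timeout_expiry() take).
 */
void my_update(int v)
{
	k_spinlock_key_t key = k_spin_lock(&my_lock);

	my_state = v;
	(void)my_next_value();
	k_spin_unlock(&my_lock, key);
}

This split keeps _get_next_timeout_expiry() available with unchanged behavior for any caller that does not hold timeout_lock, while the in-file callers that already hold it switch to next_timeout().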