kernel: fix short time-slice reset

The kernel tracks time slice usage with the _time_slice_elapsed global.
Every time the timer interrupt goes off and the timer driver calls
_nano_sys_clock_tick_announce() with the elapsed time, this is added to
_time_slice_elapsed. If it exceeds the total time slice, the thread is
moved to the back of the queue for that priority level and
_time_slice_elapsed is reset to zero.

In a non-tickless kernel, this is the only time _time_slice_elapsed is
reset. If a thread uses up a partial time slice and then cooperatively
switches to another thread, the next thread inherits the remaining
elapsed count, preventing it from running for its full time slice.

There does exist code to properly reset the elapsed count, but it was
only compiled in a tickless kernel. Now it is built any time
CONFIG_TIMESLICING is enabled.

Issue: ZEP-2107
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
Andrew Boie 2017-05-30 12:51:39 -07:00 committed by Anas Nashif
commit 3989de7e3b
5 changed files with 10 additions and 20 deletions

View file

@ -24,7 +24,7 @@ _ASM_FILE_PROLOGUE
GTEXT(_ExcExit)
GTEXT(_IntExit)
GDATA(_kernel)
#ifdef CONFIG_TICKLESS_KERNEL
#ifdef CONFIG_TIMESLICING
GTEXT(_update_time_slice_before_swap)
#endif
@ -58,7 +58,7 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)
/* _IntExit falls through to _ExcExit (they are aliases of each other) */
#ifdef CONFIG_TICKLESS_KERNEL
#ifdef CONFIG_TIMESLICING
push {lr}
bl _update_time_slice_before_swap
#if defined(CONFIG_ARMV6_M)

View file

@ -30,7 +30,7 @@
/* externs */
GTEXT(__swap)
#if defined(CONFIG_TICKLESS_KERNEL) && defined(CONFIG_TIMESLICING)
#if defined(CONFIG_TIMESLICING)
GTEXT(_update_time_slice_before_swap)
#endif
@ -340,7 +340,7 @@ alreadyOnIntStack:
popl %esi
#endif
#if defined(CONFIG_TICKLESS_KERNEL) && defined(CONFIG_TIMESLICING)
#if defined(CONFIG_TIMESLICING)
call _update_time_slice_before_swap
#endif
pushfl /* push KERNEL_LOCK_KEY argument */

View file

@ -52,7 +52,7 @@ extern void _new_thread(struct k_thread *thread, char *pStack, size_t stackSize,
extern unsigned int __swap(unsigned int key);
#if defined(CONFIG_TICKLESS_KERNEL) && defined(CONFIG_TIMESLICING)
#if defined(CONFIG_TIMESLICING)
extern void _update_time_slice_before_swap(void);
static inline unsigned int _time_slice_swap(unsigned int key)

View file

@ -397,7 +397,6 @@ void k_sched_time_slice_set(s32_t duration_in_ms, int prio)
_time_slice_prio_ceiling = prio;
}
#ifdef CONFIG_TICKLESS_KERNEL
int _is_thread_time_slicing(struct k_thread *thread)
{
/*
@ -424,21 +423,20 @@ int _is_thread_time_slicing(struct k_thread *thread)
/* Should be called only immediately before a thread switch */
void _update_time_slice_before_swap(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
if (!_is_thread_time_slicing(_get_next_ready_thread())) {
return;
}
/* Restart time slice count at new thread switch */
_time_slice_elapsed = 0;
u32_t remaining = _get_remaining_program_time();
if (!remaining || (_time_slice_duration < remaining)) {
_set_time(_time_slice_duration);
}
}
#endif
/* Restart time slice count at new thread switch */
_time_slice_elapsed = 0;
}
#endif /* CONFIG_TIMESLICING */
int k_is_preempt_thread(void)

View file

@ -307,18 +307,10 @@ static void handle_time_slicing(s32_t ticks)
{
#ifdef CONFIG_TICKLESS_KERNEL
next_ts = 0;
#endif
if (!_is_thread_time_slicing(_current)) {
return;
}
#else
if (_time_slice_duration == 0) {
return;
}
if (_is_prio_higher(_current->base.prio, _time_slice_prio_ceiling)) {
return;
}
#endif
_time_slice_elapsed += __ticks_to_ms(ticks);
if (_time_slice_elapsed >= _time_slice_duration) {