kernel: New timeslicing implementation
Instead of checking every time we hit the low-level context switch path to
see if the new thread has a "partner" with which it needs to share time,
just run the slice timer always and reset it from the scheduler at the
points where it has already decided a switch needs to happen.  In
TICKLESS_KERNEL situations, we pay the cost of extra timer interrupts at
~10Hz or whatever, which is low (note also that this kind of regular wakeup
architecture is required on SMP anyway so the scheduler can "notice"
threads scheduled by other CPUs).

Advantages:

1. Much simpler logic.  Significantly smaller code.  No variance or
   dependence on tickless modes or timer driver (beyond setting a simple
   timeout).

2. No arch-specific assembly integration with _Swap() needed.

3. Better performance on many workloads, as the accounting now happens at
   most once per timer interrupt (~5 Hz) and at true rescheduling points,
   not on every unrelated context switch and interrupt return.

4. It's SMP-safe.  The previous scheme kept the slice ticks as a global
   variable, which was an unnoticed bug.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
commit 9098a45c84
parent bf531ac4fc

9 changed files with 60 additions and 168 deletions
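
The application-facing API is unchanged by this rewrite. As a point of reference, here is a minimal sketch of how timeslicing gets exercised from an app; the thread entries, stack sizes, and priorities are illustrative, not part of this commit:

    #include <zephyr.h>

    static void spin(void *a, void *b, void *c)
    {
        for (;;) {
            /* busy work that never yields; only the slice timer
             * can rotate between these threads
             */
        }
    }

    /* two preemptible threads at the same priority */
    K_THREAD_DEFINE(worker_a, 1024, spin, NULL, NULL, NULL, 5, 0, K_NO_WAIT);
    K_THREAD_DEFINE(worker_b, 1024, spin, NULL, NULL, NULL, 5, 0, K_NO_WAIT);

    void main(void)
    {
        /* 10 ms slices for preemptible threads not above priority 0 */
        k_sched_time_slice_set(10, 0);
    }

Under the new scheme the rotation between worker_a and worker_b is driven by the slice timeout armed in reset_time_slice() (see kernel/sched.c below), not by hooks on every context switch.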
arch/arm/core/exc_exit.S

@@ -24,9 +24,6 @@ _ASM_FILE_PROLOGUE
 GTEXT(_ExcExit)
 GTEXT(_IntExit)
 GDATA(_kernel)
-#ifdef CONFIG_TIMESLICING
-GTEXT(_update_time_slice_before_swap)
-#endif
 
 /**
  *

@@ -79,17 +76,6 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
 	cmp r0, r1
 	beq _EXIT_EXC
 
-#ifdef CONFIG_TIMESLICING
-	push {lr}
-	bl _update_time_slice_before_swap
-#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
-	pop {r0}
-	mov lr, r0
-#else
-	pop {lr}
-#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
-#endif /* CONFIG_TIMESLICING */
-
 	/* context switch required, pend the PendSV exception */
 	ldr r1, =_SCS_ICSR
 	ldr r2, =_SCS_ICSR_PENDSV

arch/nios2/core/exception.S

@@ -18,9 +18,6 @@ GTEXT(__swap)
 GTEXT(_irq_do_offload)
 GTEXT(_offload_routine)
 #endif
-#ifdef CONFIG_TIMESLICING
-GTEXT(_update_time_slice_before_swap)
-#endif
 
 /* Allows use of r1/at register, otherwise reserved for assembler use */
 .set noat

@@ -140,10 +137,6 @@ on_irq_stack:
 	 */
 	ldw sp, 0(sp)
 
-#ifdef CONFIG_TIMESLICING
-	call _update_time_slice_before_swap
-#endif
-
 	/* Argument to Swap() is estatus since that's the state of the
 	 * status register before the exception happened. When coming
 	 * out of the context switch we need this info to restore

arch/riscv32/core/isr.S

@@ -30,10 +30,6 @@ GTEXT(z_sys_trace_isr_enter)
 GTEXT(_offload_routine)
 #endif
 
-#ifdef CONFIG_TIMESLICING
-GTEXT(_update_time_slice_before_swap)
-#endif
-
 /* exports */
 GTEXT(__irq_wrapper)
 

@@ -296,9 +292,6 @@ on_thread_stack:
 #endif /* CONFIG_PREEMPT_ENABLED */
 
 reschedule:
-#if CONFIG_TIMESLICING
-	call _update_time_slice_before_swap
-#endif
 #if CONFIG_TRACING
 	call z_sys_trace_thread_switched_in
 #endif

arch/x86/core/intstub.S

@@ -30,9 +30,6 @@
 /* externs */
 
 GTEXT(__swap)
-#if defined(CONFIG_TIMESLICING)
-GTEXT(_update_time_slice_before_swap)
-#endif
 
 #ifdef CONFIG_SYS_POWER_MANAGEMENT
 GTEXT(_sys_power_save_idle_exit)

@@ -273,9 +270,6 @@ alreadyOnIntStack:
 
 	popl %esp	/* switch back to outgoing thread's stack */
 
-#if defined(CONFIG_TIMESLICING)
-	call _update_time_slice_before_swap
-#endif
#ifdef CONFIG_STACK_SENTINEL
 	call _check_stack_sentinel
 #endif

kernel/include/kernel_structs.h

@@ -96,6 +96,11 @@ struct _cpu {
 	/* one assigned idle thread per CPU */
 	struct k_thread *idle_thread;
 
+#ifdef CONFIG_TIMESLICING
+	/* number of ticks remaining in current time slice */
+	int slice_ticks;
+#endif
+
 	u8_t id;
 
 #ifdef CONFIG_SMP
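
Why slice_ticks lands in struct _cpu: the commit message notes the old scheme kept slice accounting in a global, so on SMP both cores' timer interrupts charged the same counter. A host-side C illustration of the difference (the counting-up vs. counting-down detail of the old code is simplified; nothing here is kernel code):

    #include <stdio.h>

    struct cpu { int slice_ticks; };          /* mirrors the new per-CPU field */
    static struct cpu cpus[2] = { {3}, {3} }; /* each CPU owns a 3-tick slice */
    static int global_slice = 3;              /* the old shared counter */

    int main(void)
    {
        /* one timer tick is announced on each of two CPUs */
        for (int i = 0; i < 2; i++) {
            cpus[i].slice_ticks--; /* new: each core charges its own thread */
            global_slice--;        /* old: both decrements hit one slice */
        }
        printf("per-cpu: %d %d, shared: %d\n",
               cpus[0].slice_ticks, cpus[1].slice_ticks, global_slice);
        /* per-cpu: 2 2, shared: 1, so the shared slice expires twice as fast */
        return 0;
    }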

kernel/include/ksched.h

@@ -48,10 +48,7 @@ void *_get_next_switch_handle(void *interrupted);
 struct k_thread *_find_first_thread_to_unpend(_wait_q_t *wait_q,
 					       struct k_thread *from);
 void idle(void *a, void *b, void *c);
-
-#ifdef CONFIG_TIMESLICING
-void z_reset_timeslice(void);
-#endif
+void z_time_slice(int ticks);
 
 /* find which one is the next thread to run */
 /* must be called with interrupts locked */

@@ -227,13 +224,7 @@ static inline void _ready_thread(struct k_thread *thread)
 		_add_thread_to_ready_q(thread);
 	}
 
-#if defined(CONFIG_TICKLESS_KERNEL) && !defined(CONFIG_SMP) && \
-	defined(CONFIG_TIMESLICING)
-	z_reset_timeslice();
-#endif
-
 	sys_trace_thread_ready(thread);
-
 }
 
 static inline void _ready_one_thread(_wait_q_t *wq)

kernel/include/kswap.h

@@ -9,12 +9,6 @@
 #include <ksched.h>
 #include <kernel_arch_func.h>
 
-#ifdef CONFIG_TIMESLICING
-extern void _update_time_slice_before_swap(void);
-#else
-#define _update_time_slice_before_swap() /**/
-#endif
-
 #ifdef CONFIG_STACK_SENTINEL
 extern void _check_stack_sentinel(void);
 #else

@@ -53,7 +47,6 @@ static inline int _Swap(unsigned int key)
 	old_thread = _current;
 
 	_check_stack_sentinel();
-	_update_time_slice_before_swap();
 
 #ifdef CONFIG_TRACING
 	sys_trace_thread_switched_out();

@@ -96,7 +89,6 @@ static inline int _Swap(unsigned int key)
 {
 	int ret;
 	_check_stack_sentinel();
-	_update_time_slice_before_swap();
 
 #ifdef CONFIG_TRACING
 	sys_trace_thread_switched_out();

kernel/sched.c

@@ -201,12 +201,61 @@ static struct k_thread *next_up(void)
 #endif
 }
 
+#ifdef CONFIG_TIMESLICING
+
+static int slice_time;
+static int slice_max_prio;
+
+static void reset_time_slice(void)
+{
+	int to = _get_next_timeout_expiry();
+
+	_current_cpu->slice_ticks = slice_time;
+
+	if (to == K_FOREVER || slice_time < to) {
+		z_clock_set_timeout(slice_time, false);
+	}
+}
+
+void k_sched_time_slice_set(s32_t duration_in_ms, int prio)
+{
+	slice_time = _ms_to_ticks(duration_in_ms);
+	slice_max_prio = prio;
+	reset_time_slice();
+}
+
+static inline int sliceable(struct k_thread *t)
+{
+	return _is_preempt(t)
+		&& !_is_prio_higher(t->base.prio, slice_max_prio)
+		&& !_is_idle(t);
+}
+
+/* Called out of each timer interrupt */
+void z_time_slice(int ticks)
+{
+	if (slice_time && sliceable(_current)) {
+		if (ticks >= _current_cpu->slice_ticks) {
+			_move_thread_to_end_of_prio_q(_current);
+			reset_time_slice();
+		} else {
+			_current_cpu->slice_ticks -= ticks;
+		}
+	}
+}
+#else
+static void reset_time_slice(void) { /* !CONFIG_TIMESLICING */ }
+#endif
+
 static void update_cache(int preempt_ok)
 {
 #ifndef CONFIG_SMP
 	struct k_thread *th = next_up();
 
 	if (should_preempt(th, preempt_ok)) {
+		if (th != _current) {
+			reset_time_slice();
+		}
 		_kernel.ready_q.cache = th;
 	} else {
 		_kernel.ready_q.cache = _current;
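
A quick host-side trace of the accounting in z_time_slice() above; the kernel plumbing (_current_cpu, sliceable(), the real requeue) is stubbed out, and only the tick arithmetic mirrors the patch:

    #include <stdio.h>

    static int slice_time = 3; /* configured slice length, in ticks */
    static int slice_ticks;    /* stands in for _current_cpu->slice_ticks */

    static void reset_time_slice(void) { slice_ticks = slice_time; }

    static void time_slice(int ticks) /* mirrors the expiry check */
    {
        if (ticks >= slice_ticks) {
            printf("slice expired -> requeue current thread\n");
            reset_time_slice();
        } else {
            slice_ticks -= ticks;
            printf("%d tick(s) left in slice\n", slice_ticks);
        }
    }

    int main(void)
    {
        reset_time_slice();
        time_slice(1); /* 2 left */
        time_slice(1); /* 1 left */
        time_slice(2); /* batch of 2 >= remaining 1: expire and rearm */
        return 0;
    }

The batched case matters: under TICKLESS_KERNEL a single z_clock_announce() can report several elapsed ticks, and the >= comparison retires the slice in one step instead of counting tick-by-tick.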

@@ -238,7 +287,7 @@ void _move_thread_to_end_of_prio_q(struct k_thread *thread)
 		_priq_run_remove(&_kernel.ready_q.runq, thread);
 		_priq_run_add(&_kernel.ready_q.runq, thread);
 		_mark_thread_as_queued(thread);
-		update_cache(0);
+		update_cache(thread == _current);
 	}
 }
 

@@ -452,6 +501,7 @@ void *_get_next_switch_handle(void *interrupted)
 	struct k_thread *th = next_up();
 
 	if (_current != th) {
+		reset_time_slice();
 		_current_cpu->swap_ok = 0;
 		_current = th;
 	}

@@ -590,76 +640,6 @@ struct k_thread *_priq_mq_best(struct _priq_mq *pq)
 			  struct k_thread, base.qnode_dlist);
 }
 
-#ifdef CONFIG_TIMESLICING
-extern s32_t _time_slice_duration;    /* Measured in ticks */
-extern s32_t _time_slice_elapsed;     /* Measured in ticks */
-extern int _time_slice_prio_ceiling;
-
-void k_sched_time_slice_set(s32_t duration_in_ms, int prio)
-{
-	__ASSERT(duration_in_ms >= 0, "");
-	__ASSERT((prio >= 0) && (prio < CONFIG_NUM_PREEMPT_PRIORITIES), "");
-
-	_time_slice_duration = _ms_to_ticks(duration_in_ms);
-	_time_slice_elapsed = 0;
-	_time_slice_prio_ceiling = prio;
-}
-
-int _is_thread_time_slicing(struct k_thread *thread)
-{
-	int ret = 0;
-
-	/* Should fix API.  Doesn't make sense for non-running threads
-	 * to call this
-	 */
-	__ASSERT_NO_MSG(thread == _current);
-
-	if (_time_slice_duration <= 0 || !_is_preempt(thread) ||
-	    _is_prio_higher(thread->base.prio, _time_slice_prio_ceiling)) {
-		return 0;
-	}
-
-
-	LOCKED(&sched_lock) {
-		struct k_thread *next = _priq_run_best(&_kernel.ready_q.runq);
-
-		if (next != NULL) {
-			ret = thread->base.prio == next->base.prio;
-		}
-	}
-
-	return ret;
-}
-
-#ifdef CONFIG_TICKLESS_KERNEL
-void z_reset_timeslice(void)
-{
-	if (_is_thread_time_slicing(_get_next_ready_thread())) {
-		z_clock_set_timeout(_time_slice_duration, false);
-	}
-}
-#endif
-
-/* Must be called with interrupts locked */
-/* Should be called only immediately before a thread switch */
-void _update_time_slice_before_swap(void)
-{
-#if defined(CONFIG_TICKLESS_KERNEL) && !defined(CONFIG_SMP)
-	if (!_is_thread_time_slicing(_get_next_ready_thread())) {
-		return;
-	}
-
-	int elapsed = (int)(z_clock_uptime() - z_last_tick_announced);
-	int next_timeout = _get_next_timeout_expiry() - elapsed;
-	int t = min(_time_slice_duration, next_timeout);
-
-	z_clock_set_timeout(t, false);
-#endif
-	/* Restart time slice count at new thread switch */
-	_time_slice_elapsed = 0;
-}
-#endif /* CONFIG_TIMESLICING */
-
 int _unpend_all(_wait_q_t *waitq)
 {
 	int need_sched = 0;
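
The per-swap min() computation deleted here is replaced by the single decision in reset_time_slice() near the top of this file: program the slice timeout only when it would fire before the next scheduled timeout. A standalone illustration of that predicate (K_FOREVER is -1 by the kernel's convention; the function name is made up for the example):

    #include <stdio.h>

    #define K_FOREVER (-1) /* "no timeout scheduled", as in the kernel */

    /* should the slice timer shorten the hardware timeout? */
    static int must_arm_slice_timer(int slice_time, int next_timeout)
    {
        return next_timeout == K_FOREVER || slice_time < next_timeout;
    }

    int main(void)
    {
        /* next timeout in 3 ticks, slice is 5: the timeout interrupt
         * arrives first and z_clock_announce() does the accounting anyway
         */
        printf("%d\n", must_arm_slice_timer(5, 3));         /* prints 0 */

        /* nothing else scheduled: the slice timer must be armed */
        printf("%d\n", must_arm_slice_timer(5, K_FOREVER)); /* prints 1 */
        return 0;
    }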

kernel/sys_clock.c

@@ -242,49 +242,6 @@ static inline void handle_timeouts(s32_t ticks)
 #define handle_timeouts(ticks) do { } while (false)
 #endif
 
-#ifdef CONFIG_TIMESLICING
-s32_t _time_slice_elapsed;
-s32_t _time_slice_duration;
-int _time_slice_prio_ceiling;
-
-/*
- * Always called from interrupt level, and always only from the system clock
- * interrupt, thus:
- * - _current does not have to be protected, since it only changes at thread
- *   level or when exiting a non-nested interrupt
- * - _time_slice_elapsed does not have to be protected, since it can only change
- *   in this function and at thread level
- * - _time_slice_duration does not have to be protected, since it can only
- *   change at thread level
- */
-static void handle_time_slicing(s32_t ticks)
-{
-#ifdef CONFIG_TICKLESS_KERNEL
-	next_ts = 0;
-#endif
-	if (!_is_thread_time_slicing(_current)) {
-		return;
-	}
-
-	_time_slice_elapsed += ticks;
-	if (_time_slice_elapsed >= _time_slice_duration) {
-
-		unsigned int key;
-
-		_time_slice_elapsed = 0;
-
-		key = irq_lock();
-		_move_thread_to_end_of_prio_q(_current);
-		irq_unlock(key);
-	}
-#ifdef CONFIG_TICKLESS_KERNEL
-	next_ts = _time_slice_duration - _time_slice_elapsed;
-#endif
-}
-#else
-#define handle_time_slicing(ticks) do { } while (false)
-#endif
-
 /**
  *
  * @brief Announce ticks to the kernel

@@ -319,8 +276,9 @@ void z_clock_announce(s32_t ticks)
 #endif
 	handle_timeouts(ticks);
 
-	/* time slicing is basically handled like just yet another timeout */
-	handle_time_slicing(ticks);
+#ifdef CONFIG_TIMESLICING
+	z_time_slice(ticks);
+#endif
 
 #ifdef CONFIG_TICKLESS_KERNEL
 	u32_t next_to = _get_next_timeout_expiry();
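
End-to-end, the only obligation this leaves on a timer driver is to announce elapsed ticks and honor z_clock_set_timeout(). A sketch of the flow from the driver side, where my_timer_isr() and elapsed_ticks() are hypothetical stand-ins and z_clock_announce() is the real entry point:

    /* hypothetical timer ISR, for illustration only */
    void my_timer_isr(void *arg)
    {
        s32_t ticks = elapsed_ticks(); /* hypothetical: ticks since last announce */

        /* runs handle_timeouts() and, under CONFIG_TIMESLICING,
         * z_time_slice(); the scheduler rearms the slice timeout
         * itself via reset_time_slice() when it decides to switch
         */
        z_clock_announce(ticks);
    }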