kernel/arch: inspect prio/sched_locked together for preemptibility

These two fields in the thread structure control the preemptibility of a
thread.

sched_locked is decremented when the scheduler gets locked, so the
scheduler is locked for values 0xff down to 0x01 (it can be locked
recursively). A thread is coop if its priority is negative, i.e. if the
prio field reads as 0x80 to 0xff when treated as an unsigned value.

Putting the two fields end-to-end as one 16-bit value means that a
thread is non-preemptible exactly when the bundled value is greater
than or equal to 0x0080. This is the only check the interrupt exit code
has to perform to decide whether to attempt a reschedule (see the
sketch below).

Change-Id: I902d36c14859d0d7a951a6aa1bea164613821aca
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
Author: Benjamin Walsh, 2016-12-21 16:00:35 -05:00 (committed by Anas Nashif)
commit 168695c7ef
10 changed files with 80 additions and 77 deletions
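
Not part of the commit, only an illustration: a minimal standalone C
sketch of the same bundling, using stand-in names (union preempt_bits,
NON_PREEMPT_THRESHOLD) rather than the kernel's _thread_base fields, to
show how the two bytes combine and where the 0x0080 threshold comes
from.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the prio/sched_locked pair bundled into one 16-bit value,
 * mirroring the kernel_structs.h change further down. */
union preempt_bits {
	struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		uint8_t sched_locked;
		int8_t prio;
#else /* LITTLE and PDP */
		int8_t prio;
		uint8_t sched_locked;
#endif
	};
	uint16_t preempt;
};

#define NON_PREEMPT_THRESHOLD 0x0080

int main(void)
{
	/* (prio, sched_locked) pairs: a preemptible thread, a coop thread
	 * (negative prio), and a preemptible thread with the scheduler
	 * locked once (0x00 decremented to 0xff). */
	const struct { int8_t prio; uint8_t locked; } cases[] = {
		{ 5, 0x00 }, { -1, 0x00 }, { 5, 0xff },
	};

	for (int i = 0; i < 3; i++) {
		union preempt_bits b = { .prio = cases[i].prio,
					 .sched_locked = cases[i].locked };

		printf("prio=%4d sched_locked=0x%02x -> preempt=0x%04x (%s)\n",
		       b.prio, (unsigned)b.sched_locked, (unsigned)b.preempt,
		       b.preempt >= NON_PREEMPT_THRESHOLD ?
				"non-preemptible" : "preemptible");
	}

	return 0;
}

The assembly paths below do the equivalent of that final comparison in
one load and one branch: ldh_s/ldrh/ldhu/cmpw read the 16-bit preempt
field and compare it against _NON_PREEMPT_THRESHOLD (or, on ARM,
against _PREEMPT_THRESHOLD with a 'higher than' branch).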

@@ -149,13 +149,12 @@ SECTION_FUNC(TEXT, _firq_exit)
.balign 4
_firq_check_for_swap:
/* coop thread ? do not schedule */
ldb.x r0, [r2, _thread_offset_to_prio]
brlt r0, 0, _firq_no_reschedule
/* scheduler locked ? do not schedule */
ldb_s r0, [r2, _thread_offset_to_sched_locked]
brne r0, 0, _firq_no_reschedule
/*
* Non-preemptible thread ? Do not schedule (see explanation of
* preempt field in kernel_struct.h).
*/
ldh_s r0, [r2, _thread_offset_to_preempt]
brhs r0, _NON_PREEMPT_THRESHOLD, _firq_no_reschedule
/* Check if the current thread (in r2) is the cached thread */
ld_s r0, [r1, _kernel_offset_to_ready_q_cache]

@@ -167,13 +167,12 @@ _trap_return:
.balign 4
_trap_check_for_swap:
/* coop thread ? do not schedule */
ldb.x r0, [r2, _thread_offset_to_prio]
brlt r0, 0, _trap_return
/* scheduler locked ? do not schedule */
ldb_s r0, [r2, _thread_offset_to_sched_locked]
brne r0, 0, _trap_return
/*
* Non-preemptible thread ? Do not schedule (see explanation of
* preempt field in kernel_struct.h).
*/
ldh_s r0, [r2, _thread_offset_to_preempt]
brhs r0, _NON_PREEMPT_THRESHOLD, _trap_return
/* check if the current thread needs to be rescheduled */
ld_s r0, [r1, _kernel_offset_to_ready_q_cache]

@@ -125,43 +125,31 @@ SECTION_FUNC(TEXT, _rirq_exit)
cmp r0, r3
brgt _rirq_return_from_rirq
ld sp, [r2, _thread_offset_to_sp]
#endif
/*
* Both (a)reschedule and (b)non-reschedule cases need to load the current
* thread's stack, but don't have to use it until the decision is taken:
* load the delay slots with the 'load stack pointer' instruction.
* Non-preemptible thread ? Do not schedule (see explanation of
* preempt field in kernel_struct.h).
*/
ldh_s r0, [r2, _thread_offset_to_preempt]
mov r3, _NON_PREEMPT_THRESHOLD
brhs.d r0, r3, _rirq_no_reschedule
/*
* Both (a)reschedule and (b)non-reschedule cases need to load the
* current thread's stack, but don't have to use it until the decision
* is taken: load the delay slots with the 'load stack pointer'
* instruction.
*
* a) needs to load it to save outgoing context.
* b) needs to load it to restore the interrupted context.
*/
/* coop thread ? do not schedule */
ldb.x r0, [r2, _thread_offset_to_prio]
cmp_s r0, 0
blt.d _rirq_no_reschedule
ld sp, [r2, _thread_offset_to_sp]
/* scheduler locked ? do not schedule */
ldb_s r0, [r2, _thread_offset_to_sched_locked]
#ifdef CONFIG_ARC_STACK_CHECKING
breq.d r0, 0, _rirq_reschedule_check
ld sp, [r2, _thread_offset_to_sp]
b _rirq_no_reschedule
/* This branch is a bit far */
_rirq_reschedule_check:
#else
brne.d r0, 0, _rirq_no_reschedule
ld sp, [r2, _thread_offset_to_sp]
#endif
/* check if the current thread needs to be rescheduled */
ld_s r0, [r1, _kernel_offset_to_ready_q_cache]
breq.d r0, r2, _rirq_no_reschedule
/* delay slot: always load the current thread's stack */
ld sp, [r2, _thread_offset_to_sp]
breq r0, r2, _rirq_no_reschedule
/* cached thread to run is in r0, fall through */

@@ -84,21 +84,14 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
ldr r0, =_kernel
ldr r1, [r0, #_kernel_offset_to_current]
#ifdef CONFIG_CPU_CORTEX_M0_M0PLUS
movs r3, #_thread_offset_to_prio
ldrsb r2, [r1, r3]
#else
ldrsb r2, [r1, #_thread_offset_to_prio]
#endif
ldrb r3, [r1, #_thread_offset_to_sched_locked]
/* coop thread ? do not schedule */
cmp r2, #0
blt _EXIT_EXC
/* scheduler locked ? do not schedule */
cmp r3, #0
bne _EXIT_EXC
/*
* Non-preemptible thread ? Do not schedule (see explanation of
* preempt field in kernel_struct.h).
*/
ldrh r2, [r1, #_thread_offset_to_preempt]
cmp r2, #_PREEMPT_THRESHOLD
bhi _EXIT_EXC
ldr r0, [r0, _kernel_offset_to_ready_q_cache]
cmp r0, r1

@@ -131,13 +131,13 @@ on_irq_stack:
* switch
*/
/* Do not reschedule coop threads (threads that have negative prio) */
ldb r12, _thread_offset_to_prio(r11)
blt r12, zero, no_reschedule
/* Do not reschedule if scheduler is locked */
ldb r12, _thread_offset_to_sched_locked(r11)
bne r12, zero, no_reschedule
/*
* Non-preemptible thread ? Do not schedule (see explanation of
* preempt field in kernel_struct.h).
*/
ldhu r12, _thread_offset_to_preempt(r11)
movui r3, _NON_PREEMPT_THRESHOLD
bgeu r12, r3, no_reschedule
/* Call into the kernel to see if a scheduling decision is necessary */
ldw r2, _kernel_offset_to_ready_q_cache(r10)

@@ -288,18 +288,11 @@ alreadyOnIntStack:
movl _kernel_offset_to_current(%ecx), %edx
/*
* Determine whether the execution of the ISR requires a context
* switch. If the thread is preemptible, scheduler is not locked and
* a higher priority thread exists, a _Swap() needs to occur.
* Non-preemptible thread ? Do not schedule (see explanation of
* preempt field in kernel_struct.h).
*/
/* do not reschedule coop threads (negative priority) */
cmpb $0, _thread_offset_to_prio(%edx)
jl noReschedule
/* do not reschedule if scheduler is locked */
cmpb $0, _thread_offset_to_sched_locked(%edx)
jne noReschedule
cmpw $_NON_PREEMPT_THRESHOLD, _thread_offset_to_preempt(%edx)
jae noReschedule
/* reschedule only if the scheduler says that we must do so */

@@ -52,6 +52,7 @@ GEN_OFFSET_SYM(_thread_base_t, execution_flags);
GEN_OFFSET_SYM(_thread_base_t, thread_state);
GEN_OFFSET_SYM(_thread_base_t, prio);
GEN_OFFSET_SYM(_thread_base_t, sched_locked);
GEN_OFFSET_SYM(_thread_base_t, preempt);
GEN_OFFSET_SYM(_thread_base_t, swap_data);
GEN_OFFSET_SYM(_thread_t, base);

@@ -65,6 +65,11 @@
#endif
/* end - execution flags */
/* lowest value of _thread_base.preempt at which a thread is non-preemptible */
#define _NON_PREEMPT_THRESHOLD 0x0080
/* highest value of _thread_base.preempt at which a thread is preemptible */
#define _PREEMPT_THRESHOLD (_NON_PREEMPT_THRESHOLD - 1)
#include <kernel_arch_data.h>
@@ -91,11 +96,32 @@ struct _thread_base {
/* thread state */
uint8_t thread_state;
/* scheduler lock count */
volatile uint8_t sched_locked;
/* thread priority used to sort linked list */
int8_t prio;
/*
* scheduler lock count and thread priority
*
* These two fields control the preemptibility of a thread.
*
* When the scheduler is locked, sched_locked is decremented, which
* means that the scheduler is locked for values from 0xff to 0x01. A
* thread is coop if its prio is negative, thus 0x80 to 0xff when
* looked at the value as unsigned.
*
* By putting them end-to-end, this means that a thread is
* non-preemptible if the bundled value is greater than or equal to
* 0x0080.
*/
union {
struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
uint8_t sched_locked;
volatile int8_t prio;
#else /* LITTLE and PDP */
volatile int8_t prio;
uint8_t sched_locked;
#endif
};
uint16_t preempt;
};
/* data returned by APIs */
void *swap_data;
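
Illustrative only, not part of the patch: a compile-time sketch, with a
stand-in struct named thread_base_layout instead of the kernel's
_thread_base, of the layout assumption the bundled field relies on,
namely that the prio/sched_locked pair occupies exactly the same two
bytes as the 16-bit preempt view.

#include <assert.h>	/* static_assert (C11) */
#include <stdint.h>

/* Mirrors the union added above. */
struct thread_base_layout {
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			uint8_t sched_locked;
			volatile int8_t prio;
#else /* LITTLE and PDP */
			volatile int8_t prio;
			uint8_t sched_locked;
#endif
		};
		uint16_t preempt;
	};
};

static_assert(sizeof(((struct thread_base_layout *)0)->preempt) == 2,
	      "preempt must be exactly 16 bits");
static_assert(sizeof(struct thread_base_layout) == 2,
	      "prio and sched_locked must overlay preempt with no padding");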

@@ -146,7 +146,8 @@ static inline int _is_coop(struct k_thread *thread)
static inline int _is_preempt(struct k_thread *thread)
{
#ifdef CONFIG_PREEMPT_ENABLED
return !_is_coop(thread) && !thread->base.sched_locked;
/* explanation in kernel_struct.h */
return thread->base.preempt <= _PREEMPT_THRESHOLD;
#else
return 0;
#endif
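
Again an illustration rather than part of the commit: a brute-force
sketch checking that the single comparison now used in _is_preempt()
agrees with the old two-part test (thread not coop, scheduler not
locked) for every possible (prio, sched_locked) pair, assuming the
layout above where sched_locked ends up in the high byte of preempt and
prio in the low byte.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	for (int prio = -128; prio <= 127; prio++) {
		for (int locked = 0; locked <= 255; locked++) {
			/* the union in kernel_structs.h puts sched_locked in
			 * the high byte of 'preempt' and prio in the low byte,
			 * on either byte order */
			uint16_t preempt =
				(uint16_t)((locked << 8) | (uint8_t)prio);

			/* old test: not coop and scheduler not locked */
			int old_preempt = (prio >= 0) && (locked == 0);
			/* new test: one comparison against
			 * _PREEMPT_THRESHOLD (0x007f) */
			int new_preempt = (preempt <= 0x007f);

			assert(old_preempt == new_preempt);
		}
	}

	puts("single comparison matches !coop && !sched_locked everywhere");
	return 0;
}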

@@ -65,6 +65,9 @@
#define _thread_offset_to_sched_locked \
(___thread_t_base_OFFSET + ___thread_base_t_sched_locked_OFFSET)
#define _thread_offset_to_preempt \
(___thread_t_base_OFFSET + ___thread_base_t_preempt_OFFSET)
#define _thread_offset_to_esf \
(___thread_t_arch_OFFSET + ___thread_arch_t_esf_OFFSET)