kernel/arch: inspect prio/sched_locked together for preemptibility

These two fields in the thread structure control the preemptibility of a
thread.

sched_locked is decremented when the scheduler gets locked, which means
that the scheduler is locked for values 0xff to 0x01, since it can be
locked recursively. A thread is coop if its priority is negative, thus
if the prio field value is 0x80 to 0xff when looked at as an unsigned
value.

By putting them end-to-end, this means that a thread is non-preemptible
if the bundled value is greater than or equal to 0x0080. This is the
only thing the interrupt exit code has to check to decide to try a
reschedule or not.

Change-Id: I902d36c14859d0d7a951a6aa1bea164613821aca
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
Benjamin Walsh 2016-12-21 16:00:35 -05:00 committed by Anas Nashif
commit 168695c7ef
10 changed files with 80 additions and 77 deletions
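Not part of the commit: a minimal, self-contained C sketch of the bundling described in the message above, assuming a little-endian target and C11 anonymous struct/union members. The names demo_thread_base, is_preempt and NON_PREEMPT_THRESHOLD are illustrative stand-ins for this sketch only; the real definitions are in the kernel_structs.h and ksched.h hunks below.

/*
 * Illustrative sketch of the prio/sched_locked bundling (little-endian
 * layout shown); not the actual Zephyr definitions.
 */
#include <assert.h>
#include <stdint.h>

#define NON_PREEMPT_THRESHOLD 0x0080

struct demo_thread_base {
	union {
		struct {
			volatile int8_t prio; /* low byte: negative means coop */
			uint8_t sched_locked; /* high byte: non-zero means locked */
		};
		uint16_t preempt;             /* both bytes read as one value */
	};
};

static int is_preempt(const struct demo_thread_base *t)
{
	/* preemptible only if prio >= 0 (0x00..0x7f) and sched_locked == 0 */
	return t->preempt < NON_PREEMPT_THRESHOLD;
}

int main(void)
{
	struct demo_thread_base t = { .prio = 5, .sched_locked = 0 };

	assert(is_preempt(&t));   /* preempt == 0x0005, below the threshold */

	t.sched_locked = 0xff;    /* scheduler locked once: 0 decremented to 0xff */
	assert(!is_preempt(&t));  /* preempt == 0xff05 >= 0x0080 */

	t.sched_locked = 0;
	t.prio = -1;              /* coop priority */
	assert(!is_preempt(&t));  /* preempt == 0x00ff >= 0x0080 */

	return 0;
}

On a big-endian target the two bytes are simply declared in the opposite order, exactly as the kernel_structs.h hunk below does.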


@@ -149,13 +149,12 @@ SECTION_FUNC(TEXT, _firq_exit)
 .balign 4
 _firq_check_for_swap:
-	/* coop thread ? do not schedule */
-	ldb.x r0, [r2, _thread_offset_to_prio]
-	brlt r0, 0, _firq_no_reschedule
-
-	/* scheduler locked ? do not schedule */
-	ldb_s r0, [r2, _thread_offset_to_sched_locked]
-	brne r0, 0, _firq_no_reschedule
+	/*
+	 * Non-preemptible thread ? Do not schedule (see explanation of
+	 * preempt field in kernel_struct.h).
+	 */
+	ldh_s r0, [r2, _thread_offset_to_preempt]
+	brhs r0, _NON_PREEMPT_THRESHOLD, _firq_no_reschedule
 
 	/* Check if the current thread (in r2) is the cached thread */
 	ld_s r0, [r1, _kernel_offset_to_ready_q_cache]


@@ -167,13 +167,12 @@ _trap_return:
 .balign 4
 _trap_check_for_swap:
-	/* coop thread ? do not schedule */
-	ldb.x r0, [r2, _thread_offset_to_prio]
-	brlt r0, 0, _trap_return
-
-	/* scheduler locked ? do not schedule */
-	ldb_s r0, [r2, _thread_offset_to_sched_locked]
-	brne r0, 0, _trap_return
+	/*
+	 * Non-preemptible thread ? Do not schedule (see explanation of
+	 * preempt field in kernel_struct.h).
+	 */
+	ldh_s r0, [r2, _thread_offset_to_preempt]
+	brhs r0, _NON_PREEMPT_THRESHOLD, _trap_return
 
 	/* check if the current thread needs to be rescheduled */
 	ld_s r0, [r1, _kernel_offset_to_ready_q_cache]


@@ -125,43 +125,31 @@ SECTION_FUNC(TEXT, _rirq_exit)
 	cmp r0, r3
 	brgt _rirq_return_from_rirq
-	ld sp, [r2, _thread_offset_to_sp]
 #endif
 
 	/*
-	 * Both (a)reschedule and (b)non-reschedule cases need to load the current
-	 * thread's stack, but don't have to use it until the decision is taken:
-	 * load the delay slots with the 'load stack pointer' instruction.
+	 * Non-preemptible thread ? Do not schedule (see explanation of
+	 * preempt field in kernel_struct.h).
+	 */
+	ldh_s r0, [r2, _thread_offset_to_preempt]
+	mov r3, _NON_PREEMPT_THRESHOLD
+	brhs.d r0, r3, _rirq_no_reschedule
+
+	/*
+	 * Both (a)reschedule and (b)non-reschedule cases need to load the
+	 * current thread's stack, but don't have to use it until the decision
+	 * is taken: load the delay slots with the 'load stack pointer'
+	 * instruction.
 	 *
 	 * a) needs to load it to save outgoing context.
 	 * b) needs to load it to restore the interrupted context.
 	 */
-
-	/* coop thread ? do not schedule */
-	ldb.x r0, [r2, _thread_offset_to_prio]
-	cmp_s r0, 0
-	blt.d _rirq_no_reschedule
 	ld sp, [r2, _thread_offset_to_sp]
 
-	/* scheduler locked ? do not schedule */
-	ldb_s r0, [r2, _thread_offset_to_sched_locked]
-#ifdef CONFIG_ARC_STACK_CHECKING
-	breq.d r0, 0, _rirq_reschedule_check
-	ld sp, [r2, _thread_offset_to_sp]
-	b _rirq_no_reschedule
-	/* This branch is a bit far */
-_rirq_reschedule_check:
-#else
-	brne.d r0, 0, _rirq_no_reschedule
-	ld sp, [r2, _thread_offset_to_sp]
-#endif
-
 	/* check if the current thread needs to be rescheduled */
 	ld_s r0, [r1, _kernel_offset_to_ready_q_cache]
-	breq.d r0, r2, _rirq_no_reschedule
-
-	/* delay slot: always load the current thread's stack */
-	ld sp, [r2, _thread_offset_to_sp]
+	breq r0, r2, _rirq_no_reschedule
 
 	/* cached thread to run is in r0, fall through */


@@ -84,21 +84,14 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
 	ldr r0, =_kernel
 	ldr r1, [r0, #_kernel_offset_to_current]
 
-#ifdef CONFIG_CPU_CORTEX_M0_M0PLUS
-	movs r3, #_thread_offset_to_prio
-	ldrsb r2, [r1, r3]
-#else
-	ldrsb r2, [r1, #_thread_offset_to_prio]
-#endif
-	ldrb r3, [r1, #_thread_offset_to_sched_locked]
-
-	/* coop thread ? do not schedule */
-	cmp r2, #0
-	blt _EXIT_EXC
-
-	/* scheduler locked ? do not schedule */
-	cmp r3, #0
-	bne _EXIT_EXC
+	/*
+	 * Non-preemptible thread ? Do not schedule (see explanation of
+	 * preempt field in kernel_struct.h).
+	 */
+	ldrh r2, [r1, #_thread_offset_to_preempt]
+	cmp r2, #_PREEMPT_THRESHOLD
+	bhi _EXIT_EXC
 
 	ldr r0, [r0, _kernel_offset_to_ready_q_cache]
 	cmp r0, r1


@@ -131,13 +131,13 @@ on_irq_stack:
 	 * switch
 	 */
-	/* Do not reschedule coop threads (threads that have negative prio) */
-	ldb r12, _thread_offset_to_prio(r11)
-	blt r12, zero, no_reschedule
-
-	/* Do not reschedule if scheduler is locked */
-	ldb r12, _thread_offset_to_sched_locked(r11)
-	bne r12, zero, no_reschedule
+	/*
+	 * Non-preemptible thread ? Do not schedule (see explanation of
+	 * preempt field in kernel_struct.h).
+	 */
+	ldhu r12, _thread_offset_to_preempt(r11)
+	movui r3, _NON_PREEMPT_THRESHOLD
+	bgeu r12, r3, no_reschedule
 
 	/* Call into the kernel to see if a scheduling decision is necessary */
 	ldw r2, _kernel_offset_to_ready_q_cache(r10)


@@ -288,18 +288,11 @@ alreadyOnIntStack:
 	movl _kernel_offset_to_current(%ecx), %edx
 
 	/*
-	 * Determine whether the execution of the ISR requires a context
-	 * switch. If the thread is preemptible, scheduler is not locked and
-	 * a higher priority thread exists, a _Swap() needs to occur.
+	 * Non-preemptible thread ? Do not schedule (see explanation of
+	 * preempt field in kernel_struct.h).
 	 */
-
-	/* do not reschedule coop threads (negative priority) */
-	cmpb $0, _thread_offset_to_prio(%edx)
-	jl noReschedule
-
-	/* do not reschedule if scheduler is locked */
-	cmpb $0, _thread_offset_to_sched_locked(%edx)
-	jne noReschedule
+	cmpw $_NON_PREEMPT_THRESHOLD, _thread_offset_to_preempt(%edx)
+	jae noReschedule
 
 	/* reschedule only if the scheduler says that we must do so */


@@ -52,6 +52,7 @@ GEN_OFFSET_SYM(_thread_base_t, execution_flags);
 GEN_OFFSET_SYM(_thread_base_t, thread_state);
 GEN_OFFSET_SYM(_thread_base_t, prio);
 GEN_OFFSET_SYM(_thread_base_t, sched_locked);
+GEN_OFFSET_SYM(_thread_base_t, preempt);
 GEN_OFFSET_SYM(_thread_base_t, swap_data);
 
 GEN_OFFSET_SYM(_thread_t, base);


@@ -65,6 +65,11 @@
 #endif
 /* end - execution flags */
 
+/* lowest value of _thread_base.preempt at which a thread is non-preemptible */
+#define _NON_PREEMPT_THRESHOLD 0x0080
+
+/* highest value of _thread_base.preempt at which a thread is preemptible */
+#define _PREEMPT_THRESHOLD (_NON_PREEMPT_THRESHOLD - 1)
 
 #include <kernel_arch_data.h>
@@ -91,11 +96,32 @@ struct _thread_base {
 	/* thread state */
 	uint8_t thread_state;
 
-	/* scheduler lock count */
-	volatile uint8_t sched_locked;
-
-	/* thread priority used to sort linked list */
-	int8_t prio;
+	/*
+	 * scheduler lock count and thread priority
+	 *
+	 * These two fields control the preemptibility of a thread.
+	 *
+	 * When the scheduler is locked, sched_locked is decremented, which
+	 * means that the scheduler is locked for values from 0xff to 0x01. A
+	 * thread is coop if its prio is negative, thus 0x80 to 0xff when
+	 * looked at the value as unsigned.
+	 *
+	 * By putting them end-to-end, this means that a thread is
+	 * non-preemptible if the bundled value is greater than or equal to
+	 * 0x0080.
+	 */
+	union {
+		struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+			uint8_t sched_locked;
+			volatile int8_t prio;
+#else /* LITTLE and PDP */
+			volatile int8_t prio;
+			uint8_t sched_locked;
+#endif
+		};
+		uint16_t preempt;
+	};
 
 	/* data returned by APIs */
 	void *swap_data;


@@ -146,7 +146,8 @@ static inline int _is_coop(struct k_thread *thread)
 static inline int _is_preempt(struct k_thread *thread)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
-	return !_is_coop(thread) && !thread->base.sched_locked;
+	/* explanation in kernel_struct.h */
+	return thread->base.preempt <= _PREEMPT_THRESHOLD;
 #else
 	return 0;
 #endif


@@ -65,6 +65,9 @@
 #define _thread_offset_to_sched_locked \
 	(___thread_t_base_OFFSET + ___thread_base_t_sched_locked_OFFSET)
 
+#define _thread_offset_to_preempt \
+	(___thread_t_base_OFFSET + ___thread_base_t_preempt_OFFSET)
+
 #define _thread_offset_to_esf \
 	(___thread_t_arch_OFFSET + ___thread_arch_t_esf_OFFSET)