diff --git a/arch/arc/core/fast_irq.S b/arch/arc/core/fast_irq.S
index 97fa6e6026a..47b41ea2c47 100644
--- a/arch/arc/core/fast_irq.S
+++ b/arch/arc/core/fast_irq.S
@@ -149,13 +149,12 @@ SECTION_FUNC(TEXT, _firq_exit)
 .balign 4
 _firq_check_for_swap:
-	/* coop thread ? do not schedule */
-	ldb.x r0, [r2, _thread_offset_to_prio]
-	brlt r0, 0, _firq_no_reschedule
-
-	/* scheduler locked ? do not schedule */
-	ldb_s r0, [r2, _thread_offset_to_sched_locked]
-	brne r0, 0, _firq_no_reschedule
+	/*
+	 * Non-preemptible thread? Do not schedule (see explanation of
+	 * preempt field in kernel_structs.h).
+	 */
+	ldh_s r0, [r2, _thread_offset_to_preempt]
+	brhs r0, _NON_PREEMPT_THRESHOLD, _firq_no_reschedule
 
 	/* Check if the current thread (in r2) is the cached thread */
 	ld_s r0, [r1, _kernel_offset_to_ready_q_cache]
diff --git a/arch/arc/core/fault_s.S b/arch/arc/core/fault_s.S
index eb842bfc437..77496212bf2 100644
--- a/arch/arc/core/fault_s.S
+++ b/arch/arc/core/fault_s.S
@@ -167,13 +167,12 @@ _trap_return:
 .balign 4
 _trap_check_for_swap:
-	/* coop thread ? do not schedule */
-	ldb.x r0, [r2, _thread_offset_to_prio]
-	brlt r0, 0, _trap_return
-
-	/* scheduler locked ? do not schedule */
-	ldb_s r0, [r2, _thread_offset_to_sched_locked]
-	brne r0, 0, _trap_return
+	/*
+	 * Non-preemptible thread? Do not schedule (see explanation of
+	 * preempt field in kernel_structs.h).
+	 */
+	ldh_s r0, [r2, _thread_offset_to_preempt]
+	brhs r0, _NON_PREEMPT_THRESHOLD, _trap_return
 
 	/* check if the current thread needs to be rescheduled */
 	ld_s r0, [r1, _kernel_offset_to_ready_q_cache]
diff --git a/arch/arc/core/regular_irq.S b/arch/arc/core/regular_irq.S
index b0e0fbdf305..17cc085b860 100644
--- a/arch/arc/core/regular_irq.S
+++ b/arch/arc/core/regular_irq.S
@@ -125,43 +125,31 @@ SECTION_FUNC(TEXT, _rirq_exit)
 	cmp r0, r3
 	brgt _rirq_return_from_rirq
-	ld sp, [r2, _thread_offset_to_sp]
 #endif
 
 	/*
-	 * Both (a)reschedule and (b)non-reschedule cases need to load the current
-	 * thread's stack, but don't have to use it until the decision is taken:
-	 * load the delay slots with the 'load stack pointer' instruction.
+	 * Non-preemptible thread? Do not schedule (see explanation of
+	 * preempt field in kernel_structs.h).
+	 */
+	ldh_s r0, [r2, _thread_offset_to_preempt]
+	mov r3, _NON_PREEMPT_THRESHOLD
+	brhs.d r0, r3, _rirq_no_reschedule
+
+	/*
+	 * Both (a)reschedule and (b)non-reschedule cases need to load the
+	 * current thread's stack, but don't have to use it until the decision
+	 * is taken: load the delay slots with the 'load stack pointer'
+	 * instruction.
 	 *
 	 * a) needs to load it to save outgoing context.
 	 * b) needs to load it to restore the interrupted context.
 	 */
-	/* coop thread ? do not schedule */
-	ldb.x r0, [r2, _thread_offset_to_prio]
-	cmp_s r0, 0
-	blt.d _rirq_no_reschedule
 	ld sp, [r2, _thread_offset_to_sp]
 
-	/* scheduler locked ? do not schedule */
-	ldb_s r0, [r2, _thread_offset_to_sched_locked]
-#ifdef CONFIG_ARC_STACK_CHECKING
-	breq.d r0, 0, _rirq_reschedule_check
-	ld sp, [r2, _thread_offset_to_sp]
-	b _rirq_no_reschedule
-	/* This branch is a bit far */
-_rirq_reschedule_check:
-#else
-	brne.d r0, 0, _rirq_no_reschedule
-	ld sp, [r2, _thread_offset_to_sp]
-#endif
-
 	/* check if the current thread needs to be rescheduled */
 	ld_s r0, [r1, _kernel_offset_to_ready_q_cache]
-	breq.d r0, r2, _rirq_no_reschedule
-
-	/* delay slot: always load the current thread's stack */
-	ld sp, [r2, _thread_offset_to_sp]
+	breq r0, r2, _rirq_no_reschedule
 
 	/* cached thread to run is in r0, fall through */
diff --git a/arch/arm/core/exc_exit.S b/arch/arm/core/exc_exit.S
index eb8992fd8e9..e02cd7bfccd 100644
--- a/arch/arm/core/exc_exit.S
+++ b/arch/arm/core/exc_exit.S
@@ -84,21 +84,14 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
 	ldr r0, =_kernel
 	ldr r1, [r0, #_kernel_offset_to_current]
 
-#ifdef CONFIG_CPU_CORTEX_M0_M0PLUS
-	movs r3, #_thread_offset_to_prio
-	ldrsb r2, [r1, r3]
-#else
-	ldrsb r2, [r1, #_thread_offset_to_prio]
-#endif
-	ldrb r3, [r1, #_thread_offset_to_sched_locked]
-	/* coop thread ? do not schedule */
-	cmp r2, #0
-	blt _EXIT_EXC
-
-	/* scheduler locked ? do not schedule */
-	cmp r3, #0
-	bne _EXIT_EXC
+	/*
+	 * Non-preemptible thread? Do not schedule (see explanation of
+	 * preempt field in kernel_structs.h).
+	 */
+	ldrh r2, [r1, #_thread_offset_to_preempt]
+	cmp r2, #_PREEMPT_THRESHOLD
+	bhi _EXIT_EXC
 
 	ldr r0, [r0, _kernel_offset_to_ready_q_cache]
 	cmp r0, r1
diff --git a/arch/nios2/core/exception.S b/arch/nios2/core/exception.S
index 9db5e1f137c..ff1536ce440 100644
--- a/arch/nios2/core/exception.S
+++ b/arch/nios2/core/exception.S
@@ -131,13 +131,13 @@ on_irq_stack:
 	 * switch
 	 */
 
-	/* Do not reschedule coop threads (threads that have negative prio) */
-	ldb r12, _thread_offset_to_prio(r11)
-	blt r12, zero, no_reschedule
-
-	/* Do not reschedule if scheduler is locked */
-	ldb r12, _thread_offset_to_sched_locked(r11)
-	bne r12, zero, no_reschedule
+	/*
+	 * Non-preemptible thread? Do not schedule (see explanation of
+	 * preempt field in kernel_structs.h).
+	 */
+	ldhu r12, _thread_offset_to_preempt(r11)
+	movui r3, _NON_PREEMPT_THRESHOLD
+	bgeu r12, r3, no_reschedule
 
 	/* Call into the kernel to see if a scheduling decision is necessary */
 	ldw r2, _kernel_offset_to_ready_q_cache(r10)
diff --git a/arch/x86/core/intstub.S b/arch/x86/core/intstub.S
index b35ced521a8..d81f90b8648 100644
--- a/arch/x86/core/intstub.S
+++ b/arch/x86/core/intstub.S
@@ -288,18 +288,11 @@ alreadyOnIntStack:
 	movl _kernel_offset_to_current(%ecx), %edx
 
 	/*
-	 * Determine whether the execution of the ISR requires a context
-	 * switch. If the thread is preemptible, scheduler is not locked and
-	 * a higher priority thread exists, a _Swap() needs to occur.
+	 * Non-preemptible thread? Do not schedule (see explanation of
+	 * preempt field in kernel_structs.h).
 	 */
-
-	/* do not reschedule coop threads (negative priority) */
-	cmpb $0, _thread_offset_to_prio(%edx)
-	jl noReschedule
-
-	/* do not reschedule if scheduler is locked */
-	cmpb $0, _thread_offset_to_sched_locked(%edx)
-	jne noReschedule
+	cmpw $_NON_PREEMPT_THRESHOLD, _thread_offset_to_preempt(%edx)
+	jae noReschedule
 
 	/* reschedule only if the scheduler says that we must do so */
diff --git a/kernel/include/kernel_offsets.h b/kernel/include/kernel_offsets.h
index 63ac010ac2d..546d9fb2194 100644
--- a/kernel/include/kernel_offsets.h
+++ b/kernel/include/kernel_offsets.h
@@ -52,6 +52,7 @@ GEN_OFFSET_SYM(_thread_base_t, execution_flags);
 GEN_OFFSET_SYM(_thread_base_t, thread_state);
 GEN_OFFSET_SYM(_thread_base_t, prio);
 GEN_OFFSET_SYM(_thread_base_t, sched_locked);
+GEN_OFFSET_SYM(_thread_base_t, preempt);
 GEN_OFFSET_SYM(_thread_base_t, swap_data);
 
 GEN_OFFSET_SYM(_thread_t, base);
diff --git a/kernel/include/kernel_structs.h b/kernel/include/kernel_structs.h
index 570a6398dc1..13ae5cf2e91 100644
--- a/kernel/include/kernel_structs.h
+++ b/kernel/include/kernel_structs.h
@@ -65,6 +65,11 @@
 #endif
 
 /* end - execution flags */
 
+/* lowest value of _thread_base.preempt at which a thread is non-preemptible */
+#define _NON_PREEMPT_THRESHOLD 0x0080
+
+/* highest value of _thread_base.preempt at which a thread is preemptible */
+#define _PREEMPT_THRESHOLD (_NON_PREEMPT_THRESHOLD - 1)
 
 #include
@@ -91,11 +96,32 @@ struct _thread_base {
 	/* thread state */
 	uint8_t thread_state;
 
-	/* scheduler lock count */
-	volatile uint8_t sched_locked;
-
-	/* thread priority used to sort linked list */
-	int8_t prio;
+	/*
+	 * scheduler lock count and thread priority
+	 *
+	 * These two fields control the preemptibility of a thread.
+	 *
+	 * When the scheduler is locked, sched_locked is decremented, which
+	 * means that the scheduler is locked for values from 0xff to 0x01. A
+	 * thread is coop if its prio is negative, thus 0x80 to 0xff when the
+	 * value is looked at as unsigned.
+	 *
+	 * Put end-to-end, this means that a thread is non-preemptible if the
+	 * bundled value is greater than or equal to 0x0080.
+	 */
+	union {
+		struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+			uint8_t sched_locked;
+			volatile int8_t prio;
+#else /* LITTLE and PDP */
+			volatile int8_t prio;
+			uint8_t sched_locked;
+#endif
+		};
+		uint16_t preempt;
+	};
 
 	/* data returned by APIs */
 	void *swap_data;
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index f83613ba63c..b80dc5eb8db 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -146,7 +146,8 @@ static inline int _is_coop(struct k_thread *thread)
 static inline int _is_preempt(struct k_thread *thread)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
-	return !_is_coop(thread) && !thread->base.sched_locked;
+	/* explanation in kernel_structs.h */
+	return thread->base.preempt <= _PREEMPT_THRESHOLD;
 #else
 	return 0;
 #endif
diff --git a/kernel/include/offsets_short.h b/kernel/include/offsets_short.h
index 1b7c83159e5..079558b514d 100644
--- a/kernel/include/offsets_short.h
+++ b/kernel/include/offsets_short.h
@@ -65,6 +65,9 @@
 #define _thread_offset_to_sched_locked \
 	(___thread_t_base_OFFSET + ___thread_base_t_sched_locked_OFFSET)
 
+#define _thread_offset_to_preempt \
+	(___thread_t_base_OFFSET + ___thread_base_t_preempt_OFFSET)
+
 #define _thread_offset_to_esf \
 	(___thread_t_arch_OFFSET + ___thread_arch_t_esf_OFFSET)
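
Not part of the patch: the sketch below is a minimal host-side C program illustrating the encoding that the new preempt field relies on, as described in the kernel_structs.h comment above. The names preempt_bundle and is_preemptible are invented for illustration and the little-endian branch of the union is assumed; nothing here is a Zephyr API.

/*
 * Bundle prio (low byte, signed) and sched_locked (high byte) into one
 * 16-bit value, then classify a thread with a single unsigned comparison,
 * which is the test the rewritten exit stubs perform with one 16-bit load.
 */
#include <stdint.h>
#include <stdio.h>

#define NON_PREEMPT_THRESHOLD 0x0080u   /* same value as _NON_PREEMPT_THRESHOLD */

union preempt_bundle {                  /* illustrative stand-in for the _thread_base union */
	struct {
		int8_t prio;            /* negative prio = cooperative thread */
		uint8_t sched_locked;   /* non-zero = scheduler locked (0xff..0x01) */
	};
	uint16_t preempt;               /* bundled value the assembly stubs load */
};

static int is_preemptible(union preempt_bundle b)
{
	return b.preempt < NON_PREEMPT_THRESHOLD;
}

int main(void)
{
	union preempt_bundle b;

	b.prio = 5;            /* preemptible priority */
	b.sched_locked = 0;    /* scheduler unlocked */
	printf("prio 5, unlocked -> %d\n", is_preemptible(b));  /* 1: 0x0005 < 0x0080 */

	b.sched_locked--;      /* locking decrements: 0x00 -> 0xff */
	printf("prio 5, locked   -> %d\n", is_preemptible(b));  /* 0: 0xff05 >= 0x0080 */

	b.prio = -1;           /* cooperative thread */
	b.sched_locked = 0;
	printf("prio -1, coop    -> %d\n", is_preemptible(b));  /* 0: 0x00ff >= 0x0080 */

	return 0;
}

The C-side check in ksched.h (thread->base.preempt <= _PREEMPT_THRESHOLD) and the assembly checks (unsigned compare against _NON_PREEMPT_THRESHOLD, or against _PREEMPT_THRESHOLD with bhi on ARM) express the same condition, since _PREEMPT_THRESHOLD is defined as _NON_PREEMPT_THRESHOLD - 1.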