diff --git a/arch/arc/core/fast_irq.S b/arch/arc/core/fast_irq.S
index a1272a3dff0..9fb04f154cc 100644
--- a/arch/arc/core/fast_irq.S
+++ b/arch/arc/core/fast_irq.S
@@ -149,13 +149,6 @@ SECTION_FUNC(TEXT, _firq_exit)
 	mov_s r1, _kernel
 	ld_s r2, [r1, _kernel_offset_to_current]
 
-	/*
-	 * Non-preemptible thread ? Do not schedule (see explanation of
-	 * preempt field in kernel_struct.h).
-	 */
-	ldh_s r0, [r2, _thread_offset_to_preempt]
-	brhs r0, _NON_PREEMPT_THRESHOLD, _firq_no_reschedule
-
 	/* Check if the current thread (in r2) is the cached thread */
 	ld_s r0, [r1, _kernel_offset_to_ready_q_cache]
 	brne r0, r2, _firq_reschedule
diff --git a/arch/arc/core/regular_irq.S b/arch/arc/core/regular_irq.S
index ebca20bcf48..cdbac487dfc 100644
--- a/arch/arc/core/regular_irq.S
+++ b/arch/arc/core/regular_irq.S
@@ -109,16 +109,6 @@ SECTION_FUNC(TEXT, _rirq_exit)
 	 * point on until return from interrupt.
 	 */
 
-
-	/*
-	 * Non-preemptible thread ? Do not schedule (see explanation of
-	 * preempt field in kernel_struct.h).
-	 */
-	ldh_s r0, [r2, _thread_offset_to_preempt]
-	mov r3, _NON_PREEMPT_THRESHOLD
-	cmp_s r0, r3
-	bhs.d _rirq_no_reschedule
-
 	/*
 	 * Both (a)reschedule and (b)non-reschedule cases need to load the
 	 * current thread's stack, but don't have to use it until the decision
diff --git a/arch/arm/core/exc_exit.S b/arch/arm/core/exc_exit.S
index b7363e6b0df..907001286c7 100644
--- a/arch/arm/core/exc_exit.S
+++ b/arch/arm/core/exc_exit.S
@@ -75,14 +75,6 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
 	ldr r1, [r0, #_kernel_offset_to_current]
 
-	/*
-	 * Non-preemptible thread ? Do not schedule (see explanation of
-	 * preempt field in kernel_struct.h).
-	 */
-	ldrh r2, [r1, #_thread_offset_to_preempt]
-	cmp r2, #_PREEMPT_THRESHOLD
-	bhi _EXIT_EXC
-
 	ldr r0, [r0, _kernel_offset_to_ready_q_cache]
 	cmp r0, r1
 	beq _EXIT_EXC
diff --git a/arch/nios2/core/exception.S b/arch/nios2/core/exception.S
index d35d1f5d0ea..599f677f321 100644
--- a/arch/nios2/core/exception.S
+++ b/arch/nios2/core/exception.S
@@ -123,14 +123,6 @@ on_irq_stack:
 	 * switch
 	 */
 
-	/*
-	 * Non-preemptible thread ? Do not schedule (see explanation of
-	 * preempt field in kernel_struct.h).
-	 */
-	ldhu r12, _thread_offset_to_preempt(r11)
-	movui r3, _NON_PREEMPT_THRESHOLD
-	bgeu r12, r3, no_reschedule
-
 	/* Call into the kernel to see if a scheduling decision is necessary */
 	ldw r2, _kernel_offset_to_ready_q_cache(r10)
 	beq r2, r11, no_reschedule
diff --git a/arch/riscv32/core/isr.S b/arch/riscv32/core/isr.S
index 867d2019250..7b7e3741eb5 100644
--- a/arch/riscv32/core/isr.S
+++ b/arch/riscv32/core/isr.S
@@ -291,14 +291,6 @@ on_thread_stack:
 	/* Get pointer to _kernel.current */
 	lw t2, _kernel_offset_to_current(t1)
 
-	/*
-	 * If non-preemptible thread, do not schedule
-	 * (see explanation of preempt field in kernel_structs.h
-	 */
-	lhu t3, _thread_offset_to_preempt(t2)
-	li t4, _NON_PREEMPT_THRESHOLD
-	bgeu t3, t4, no_reschedule
-
 	/*
 	 * Check if next thread to schedule is current thread.
 	 * If yes do not perform a reschedule
diff --git a/arch/x86/core/intstub.S b/arch/x86/core/intstub.S
index 151b090da13..e232c276614 100644
--- a/arch/x86/core/intstub.S
+++ b/arch/x86/core/intstub.S
@@ -255,14 +255,6 @@ alreadyOnIntStack:
 #ifdef CONFIG_PREEMPT_ENABLED
 	movl _kernel_offset_to_current(%ecx), %edx
 
-	/*
-	 * Non-preemptible thread ? Do not schedule (see explanation of
-	 * preempt field in kernel_struct.h).
-	 */
-	cmpw $_NON_PREEMPT_THRESHOLD, _thread_offset_to_preempt(%edx)
-	jae noReschedule
-
-
 	/* reschedule only if the scheduler says that we must do so */
 	cmpl %edx, _kernel_offset_to_ready_q_cache(%ecx)
 	je noReschedule
diff --git a/arch/x86/core/irq_manage.c b/arch/x86/core/irq_manage.c
index 419c8d9aeb5..e4e509affe0 100644
--- a/arch/x86/core/irq_manage.c
+++ b/arch/x86/core/irq_manage.c
@@ -82,11 +82,9 @@ void _arch_isr_direct_footer(int swap)
 	 *
 	 * 1) swap argument was enabled to this function
 	 * 2) We are not in a nested interrupt
-	 * 3) Current thread is preemptible
-	 * 4) Next thread to run in the ready queue is not this thread
+	 * 3) Next thread to run in the ready queue is not this thread
 	 */
 	if (swap && !_kernel.nested &&
-	    _current->base.preempt < _NON_PREEMPT_THRESHOLD &&
 	    _kernel.ready_q.cache != _current) {
 		unsigned int flags;
diff --git a/arch/xtensa/core/xt_zephyr.S b/arch/xtensa/core/xt_zephyr.S
index 69ebfd72d57..228d443e1f8 100644
--- a/arch/xtensa/core/xt_zephyr.S
+++ b/arch/xtensa/core/xt_zephyr.S
@@ -191,13 +191,6 @@ _zxt_int_exit:
 	wsr a3, CPENABLE /* disable all co-processors */
 #endif
 	l32i a3, a2, KERNEL_OFFSET(current) /* _thread := _kernel.current */
-	/*
-	 * Non-preemptible thread ? Do not schedule (see explanation of
-	 * preempt field in kernel_struct.h).
-	 */
-	movi a4, _NON_PREEMPT_THRESHOLD
-	l16ui a5, a3, THREAD_OFFSET(preempt)
-	bgeu a5, a4, .noReschedule
 	/* _thread := _kernel.ready_q.cache */
 	l32i a3, a2, KERNEL_OFFSET(ready_q_cache)
 .noReschedule:
diff --git a/kernel/sched.c b/kernel/sched.c
index c7fa3bb3884..059a2cd0efd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -176,7 +176,6 @@ static void update_cache(int preempt_ok)
 			th = _current;
 		}
 	}
-	_kernel.ready_q.cache = th;
 #endif
 }
@@ -339,7 +338,6 @@ void _thread_priority_set(struct k_thread *thread, int prio)
 int _reschedule(int key)
 {
 	if (!_is_in_isr() &&
-	    _is_preempt(_current) &&
 	    _get_next_ready_thread() != _current) {
 		return _Swap(key);
 	}
@@ -389,19 +387,10 @@ struct k_thread *_get_next_ready_thread(void)
 #ifdef CONFIG_USE_SWITCH
 void *_get_next_switch_handle(void *interrupted)
 {
-	if (!_is_preempt(_current) &&
-	    !(_current->base.thread_state & _THREAD_DEAD)) {
-		return interrupted;
-	}
-
 	_current->switch_handle = interrupted;
 	LOCKED(&sched_lock) {
-		struct k_thread *next = next_up();
-
-		if (next != _current) {
-			_current = next;
-		}
+		_current = _get_next_ready_thread();
 	}
 
 	_check_stack_sentinel();
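
Note on the change as a whole: every architecture's interrupt-exit path used to repeat the same two-step decision (is the current thread preemptible? is a different thread cached as next to run?), and this patch deletes the first step everywhere, leaving the comparison against _kernel.ready_q.cache as the only reschedule gate in the arch code. The C sketch below contrasts the two shapes of that decision. It is illustrative only: the struct layouts are simplified stand-ins (the real ones live in kernel_structs.h and are reached from assembly via generated offsets such as _thread_offset_to_preempt), and the threshold value is an assumption.

/*
 * Minimal sketch of the interrupt-exit reschedule decision before and
 * after this patch. Hypothetical types and helpers for illustration;
 * not the real Zephyr definitions.
 */
#include <stdbool.h>

#define _NON_PREEMPT_THRESHOLD 0x0080U  /* assumed value of the removed threshold */

struct k_thread {
	unsigned short preempt;  /* >= threshold means "do not preempt me" */
};

struct kernel_state {
	struct k_thread *current;        /* stands in for _kernel.current */
	struct k_thread *ready_q_cache;  /* stands in for _kernel.ready_q.cache */
};

/* Before: each arch exit stub performed both tests in assembly. */
static bool must_reschedule_before(const struct kernel_state *k)
{
	if (k->current->preempt >= _NON_PREEMPT_THRESHOLD) {
		return false;  /* cooperative thread: skip the scheduler */
	}
	return k->ready_q_cache != k->current;
}

/* After: only the cache comparison remains, so the preemptibility
 * policy is assumed to live wherever ready_q_cache is computed.
 */
static bool must_reschedule_after(const struct kernel_state *k)
{
	return k->ready_q_cache != k->current;
}

The same shift is visible in the C hunks: _arch_isr_direct_footer() and _reschedule() drop their explicit preempt conditions and rely on the ready-queue comparison alone.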