kernel/sched: arch/x86_64: Support synchronous k_thread_abort() in SMP

Thread abort currently doesn't work if the thread is scheduled on a
different CPU, because we have no way of delivering an interrupt to
the other CPU to force the issue.  This patch adds a simple framework
for an architecture to provide such an IPI, implements it for x86_64,
and uses it to implement a spin loop in abort for the case where the
thread is currently scheduled elsewhere.
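
For illustration (not part of the commit), the scenario that spin loop
covers looks like this from the application's point of view.  The
worker thread, stack size, priority and delays below are hypothetical
test code; only k_thread_create()/k_thread_abort() are the real APIs
involved:

/* Hypothetical demo: abort a thread that is actively running on
 * another CPU.
 */
#include <zephyr.h>

K_THREAD_STACK_DEFINE(worker_stack, 1024);
static struct k_thread worker;

static void worker_fn(void *a, void *b, void *c)
{
	ARG_UNUSED(a);
	ARG_UNUSED(b);
	ARG_UNUSED(c);

	for (;;) {
		k_busy_wait(100);	/* spin without ever sleeping */
	}
}

void abort_demo(void)
{
	k_tid_t tid = k_thread_create(&worker, worker_stack,
				      K_THREAD_STACK_SIZEOF(worker_stack),
				      worker_fn, NULL, NULL, NULL,
				      K_PRIO_PREEMPT(1), 0, K_NO_WAIT);

	k_sleep(10);	/* let it start spinning on the other CPU */

	/* With SCHED_IPI_SUPPORTED this returns only after the worker
	 * has actually stopped running; without it we wait for the
	 * other CPU to take some interrupt.
	 */
	k_thread_abort(tid);
}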

On SMP architectures (xtensa) where no such IPI is implemented, we
fall back to waiting on an arbitrary interrupt to occur.  This "works"
for typical code (and all current tests), but of course it cannot be
guaranteed on such an architecture that k_thread_abort() will return
in finite time (e.g. the other thread on the other CPU might have
taken a spinlock and entered an infinite loop, so it will never
receive an interrupt to terminate itself)!
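
To make that caveat concrete, here is a hypothetical victim thread
(illustrative only, written against the existing k_spinlock API) that
defeats the no-IPI fallback:

#include <zephyr.h>
#include <spinlock.h>

static struct k_spinlock lock;

/* A thread like this never takes the "arbitrary interrupt" the
 * fallback relies on: k_spin_lock() masks interrupts on its CPU and
 * they are never re-enabled, so the abort check on interrupt exit
 * never runs there and k_thread_abort() on the other CPU spins
 * forever.
 */
static void stuck_fn(void *a, void *b, void *c)
{
	ARG_UNUSED(a);
	ARG_UNUSED(b);
	ARG_UNUSED(c);

	k_spinlock_key_t key = k_spin_lock(&lock);

	for (;;) {
		/* never releases the lock, never unmasks interrupts */
	}

	k_spin_unlock(&lock, key);	/* never reached */
}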

On non-SMP architectures this patch changes no code paths at all.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Andy Ross 2019-02-19 16:03:39 -08:00 committed by Anas Nashif
commit 42ed12a387
9 changed files with 123 additions and 11 deletions

@@ -35,6 +35,7 @@ config X86
 config X86_64
 	bool "x86_64 architecture"
 	select ATOMIC_OPERATIONS_BUILTIN
+	select SCHED_IPI_SUPPORTED
 
 config NIOS2
 	bool "Nios II Gen 2 architecture"

@@ -10,6 +10,12 @@
 #include <irq_offload.h>
 #include "xuk.h"
 
+/* Always pick a lowest-priority interrupt for scheduling IPIs; by
+ * definition they're done on behalf of thread-mode code and should
+ * never preempt a true device interrupt
+ */
+#define SCHED_IPI_VECTOR 0x20
+
 struct device;
 
 struct NANO_ESF {
@@ -112,6 +118,26 @@ void __weak x86_apic_timer_isr(void *arg, int code)
 	ARG_UNUSED(code);
 }
 
+static void sched_ipi_handler(void *arg, int err)
+{
+	ARG_UNUSED(arg);
+	ARG_UNUSED(err);
+
+#ifdef CONFIG_SMP
+	z_sched_ipi();
+#endif
+}
+
+void z_arch_sched_ipi(void)
+{
+	_apic.ICR_HI = (struct apic_icr_hi) {};
+	_apic.ICR_LO = (struct apic_icr_lo) {
+		.delivery_mode = FIXED,
+		.vector = SCHED_IPI_VECTOR,
+		.shorthand = NOTSELF,
+	};
+}
+
 /* Called from xuk layer on actual CPU start */
 void _cpu_start(int cpu)
 {
@@ -121,6 +147,9 @@ void _cpu_start(int cpu)
 	xuk_set_isr(INT_APIC_LVT_TIMER, 13, x86_apic_timer_isr, 0);
 	_apic.INIT_COUNT = 0;
 
+	xuk_set_isr(XUK_INT_RAW_VECTOR(SCHED_IPI_VECTOR),
+		    -1, sched_ipi_handler, 0);
+
 #ifdef CONFIG_IRQ_OFFLOAD
 	xuk_set_isr(XUK_INT_RAW_VECTOR(CONFIG_IRQ_OFFLOAD_VECTOR),
 		    -1, irq_offload_handler, 0);

@@ -102,4 +102,6 @@ extern int x86_64_except_reason;
 		__asm__ volatile("int $5"); \
 	} while (false)
 
+void z_arch_sched_ipi(void);
+
 #endif /* _KERNEL_ARCH_FUNC_H */

@@ -715,6 +715,17 @@ config MP_NUM_CPUS
 	  Number of multiprocessing-capable cores available to the
 	  multicpu API and SMP features.
 
+config SCHED_IPI_SUPPORTED
+	bool "Architecture supports broadcast interprocessor interrupts"
+	help
+	  True if the architecture supports a call to
+	  z_arch_sched_ipi() to broadcast an interrupt that will call
+	  z_sched_ipi() on other CPUs in the system.  Required for
+	  k_thread_abort() to operate with reasonable latency
+	  (otherwise we might have to wait for the other thread to
+	  take an interrupt, which can be arbitrarily far in the
+	  future).
+
 endmenu
 
 config TICKLESS_IDLE

@@ -46,6 +46,9 @@
 /* Thread is suspended */
 #define _THREAD_SUSPENDED (BIT(4))
 
+/* Thread is being aborted (SMP only) */
+#define _THREAD_ABORTING (BIT(5))
+
 /* Thread is present in the ready queue */
 #define _THREAD_QUEUED (BIT(6))

@@ -53,6 +53,8 @@ struct k_thread *z_find_first_thread_to_unpend(_wait_q_t *wait_q,
 					       struct k_thread *from);
 void idle(void *a, void *b, void *c);
 void z_time_slice(int ticks);
+void z_sched_abort(struct k_thread *thread);
+void z_sched_ipi(void);
 
 static inline void z_pend_curr_unlocked(_wait_q_t *wait_q, s32_t timeout)
 {

@@ -464,13 +464,6 @@ void z_unpend_thread(struct k_thread *thread)
 	(void)z_abort_thread_timeout(thread);
 }
 
-/* FIXME: this API is glitchy when used in SMP. If the thread is
- * currently scheduled on the other CPU, it will silently set it's
- * priority but nothing will cause a reschedule until the next
- * interrupt. An audit seems to show that all current usage is to set
- * priorities on either _current or a pended thread, though, so it's
- * fine for now.
- */
 void z_thread_priority_set(struct k_thread *thread, int prio)
 {
 	bool need_sched = 0;
@@ -489,6 +482,11 @@ void z_thread_priority_set(struct k_thread *thread, int prio)
 	}
 	sys_trace_thread_priority_set(thread);
 
+	if (IS_ENABLED(CONFIG_SMP) &&
+	    !IS_ENABLED(CONFIG_SCHED_IPI_SUPPORTED)) {
+		z_sched_ipi();
+	}
+
 	if (need_sched && _current->base.sched_locked == 0) {
 		z_reschedule_unlocked();
 	}
@@ -594,6 +592,15 @@ void *z_get_next_switch_handle(void *interrupted)
 #endif
 #endif
 
+	/* Some architectures don't have a working IPI, so the best we
+	 * can do there is check the abort status of the current
+	 * thread here on ISR exit
+	 */
+	if (IS_ENABLED(CONFIG_SMP) &&
+	    !IS_ENABLED(CONFIG_SCHED_IPI_SUPPORTED)) {
+		z_sched_ipi();
+	}
+
 	z_check_stack_sentinel();
 
 	return _current->switch_handle;
@@ -957,8 +964,61 @@ void z_impl_k_wakeup(k_tid_t thread)
 	if (!z_is_in_isr()) {
 		z_reschedule_unlocked();
 	}
+
+	if (IS_ENABLED(CONFIG_SMP) &&
+	    !IS_ENABLED(CONFIG_SCHED_IPI_SUPPORTED)) {
+		z_sched_ipi();
+	}
 }
 
+#ifdef CONFIG_SMP
+/* Called out of the scheduler interprocessor interrupt. All it does
+ * is flag the current thread as dead if it needs to abort, so the ISR
+ * returns into something else and the other thread which called
+ * k_thread_abort() can finish its work knowing the thing won't be
+ * rescheduled.
+ */
+void z_sched_ipi(void)
+{
+	LOCKED(&sched_spinlock) {
+		if (_current->base.thread_state & _THREAD_ABORTING) {
+			_current->base.thread_state |= _THREAD_DEAD;
+			_current_cpu->swap_ok = true;
+		}
+	}
+}
+
+void z_sched_abort(struct k_thread *thread)
+{
+	if (thread == _current) {
+		z_remove_thread_from_ready_q(thread);
+		return;
+	}
+
+	/* First broadcast an IPI to the other CPUs so they can stop
+	 * it locally. Not all architectures support that, alas. If
+	 * we don't have it, we need to wait for some other interrupt.
+	 */
+	thread->base.thread_state |= _THREAD_ABORTING;
+#ifdef CONFIG_SCHED_IPI_SUPPORTED
+	z_arch_sched_ipi();
+#endif
+
+	/* Wait for it to be flagged dead either by the CPU it was
+	 * running on or because we caught it idle in the queue
+	 */
+	while ((thread->base.thread_state & _THREAD_DEAD) == 0) {
+		LOCKED(&sched_spinlock) {
+			if (z_is_thread_queued(thread)) {
+				thread->base.thread_state |= _THREAD_DEAD;
+				_priq_run_remove(&_kernel.ready_q.runq, thread);
+				z_mark_thread_as_not_queued(thread);
+			}
+		}
+	}
+}
+#endif
+
 #ifdef CONFIG_USERSPACE
 Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_wakeup, K_OBJ_THREAD, k_tid_t);
 #endif

@@ -60,13 +60,11 @@ void z_smp_release_global_lock(struct k_thread *thread)
 	}
 }
-#endif
 
 extern k_thread_stack_t _interrupt_stack1[];
 extern k_thread_stack_t _interrupt_stack2[];
 extern k_thread_stack_t _interrupt_stack3[];
 
-#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1
+#if CONFIG_MP_NUM_CPUS > 1
 static void smp_init_top(int key, void *arg)
 {
 	atomic_t *start_flag = arg;
@@ -89,6 +87,7 @@ static void smp_init_top(int key, void *arg)
 	CODE_UNREACHABLE;
 }
+#endif
 
 void smp_init(void)
 {
@@ -111,4 +110,5 @@ void smp_init(void)
 	(void)atomic_set(&start_flag, 1);
 }
-#endif
+
+#endif /* CONFIG_SMP */

@@ -582,6 +582,10 @@ void z_thread_single_abort(struct k_thread *thread)
 		thread->fn_abort();
 	}
 
+	if (IS_ENABLED(CONFIG_SMP)) {
+		z_sched_abort(thread);
+	}
+
 	if (z_is_thread_ready(thread)) {
 		z_remove_thread_from_ready_q(thread);
 	} else {