diff --git a/kernel/include/kernel_internal.h b/kernel/include/kernel_internal.h
index 2a3e986a435..815f8b70e54 100644
--- a/kernel/include/kernel_internal.h
+++ b/kernel/include/kernel_internal.h
@@ -174,6 +174,8 @@ extern void _thread_monitor_exit(struct k_thread *thread);
 } while (0)
 #endif /* CONFIG_THREAD_MONITOR */
 
+extern void smp_init(void);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/kernel/init.c b/kernel/init.c
index 4d2f02996f5..fab90838352 100644
--- a/kernel/init.c
+++ b/kernel/init.c
@@ -359,14 +359,23 @@ static void prepare_multithreading(struct k_thread *dummy_thread)
 
 #if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1
 	init_idle_thread(_idle_thread1, _idle_stack1);
+	_kernel.cpus[1].id = 1;
+	_kernel.cpus[1].irq_stack = K_THREAD_STACK_BUFFER(_interrupt_stack1) +
+		CONFIG_ISR_STACK_SIZE;
 #endif
 
 #if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 2
 	init_idle_thread(_idle_thread2, _idle_stack2);
+	_kernel.cpus[2].id = 2;
+	_kernel.cpus[2].irq_stack = K_THREAD_STACK_BUFFER(_interrupt_stack2) +
+		CONFIG_ISR_STACK_SIZE;
 #endif
 
 #if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 3
 	init_idle_thread(_idle_thread3, _idle_stack3);
+	_kernel.cpus[3].id = 3;
+	_kernel.cpus[3].irq_stack = K_THREAD_STACK_BUFFER(_interrupt_stack3) +
+		CONFIG_ISR_STACK_SIZE;
 #endif
 
 	initialize_timeouts();
@@ -435,6 +444,10 @@ FUNC_NORETURN void _Cstart(void)
 	__stack_chk_guard = (void *)sys_rand32_get();
 #endif
 
+#ifdef CONFIG_SMP
+	smp_init();
+#endif
+
 	/* display boot banner */
 
 	switch_to_main_thread();
diff --git a/kernel/smp.c b/kernel/smp.c
index 1a1b6686885..589cd62d599 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -7,6 +7,8 @@
 #include <kernel.h>
 #include <kernel_structs.h>
 #include <spinlock.h>
+#include <kswap.h>
+#include <kernel_internal.h>
 
 static struct k_spinlock global_spinlock;
 
@@ -48,3 +50,56 @@ void _smp_global_unlock(unsigned int key)
 	recursive_count = 0;
 	k_spin_unlock(&global_spinlock, sk);
 }
+
+extern k_thread_stack_t _interrupt_stack1[];
+extern k_thread_stack_t _interrupt_stack2[];
+extern k_thread_stack_t _interrupt_stack3[];
+
+#ifdef CONFIG_SMP
+static void _smp_init_top(int key, void *arg)
+{
+	atomic_t *start_flag = arg;
+
+	/* Wait for the signal to begin scheduling */
+	do {
+		k_busy_wait(100);
+	} while (!atomic_get(start_flag));
+
+	/* Switch out of a dummy thread.  Trick cribbed from the main
+	 * thread init.  Should probably unify implementations.
+	 */
+	struct k_thread dummy_thread = {
+		.base.user_options = K_ESSENTIAL,
+		.base.thread_state = _THREAD_DUMMY,
+	};
+
+	_arch_curr_cpu()->current = &dummy_thread;
+	_Swap(irq_lock());
+
+	CODE_UNREACHABLE;
+}
+#endif
+
+void smp_init(void)
+{
+	atomic_t start_flag;
+
+	atomic_clear(&start_flag);
+
+#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1
+	_arch_start_cpu(1, _interrupt_stack1, CONFIG_ISR_STACK_SIZE,
+			_smp_init_top, &start_flag);
+#endif
+
+#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 2
+	_arch_start_cpu(2, _interrupt_stack2, CONFIG_ISR_STACK_SIZE,
+			_smp_init_top, &start_flag);
+#endif
+
+#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 3
+	_arch_start_cpu(3, _interrupt_stack3, CONFIG_ISR_STACK_SIZE,
+			_smp_init_top, &start_flag);
+#endif
+
+	atomic_set(&start_flag, 1);
+}
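
As a side illustration (not part of the patch): the start-flag handshake implemented by smp_init() and _smp_init_top() above can be sketched in standalone form. The snippet below uses plain C11 atomics and POSIX threads as stand-ins for the Zephyr primitives (atomic_clear()/atomic_get()/atomic_set(), k_busy_wait(), _arch_start_cpu()); the names secondary_cpu and cpus are made up for the sketch. Secondary "CPUs" spin on the flag until the boot CPU releases them all at once.

/* Standalone sketch of the smp_init() start-flag handshake.
 * pthreads stand in for secondary CPUs; C11 atomics stand in for
 * Zephyr's atomic_clear()/atomic_get()/atomic_set().
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int start_flag;

/* Plays the role of _smp_init_top(): spin until released, then hand
 * control to the scheduler (here: just print and exit).
 */
static void *secondary_cpu(void *arg)
{
	long id = (long)arg;

	while (!atomic_load(&start_flag)) {
		usleep(100);	/* stands in for k_busy_wait(100) */
	}
	printf("cpu %ld released; would _Swap() into the scheduler here\n", id);
	return NULL;
}

int main(void)
{
	pthread_t cpus[3];

	atomic_store(&start_flag, 0);

	/* Plays the role of the _arch_start_cpu() calls for CPUs 1..3. */
	for (long i = 1; i <= 3; i++) {
		pthread_create(&cpus[i - 1], NULL, secondary_cpu, (void *)i);
	}

	/* Boot CPU finishes its own init, then releases all CPUs at once. */
	atomic_store(&start_flag, 1);

	for (int i = 0; i < 3; i++) {
		pthread_join(cpus[i], NULL);
	}
	return 0;
}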