kernel: Enable SMP

Now that all the pieces are in place, enable SMP for real:

Initialize the CPU records, launch the CPUs at the end of kernel
initialization, have them wait for a flag to release them into the
scheduler, then enter the runnable threads via _Swap().

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Andy Ross, 2018-01-17 11:34:50 -08:00; committed by Anas Nashif
commit bdcd18a744
3 changed files with 70 additions and 0 deletions
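
For orientation, here is a host-side sketch of the start-flag handshake described in the commit message: the boot CPU launches the secondaries, which spin on an atomic flag until they are released into the scheduler. This is plain C11/pthreads run in user space, not kernel code; the names only mirror smp_init() and _smp_init_top() from the diff below, and the simulation stands in for _arch_start_cpu() and _Swap().

/*
 * Illustration only: simulate the SMP start-flag handshake in user
 * space.  "Secondary CPUs" are pthreads that spin on an atomic flag
 * set by the boot thread, mirroring _smp_init_top()/smp_init() below.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NUM_CPUS 4

static atomic_int start_flag;         /* stands in for smp_init()'s start_flag */

static void *cpu_init_top(void *arg)  /* stands in for _smp_init_top() */
{
	long id = (long)arg;

	/* Wait for the boot CPU's signal to begin scheduling */
	while (!atomic_load(&start_flag)) {
		usleep(100);          /* stands in for k_busy_wait(100) */
	}

	/* The real code swaps out of a dummy thread here via _Swap() */
	printf("cpu %ld released into the scheduler\n", id);
	return NULL;
}

int main(void)
{
	pthread_t cpu[NUM_CPUS];

	atomic_store(&start_flag, 0);

	/* stands in for the _arch_start_cpu() calls in smp_init() */
	for (long i = 1; i < NUM_CPUS; i++) {
		pthread_create(&cpu[i], NULL, cpu_init_top, (void *)i);
	}

	/* Boot CPU finishes its own init, then releases the others */
	atomic_store(&start_flag, 1);

	for (long i = 1; i < NUM_CPUS; i++) {
		pthread_join(cpu[i], NULL);
	}
	return 0;
}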

kernel/include/nano_internal.h

@@ -174,6 +174,8 @@ extern void _thread_monitor_exit(struct k_thread *thread);
} while (0)
#endif /* CONFIG_THREAD_MONITOR */
extern void smp_init(void);
#ifdef __cplusplus
}
#endif

kernel/init.c

@@ -359,14 +359,23 @@ static void prepare_multithreading(struct k_thread *dummy_thread)
#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1
init_idle_thread(_idle_thread1, _idle_stack1);
_kernel.cpus[1].id = 1;
_kernel.cpus[1].irq_stack = K_THREAD_STACK_BUFFER(_interrupt_stack1)
+ CONFIG_ISR_STACK_SIZE;
#endif
#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 2
init_idle_thread(_idle_thread2, _idle_stack2);
_kernel.cpus[2].id = 2;
_kernel.cpus[2].irq_stack = K_THREAD_STACK_BUFFER(_interrupt_stack2)
+ CONFIG_ISR_STACK_SIZE;
#endif
#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 3
init_idle_thread(_idle_thread3, _idle_stack3);
_kernel.cpus[3].id = 3;
_kernel.cpus[3].irq_stack = K_THREAD_STACK_BUFFER(_interrupt_stack3)
+ CONFIG_ISR_STACK_SIZE;
#endif
initialize_timeouts();
@@ -435,6 +444,10 @@ FUNC_NORETURN void _Cstart(void)
__stack_chk_guard = (void *)sys_rand32_get();
#endif
#ifdef CONFIG_SMP
smp_init();
#endif
/* display boot banner */
switch_to_main_thread();

kernel/smp.c

@@ -7,6 +7,8 @@
#include <kernel.h>
#include <kernel_structs.h>
#include <spinlock.h>
#include <kswap.h>
#include <nano_internal.h>
static struct k_spinlock global_spinlock;
@@ -48,3 +50,56 @@ void _smp_global_unlock(unsigned int key)
recursive_count = 0;
k_spin_unlock(&global_spinlock, sk);
}
extern k_thread_stack_t _interrupt_stack1[];
extern k_thread_stack_t _interrupt_stack2[];
extern k_thread_stack_t _interrupt_stack3[];
#ifdef CONFIG_SMP
static void _smp_init_top(int key, void *arg)
{
atomic_t *start_flag = arg;
/* Wait for the signal to begin scheduling */
do {
k_busy_wait(100);
} while (!atomic_get(start_flag));
/* Switch out of a dummy thread. Trick cribbed from the main
* thread init. Should probably unify implementations.
*/
struct k_thread dummy_thread = {
.base.user_options = K_ESSENTIAL,
.base.thread_state = _THREAD_DUMMY,
};
_arch_curr_cpu()->current = &dummy_thread;
_Swap(irq_lock());
CODE_UNREACHABLE;
}
#endif
void smp_init(void)
{
atomic_t start_flag;
atomic_clear(&start_flag);
#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1
_arch_start_cpu(1, _interrupt_stack1, CONFIG_ISR_STACK_SIZE,
_smp_init_top, &start_flag);
#endif
#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 2
_arch_start_cpu(2, _interrupt_stack2, CONFIG_ISR_STACK_SIZE,
_smp_init_top, &start_flag);
#endif
#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 3
_arch_start_cpu(3, _interrupt_stack3, CONFIG_ISR_STACK_SIZE,
_smp_init_top, &start_flag);
#endif
atomic_set(&start_flag, 1);
}