zephyr/kernel/smp.c
Andrew Boie 80a0d9d16b kernel: interrupt/idle stacks/threads as array
The set of interrupt stacks is now expressed as an array. We
also define the idle threads and their associated stacks this
way. This allows for iteration in cases where we have multiple
CPUs.

There is now a centralized declaration in kernel_internal.h.

On uniprocessor systems, z_interrupt_stacks has one element
and can be used in the same way as _interrupt_stack.
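
For illustration, the centralized declarations in kernel_internal.h can be
pictured roughly like this (a sketch only; the exact macro used and the
names of the idle thread objects are assumptions, not a quote of the patch):

    extern K_THREAD_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
                                       CONFIG_ISR_STACK_SIZE);
    extern struct k_thread z_idle_threads[CONFIG_MP_NUM_CPUS];

With CONFIG_MP_NUM_CPUS=1 the array degenerates to a single stack, so
z_interrupt_stacks[0] plays the role that _interrupt_stack used to.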

The IRQ stack for CPU 0 is now set in init.c instead of in
arch code.
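
A sketch of what that init.c hookup might look like (treat the exact
expression as an assumption based on the kernel structures of that era,
not as the literal patch):

    /* CPU 0 runs its IRQs on the first entry of the stack array;
     * irq_stack points at the top since stacks grow downward.
     */
    _kernel.cpus[0].irq_stack = Z_THREAD_STACK_BUFFER(z_interrupt_stacks[0]) +
                                K_THREAD_STACK_SIZEOF(z_interrupt_stacks[0]);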

The extern definition of the main thread stack is now removed;
this doesn't need to be in a header.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
2020-03-16 23:17:36 +02:00

/*
 * Copyright (c) 2018 Intel corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <kernel_structs.h>
#include <spinlock.h>
#include <kswap.h>
#include <kernel_internal.h>

#ifdef CONFIG_SMP
/* Global lock shared by all CPUs, emulating irq_lock() semantics on SMP */
static atomic_t global_lock;

/* Set by the boot CPU to release secondary CPUs into the scheduler */
static atomic_t start_flag;

/* Backend for irq_lock() under SMP: in addition to masking interrupts on
 * the local CPU, take the global lock so legacy irq_lock() callers still
 * get system-wide exclusion.  The hold count is tracked per thread.
 */
unsigned int z_smp_global_lock(void)
{
	unsigned int key = arch_irq_lock();

	if (!_current->base.global_lock_count) {
		while (!atomic_cas(&global_lock, 0, 1)) {
		}
	}

	_current->base.global_lock_count++;

	return key;
}

void z_smp_global_unlock(unsigned int key)
{
	if (_current->base.global_lock_count) {
		_current->base.global_lock_count--;

		if (!_current->base.global_lock_count) {
			atomic_clear(&global_lock);
		}
	}

	arch_irq_unlock(key);
}

/* Re-take the global lock on behalf of a thread that was holding it when
 * it was switched out; called on the swap-in path.
 */
void z_smp_reacquire_global_lock(struct k_thread *thread)
{
	if (thread->base.global_lock_count) {
		arch_irq_lock();

		while (!atomic_cas(&global_lock, 0, 1)) {
		}
	}
}

/* Called from within z_swap(), so assumes lock already held */
void z_smp_release_global_lock(struct k_thread *thread)
{
	if (!thread->base.global_lock_count) {
		atomic_clear(&global_lock);
	}
}

#if CONFIG_MP_NUM_CPUS > 1
/* Initial C entry point for secondary CPUs started via arch_start_cpu() */
static FUNC_NORETURN void smp_init_top(void *arg)
{
	atomic_t *start_flag = arg;

	/* Wait for the signal to begin scheduling */
	while (!atomic_get(start_flag)) {
	}

	/* Switch out of a dummy thread. Trick cribbed from the main
	 * thread init. Should probably unify implementations.
	 */
	struct k_thread dummy_thread = {
		.base.user_options = K_ESSENTIAL,
		.base.thread_state = _THREAD_DUMMY,
	};

	arch_curr_cpu()->current = &dummy_thread;
	smp_timer_init();
	z_swap_unlocked();

	CODE_UNREACHABLE;
}
#endif

void z_smp_init(void)
{
	(void)atomic_clear(&start_flag);

#if defined(CONFIG_SMP) && (CONFIG_MP_NUM_CPUS > 1)
	for (int i = 1; i < CONFIG_MP_NUM_CPUS; i++) {
		arch_start_cpu(i, z_interrupt_stacks[i], CONFIG_ISR_STACK_SIZE,
			       smp_init_top, &start_flag);
	}
#endif

	(void)atomic_set(&start_flag, 1);
}

/* True if the current context is free to migrate between CPUs, i.e. it is
 * not running in an ISR and does not have interrupts locked.
 */
bool z_smp_cpu_mobile(void)
{
	unsigned int k = arch_irq_lock();
	bool pinned = arch_is_in_isr() || !arch_irq_unlocked(k);

	arch_irq_unlock(k);
	return !pinned;
}

#endif /* CONFIG_SMP */