arch: arm: aarch64: add SMP support

With timer/GIC/cache support in place, we can now add SMP support
and bring up the secondary cores.

Signed-off-by: Peng Fan <peng.fan@nxp.com>
Peng Fan <peng.fan@nxp.com> authored on 2020-11-09 15:58:15 +08:00; committed by Anas Nashif
commit a2ea20dd6d
7 changed files with 189 additions and 0 deletions
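For context, the new arch_start_cpu() below is invoked from the kernel's SMP startup (z_smp_init() in kernel/smp.c). A hedged sketch of that caller, with smp_init_top and start_flag standing in for the kernel's internal names (not part of this diff):

/* Illustrative caller, simplified from kernel/smp.c */
for (int i = 1; i < CONFIG_MP_NUM_CPUS; i++) {
	arch_start_cpu(i, z_interrupt_stacks[i], CONFIG_ISR_STACK_SIZE,
		       smp_init_top, &start_flag);
}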


@@ -29,3 +29,6 @@ zephyr_library_sources_ifdef(CONFIG_AARCH64_IMAGE_HEADER header.S)
add_subdirectory_ifdef(CONFIG_ARM_MMU mmu)
zephyr_library_sources_ifdef(CONFIG_CACHE_MANAGEMENT cache.S)
zephyr_library_sources_ifdef(CONFIG_CACHE_MANAGEMENT cache.c)
if ((CONFIG_MP_NUM_CPUS GREATER 1) OR (CONFIG_SMP))
zephyr_library_sources(smp.c)
endif ()


@@ -9,6 +9,11 @@
#ifdef _ASMLANGUAGE
.macro z_arm64_get_cpu_id xreg0
mrs \xreg0, mpidr_el1
/* FIXME: Aff3 not taken into consideration */
ubfx \xreg0, \xreg0, #0, #24
.endm
/*
* Increment nested counter
*/
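The ubfx above keeps MPIDR_EL1 bits [23:0], i.e. affinity fields Aff0..Aff2. A C model of the same truncation (illustrative only; mpidr_to_cpu_id is not a name from this diff):

/* C equivalent of z_arm64_get_cpu_id: keep Aff0..Aff2 (bits [23:0]).
 * Aff3 (bits [39:32]) is dropped, as the FIXME notes. */
static inline uint32_t mpidr_to_cpu_id(uint64_t mpidr)
{
	return (uint32_t)(mpidr & 0xffffff);
}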


@@ -55,3 +55,13 @@ void z_arm64_prep_c(void)
CODE_UNREACHABLE;
}
#if CONFIG_MP_NUM_CPUS > 1
extern FUNC_NORETURN void z_arm64_secondary_start(void);
void z_arm64_secondary_prep_c(void)
{
z_arm64_secondary_start();
CODE_UNREACHABLE;
}
#endif


@@ -75,6 +75,11 @@ out:
/* Select SP_EL0 */
msr SPSel, #0
#if CONFIG_MP_NUM_CPUS > 1
z_arm64_get_cpu_id x0
cbnz x0, L_secondary_stack
#endif
/* Initialize stack */
ldr x0, =(z_interrupt_stacks)
add x0, x0, #(CONFIG_ISR_STACK_SIZE)
@@ -82,6 +87,21 @@ out:
ret x23
#if CONFIG_MP_NUM_CPUS > 1
L_secondary_stack:
z_arm64_get_cpu_id x1
adr x0, arm64_cpu_init
mov x2, #ARM64_CPU_INIT_SIZE
madd x0, x1, x2, x0
ldr x0, [x0]
cbz x0, L_secondary_stack /* spin until the primary core publishes our stack pointer */
dmb ld
mov sp, x0
ret x23
#endif
/*
* Reset vector
*
@@ -135,4 +155,14 @@ switch_el:
msr DAIFClr, #(DAIFCLR_ABT_BIT)
isb
#if CONFIG_MP_NUM_CPUS > 1
z_arm64_get_cpu_id x0
cbnz x0, L_enable_secondary
#endif
b z_arm64_prep_c
#if CONFIG_MP_NUM_CPUS > 1
L_enable_secondary:
b z_arm64_secondary_prep_c
#endif
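In C terms, L_secondary_stack computes &arm64_cpu_init[cpu_id] (valid because ARM64_CPU_INIT_SIZE equals sizeof(arm64_cpu_init[0]), as asserted in smp.c) and spins until the primary core has published a stack pointer there. A hedged model of the sequence:

/* Illustrative model of the assembly above; not part of the diff. */
static void *secondary_wait_for_stack(uint64_t cpu_id)
{
	void * volatile *slot = (void * volatile *)
		((uintptr_t)arm64_cpu_init +
		 cpu_id * ARM64_CPU_INIT_SIZE);	/* madd x0, x1, x2, x0 */
	void *sp;

	do {
		sp = *slot;		/* ldr x0, [x0] */
	} while (sp == NULL);		/* cbz: not yet published */
	/* dmb ld: order later loads after observing the published sp */
	return sp;			/* becomes this core's initial SP */
}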

arch/arm/core/aarch64/smp.c (new file, 133 lines)

@@ -0,0 +1,133 @@
/*
* Copyright 2020 NXP
*
* SPDX-License-Identifier: Apache-2.0
*
*/
/**
* @file
* @brief Code required for AArch64 multicore and Zephyr SMP support
*/
#include <cache.h>
#include <device.h>
#include <kernel.h>
#include <kernel_structs.h>
#include <ksched.h>
#include <soc.h>
#include <init.h>
#include <arch/arm/aarch64/arm_mmu.h>
#include <arch/cpu.h>
#include <drivers/interrupt_controller/gic.h>
#include <drivers/pm_cpu_ops.h>
#include <sys/arch_interface.h>
#define SGI_SCHED_IPI 0
volatile struct {
void *sp; /* Must stay the first member: reset.S loads it at offset 0 */
arch_cpustart_t fn;
void *arg;
char pad[] __aligned(L1_CACHE_BYTES);
} arm64_cpu_init[CONFIG_MP_NUM_CPUS];
/*
 * _curr_cpu records the _cpu_t struct of each CPU
 * for efficient access from assembly.
 */
volatile _cpu_t *_curr_cpu[CONFIG_MP_NUM_CPUS];
extern void __start(void);
/* Called from Zephyr initialization */
void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
arch_cpustart_t fn, void *arg)
{
__ASSERT(sizeof(arm64_cpu_init[0]) == ARM64_CPU_INIT_SIZE,
"ARM64_CPU_INIT_SIZE != sizeof(arm64_cpu_init[0]\n");
_curr_cpu[cpu_num] = &(_kernel.cpus[cpu_num]);
arm64_cpu_init[cpu_num].fn = fn;
arm64_cpu_init[cpu_num].arg = arg;
arm64_cpu_init[cpu_num].sp =
(void *)(Z_THREAD_STACK_BUFFER(stack) + sz);
arch_dcache_range((void *)&arm64_cpu_init[cpu_num],
sizeof(arm64_cpu_init[cpu_num]), K_CACHE_WB_INVD);
/* TODO: get mpidr from device tree, using cpu_num */
if (pm_cpu_on(cpu_num, (uint64_t)&__start))
printk("Failed to boot CPU%d\n", cpu_num);
/* Wait for the secondary core to come up; see z_arm64_secondary_start */
while (arm64_cpu_init[cpu_num].fn) {
wfe();
}
}
/* The C entry point for secondary cores */
void z_arm64_secondary_start(void)
{
arch_cpustart_t fn;
int cpu_num = MPIDR_TO_CORE(GET_MPIDR());
z_arm64_mmu_init();
#ifdef CONFIG_SMP
arm_gic_secondary_init();
irq_enable(SGI_SCHED_IPI);
#endif
fn = arm64_cpu_init[cpu_num].fn;
/*
* Secondary core clears .fn to announce its presence.
* Primary core is polling for this.
*/
arm64_cpu_init[cpu_num].fn = NULL;
dsb();
sev();
fn(arm64_cpu_init[cpu_num].arg);
}
#ifdef CONFIG_SMP
void sched_ipi_handler(const void *unused)
{
ARG_UNUSED(unused);
z_sched_ipi();
}
/* arch implementation of sched_ipi */
void arch_sched_ipi(void)
{
const uint64_t mpidr = GET_MPIDR();
/*
 * Send the SGI to all cores except the caller.
 * Note: assumes a single cluster for now.
 */
gic_raise_sgi(SGI_SCHED_IPI, mpidr, SGIR_TGT_MASK & ~(1 << MPIDR_TO_CORE(mpidr)));
}
static int arm64_smp_init(const struct device *dev)
{
ARG_UNUSED(dev);
/* necessary master core init */
_curr_cpu[0] = &(_kernel.cpus[0]);
/*
 * SGI0 is used for the scheduler IPI; this could be made
 * a Kconfig option later.
 */
IRQ_CONNECT(SGI_SCHED_IPI, IRQ_DEFAULT_PRIORITY, sched_ipi_handler, NULL, 0);
irq_enable(SGI_SCHED_IPI);
return 0;
}
SYS_INIT(arm64_smp_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif
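As a worked example of the target list computed in arch_sched_ipi(): with four cores in cluster 0 and the IPI raised from core 1, SGIR_TGT_MASK & ~(1 << 1) clears only the sender's bit, so cores 0, 2 and 3 receive SGI0 while core 1 does not.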


@@ -202,4 +202,8 @@
#endif /* CONFIG_CPU_CORTEX_A72 */
#define L1_CACHE_SHIFT (6)
#define L1_CACHE_BYTES BIT(L1_CACHE_SHIFT)
#define ARM64_CPU_INIT_SIZE L1_CACHE_BYTES
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_CPU_H_ */
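Sizing each arm64_cpu_init slot to one L1 cache line lets arch_start_cpu() flush exactly one slot per CPU and avoids false sharing between cores; the flexible pad[] member rounds the struct up to this size. A compile-time restatement of the runtime __ASSERT in smp.c (illustrative; it would live in smp.c, where arm64_cpu_init is visible):

/* Mirrors the __ASSERT in arch_start_cpu() */
BUILD_ASSERT(sizeof(arm64_cpu_init[0]) == ARM64_CPU_INIT_SIZE,
	     "per-CPU init slot must be exactly one cache line");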


@@ -127,6 +127,10 @@ static ALWAYS_INLINE void disable_fiq(void)
:: "i" (DAIFSET_FIQ_BIT) : "memory");
}
#define sev() __asm__ volatile("sev" : : : "memory")
#define wfe() __asm__ volatile("wfe" : : : "memory")
#define wfi() __asm__ volatile("wfi" : : : "memory")
#define dsb() __asm__ volatile ("dsb sy" ::: "memory")
#define dmb() __asm__ volatile ("dmb sy" ::: "memory")
#define isb() __asm__ volatile ("isb" ::: "memory")
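These helpers pair up in the handshake used by smp.c: the writer publishes data, issues dsb() so the store is visible, then sev() to wake cores parked in wfe(). A minimal sketch built only from the macros above (illustrative, with a hypothetical ready flag):

/* Minimal publish/wait pattern (sketch) */
static volatile int ready;

static void publish(void)
{
	ready = 1;
	dsb();	/* make the store visible before waking waiters */
	sev();	/* wake any core sleeping in wfe() */
}

static void wait_ready(void)
{
	while (!ready) {
		wfe();	/* sleep until an event (sev) arrives */
	}
}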