From 3509cffac67bef834536a1bb0c2814900c3130a4 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Thu, 1 Apr 2021 20:47:46 +0200
Subject: [PATCH] xtensa: fix delayed booting of secondary cores

With SOF, secondary cores are booted later at run-time instead of the
traditional simultaneous booting of all the cores. Adjust
arch_start_cpu() to make that possible.

Signed-off-by: Guennadi Liakhovetski
---
 include/sys/arch_interface.h          |  8 ++++++++
 soc/xtensa/intel_adsp/common/soc_mp.c | 30 ++++++++++++++++++++++++++++--
 2 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/include/sys/arch_interface.h b/include/sys/arch_interface.h
index 61033601b37..4db51dca09f 100644
--- a/include/sys/arch_interface.h
+++ b/include/sys/arch_interface.h
@@ -216,6 +216,14 @@ typedef FUNC_NORETURN void (*arch_cpustart_t)(void *data);
  */
 void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
 		    arch_cpustart_t fn, void *arg);
+
+/**
+ * @brief Return CPU power status
+ *
+ * @param cpu_num Integer number of the CPU
+ */
+bool arch_cpu_active(int cpu_num);
+
 /** @} */

diff --git a/soc/xtensa/intel_adsp/common/soc_mp.c b/soc/xtensa/intel_adsp/common/soc_mp.c
index d18913998ab..ee5b20f6e13 100644
--- a/soc/xtensa/intel_adsp/common/soc_mp.c
+++ b/soc/xtensa/intel_adsp/common/soc_mp.c
@@ -89,6 +89,8 @@ static __aligned(XCHAL_DCACHE_LINESIZE) union {
 	(*((volatile struct cpustart_rec *)				\
 	   z_soc_uncached_ptr(&cpustart_mem.cpustart)))
 
+static uint32_t cpu_mask;
+
 /* Tiny assembly stub for calling z_mp_entry() on the auxiliary CPUs.
  * Mask interrupts, clear the register window state and set the stack
  * pointer. This represents the minimum work required to run C code
@@ -112,6 +114,8 @@ __asm__(".align 4		\n\t"
 	"  call4 z_mp_entry	\n\t");
 BUILD_ASSERT(XCHAL_EXCM_LEVEL == 5);
 
+int cavs_idc_smp_init(const struct device *dev);
+
 void z_mp_entry(void)
 {
 	volatile int ie;
@@ -152,6 +156,10 @@ void z_mp_entry(void)
 	irq_enable(DT_IRQN(DT_INST(0, intel_cavs_idc)));
 #endif /* CONFIG_IPM_CAVS_IDC */
 
+#ifdef CONFIG_SMP_BOOT_DELAY
+	cavs_idc_smp_init(NULL);
+#endif
+
 	start_rec.alive = 1;
 
 	start_rec.fn(start_rec.arg);
@@ -168,6 +176,11 @@ void z_mp_entry(void)
 #endif
 }
 
+bool arch_cpu_active(int cpu_num)
+{
+	return !!(cpu_mask & BIT(cpu_num));
+}
+
 void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
 		    arch_cpustart_t fn, void *arg)
 {
@@ -195,6 +208,7 @@ void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
 	idc_reg = idc_read(IPC_IDCCTL, cpu_num);
 	idc_reg |= IPC_IDCCTL_IDCTBIE(0);
 	idc_write(IPC_IDCCTL, cpu_num, idc_reg);
+	/* FIXME: 8 is IRQ_BIT_LVL2_IDC / PLATFORM_IDC_INTERRUPT */
 	sys_set_bit(DT_REG_ADDR(DT_NODELABEL(cavs0)) + 0x04 +
 		    CAVS_ICTL_INT_CPU_OFFSET(cpu_num), 8);
 
@@ -213,12 +227,24 @@ void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
 	sys_set_bit(DT_REG_ADDR(DT_NODELABEL(cavs0)) + 0x00 +
 		    CAVS_ICTL_INT_CPU_OFFSET(cpu_num), 8);
 
-	while (start_rec.alive == 0) {
-	}
+	k_busy_wait(100);
+
+#ifdef CONFIG_SMP_BOOT_DELAY
+	cavs_idc_smp_init(NULL);
+#endif
 
 	/* Clear done bit from responding the power up message */
 	idc_reg = idc_read(IPC_IDCIETC(cpu_num), 0) | IPC_IDCIETC_DONE;
 	idc_write(IPC_IDCIETC(cpu_num), 0, idc_reg);
+
+	while (!start_rec.alive)
+		;
+
+	/*
+	 * No locking needed as long as CPUs can only be powered on by the main
+	 * CPU and cannot be powered off
+	 */
+	cpu_mask |= BIT(cpu_num);
 }
 
 #ifdef CONFIG_SCHED_IPI_SUPPORTED
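
Not part of the patch, for illustration only: a minimal sketch of how a caller
might use the new arch_cpu_active() accessor (declared in
include/sys/arch_interface.h above, and pulled in via the kernel headers). The
wait_for_cpu() helper below is hypothetical and does not exist in the tree; it
simply polls until arch_start_cpu() has brought the core up, which with
delayed SOF booting happens at run-time rather than at start-up.

#include <zephyr.h>

/* Hypothetical helper, illustration only: spin until the secondary core
 * has been started by arch_start_cpu() and has reported itself alive,
 * at which point arch_cpu_active() starts returning true.
 */
static void wait_for_cpu(int cpu_num)
{
	while (!arch_cpu_active(cpu_num)) {
		k_busy_wait(100);	/* poll every 100 us */
	}
}

Because cpu_mask is only ever written by the main CPU, and only after the
secondary core has set start_rec.alive, and because cores are never powered
off in this scheme, arch_cpu_active() can read the mask without locking,
which is why the patch leaves the cpu_mask update unprotected.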