xtensa: fix delayed booting secondary cores

With SOF, secondary cores are booted later, at run time, instead of
all cores being brought up together during the traditional boot.
Adjust arch_start_cpu() to make that possible.

Signed-off-by: Guennadi Liakhovetski <guennadi.liakhovetski@linux.intel.com>
Commit 3509cffac6, authored by Guennadi Liakhovetski on 2021-04-01 20:47:46 +02:00, committed by Anas Nashif.
2 changed files with 36 additions and 2 deletions
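
For context, this is roughly what a delayed, on-demand core boot looks
like from the caller's side. A minimal sketch only: boot_core_late(),
secondary_stack and secondary_entry are illustrative names, not part of
this patch, while arch_start_cpu() and the stack macros are real Zephyr
symbols of that era:

	#include <kernel.h>
	#include <sys/arch_interface.h>

	/* Illustrative stack and entry point for the late-booted core */
	static K_KERNEL_STACK_DEFINE(secondary_stack, 4096);

	static FUNC_NORETURN void secondary_entry(void *arg)
	{
		ARG_UNUSED(arg);
		/* ... hand the core over to the scheduler / SOF workload ... */
		while (1) {
		}
	}

	/* With this patch nothing forces this to run during primary boot;
	 * SOF can call it whenever the firmware actually needs the core.
	 */
	void boot_core_late(int cpu)
	{
		arch_start_cpu(cpu, secondary_stack,
			       K_KERNEL_STACK_SIZEOF(secondary_stack),
			       secondary_entry, NULL);
	}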

@@ -216,6 +216,14 @@ typedef FUNC_NORETURN void (*arch_cpustart_t)(void *data);
  */
 void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
 		    arch_cpustart_t fn, void *arg);
 
+/**
+ * @brief Return CPU power status
+ *
+ * @param cpu_num Integer number of the CPU
+ */
+bool arch_cpu_active(int cpu_num);
+
 /** @} */
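
A possible use of the new predicate is to skip cores that were never
powered up, for example when iterating CPUs before sending IPIs. A
sketch only; active_cpu_count() is a hypothetical helper, while
arch_cpu_active() and CONFIG_MP_NUM_CPUS are real Zephyr symbols:

	#include <sys/arch_interface.h>

	/* Hypothetical helper: how many cores have actually been started? */
	static int active_cpu_count(void)
	{
		int i, n = 0;

		for (i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
			if (arch_cpu_active(i)) {
				n++;
			}
		}
		return n;
	}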

@@ -89,6 +89,8 @@ static __aligned(XCHAL_DCACHE_LINESIZE) union {
 	(*((volatile struct cpustart_rec *)		\
 	   z_soc_uncached_ptr(&cpustart_mem.cpustart)))
 
+static uint32_t cpu_mask;
+
 /* Tiny assembly stub for calling z_mp_entry() on the auxiliary CPUs.
  * Mask interrupts, clear the register window state and set the stack
  * pointer. This represents the minimum work required to run C code
@@ -112,6 +114,8 @@ __asm__(".align 4		\n\t"
 	"  call4 z_mp_entry	\n\t");
 
 BUILD_ASSERT(XCHAL_EXCM_LEVEL == 5);
 
+int cavs_idc_smp_init(const struct device *dev);
+
 void z_mp_entry(void)
 {
 	volatile int ie;
@@ -152,6 +156,10 @@ void z_mp_entry(void)
 	irq_enable(DT_IRQN(DT_INST(0, intel_cavs_idc)));
 #endif /* CONFIG_IPM_CAVS_IDC */
 
+#ifdef CONFIG_SMP_BOOT_DELAY
+	cavs_idc_smp_init(NULL);
+#endif
+
 	start_rec.alive = 1;
 
 	start_rec.fn(start_rec.arg);
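
With delayed boot, the IDC driver's SMP hookup can no longer happen
unconditionally at SYS_INIT time, because the secondary cores do not
exist yet; instead each core calls cavs_idc_smp_init() once it is up.
A sketch of the matching split on the driver side (simplified and
hypothetical, not the actual ipm_cavs_idc code):

	static int cavs_idc_init(const struct device *dev)
	{
		/* ... ordinary per-device setup, safe at boot time ... */

	#ifndef CONFIG_SMP_BOOT_DELAY
		/* All cores boot together: hook up SMP immediately. With
		 * CONFIG_SMP_BOOT_DELAY this is deferred until each core
		 * is actually started, as in the calls added here.
		 */
		cavs_idc_smp_init(dev);
	#endif
		return 0;
	}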
@@ -168,6 +176,11 @@ void z_mp_entry(void)
 #endif
 }
 
+bool arch_cpu_active(int cpu_num)
+{
+	return !!(cpu_mask & BIT(cpu_num));
+}
+
 void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
 		    arch_cpustart_t fn, void *arg)
 {
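
The power-status bookkeeping is a plain bitmask, one bit per core. The
same pattern as standalone C, runnable outside Zephyr (cpu_active() is
the stand-in for arch_cpu_active()):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define BIT(n) (1UL << (n))

	static uint32_t cpu_mask;	/* one bit per powered-up core */

	static bool cpu_active(int cpu)
	{
		return !!(cpu_mask & BIT(cpu));
	}

	int main(void)
	{
		cpu_mask |= BIT(0);	/* boot core */
		cpu_mask |= BIT(2);	/* core booted later at run time */

		for (int i = 0; i < 4; i++) {
			printf("cpu%d: %s\n", i,
			       cpu_active(i) ? "active" : "off");
		}
		return 0;
	}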
@@ -195,6 +208,7 @@ void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
 	idc_reg = idc_read(IPC_IDCCTL, cpu_num);
 	idc_reg |= IPC_IDCCTL_IDCTBIE(0);
 	idc_write(IPC_IDCCTL, cpu_num, idc_reg);
+	/* FIXME: 8 is IRQ_BIT_LVL2_IDC / PLATFORM_IDC_INTERRUPT */
 	sys_set_bit(DT_REG_ADDR(DT_NODELABEL(cavs0)) + 0x04 +
 		    CAVS_ICTL_INT_CPU_OFFSET(cpu_num), 8);
@@ -213,12 +227,24 @@ void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
 	sys_set_bit(DT_REG_ADDR(DT_NODELABEL(cavs0)) + 0x00 +
 		    CAVS_ICTL_INT_CPU_OFFSET(cpu_num), 8);
 
-	while (start_rec.alive == 0) {
-		k_busy_wait(100);
-	}
+#ifdef CONFIG_SMP_BOOT_DELAY
+	cavs_idc_smp_init(NULL);
+#endif
 
 	/* Clear done bit from responding the power up message */
 	idc_reg = idc_read(IPC_IDCIETC(cpu_num), 0) | IPC_IDCIETC_DONE;
 	idc_write(IPC_IDCIETC(cpu_num), 0, idc_reg);
+
+	while (!start_rec.alive)
+		;
+
+	/*
+	 * No locking needed as long as CPUs can only be powered on by the main
+	 * CPU and cannot be powered off
+	 */
+	cpu_mask |= BIT(cpu_num);
 }
 
 #ifdef CONFIG_SCHED_IPI_SUPPORTED
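
The boot handshake itself is a one-way flag: the secondary core sets
start_rec.alive through the uncached alias, and the booting CPU now
spins on it only after acknowledging the IDC power-up message. The same
pattern in portable C11 (atomics stand in for the volatile, uncached
access; requires <threads.h> support):

	#include <stdatomic.h>
	#include <stdio.h>
	#include <threads.h>

	static atomic_int alive;	/* stands in for start_rec.alive */

	static int secondary(void *arg)	/* stands in for z_mp_entry() */
	{
		(void)arg;
		/* ... core-local setup: stack, caches, interrupts ... */
		atomic_store(&alive, 1);	/* "I am up" */
		return 0;
	}

	int main(void)
	{
		thrd_t t;

		thrd_create(&t, secondary, NULL);

		while (!atomic_load(&alive)) {
			/* spin, as arch_start_cpu() does after the IDC ack */
		}
		puts("secondary core is alive");
		thrd_join(t, NULL);
		return 0;
	}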