arch/xtensa: soc/intel_adsp: Rework MP code entry

Instead of passing the crt1 _start function as the entry code for
auxiliary CPUs, use a tiny assembly stub instead which can avoid the
runtime testing needed to skip the work in _start.  All the crt1 code
was doing was clearing BSS (which must not happen on a second CPU) and
setting the stack pointer (which is wrong on the second CPU).

This allows us to clean out the SMP code in crt1.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
Andy Ross 2021-02-16 08:07:04 -08:00 committed by Anas Nashif
commit a230fafde5
2 changed files with 26 additions and 28 deletions

View file

@ -148,13 +148,6 @@ _start:
movi a0, 0 movi a0, 0
# endif # endif
# if CONFIG_MP_NUM_CPUS > 1
/* Only clear BSS when running on core 0 */
rsr a3, PRID
extui a3, a3, 0, 8 /* extract core ID */
bnez a3, .L3zte
# endif
/* /*
* Clear the BSS (uninitialized data) segments. * Clear the BSS (uninitialized data) segments.
* This code supports multiple zeroed sections (*.bss). * This code supports multiple zeroed sections (*.bss).
@ -196,24 +189,6 @@ _start:
#endif /* !XCHAL_HAVE_BOOTLOADER */ #endif /* !XCHAL_HAVE_BOOTLOADER */
#if CONFIG_MP_NUM_CPUS > 1
/*
* z_cstart() is only for CPU #0.
* Other CPUs have different entry point.
*/
rsr a3, PRID
extui a3, a3, 0, 8 /* extract core ID */
beqz a3, 2f
/* Load our stack pointer set up for us by the SOC layer */
movi a1, z_mp_stack_top
l32i a1, a1, 0
call4 z_mp_entry
2:
#endif
/* Enter C domain, never returns from here */ /* Enter C domain, never returns from here */
CALL z_cstart CALL z_cstart

View file

@ -55,8 +55,6 @@ LOG_MODULE_REGISTER(soc_mp, CONFIG_SOC_LOG_LEVEL);
static const struct device *idc; static const struct device *idc;
#endif #endif
extern void __start(void);
struct cpustart_rec { struct cpustart_rec {
uint32_t cpu; uint32_t cpu;
@ -90,6 +88,29 @@ static __aligned(XCHAL_DCACHE_LINESIZE) union {
(*((volatile struct cpustart_rec *) \ (*((volatile struct cpustart_rec *) \
z_soc_uncached_ptr(&cpustart_mem.cpustart))) z_soc_uncached_ptr(&cpustart_mem.cpustart)))
/* Tiny assembly stub for calling z_mp_entry() on the auxiliary CPUs.
 * Mask interrupts, clear the register window state and set the stack
 * pointer. This represents the minimum work required to run C code
 * safely.
 *
 * Note that alignment is absolutely required: the IDC protocol passes
 * only the upper 30 bits of the address to the second CPU.
 */
void z_soc_mp_asm_entry(void);
__asm__(".align 4 \n\t"
".global z_soc_mp_asm_entry \n\t"
"z_soc_mp_asm_entry: \n\t"
" rsil a0, 5 \n\t" /* 5 == XCHAL_EXCM_LEVEL; old PS goes to a0, clobbered below */
" movi a0, 0 \n\t"
" wsr a0, WINDOWBASE \n\t" /* reset register-window rotation to window 0 */
" movi a0, 1 \n\t"
" wsr a0, WINDOWSTART \n\t" /* mark only the current window as live */
" rsync \n\t" /* ensure the WSR writes take effect before continuing */
" movi a1, z_mp_stack_top \n\t" /* a1 (SP) := value stored at z_mp_stack_top */
" l32i a1, a1, 0 \n\t"
" call4 z_mp_entry \n\t"); /* nothing follows: z_mp_entry must not return */
/* The asm above hardcodes the RSIL level; fail the build if it drifts. */
BUILD_ASSERT(XCHAL_EXCM_LEVEL == 5);
void z_mp_entry(void) void z_mp_entry(void)
{ {
volatile int ie; volatile int ie;
@ -177,7 +198,9 @@ void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
CAVS_ICTL_INT_CPU_OFFSET(cpu_num), 8); CAVS_ICTL_INT_CPU_OFFSET(cpu_num), 8);
/* Send power up message to the other core */ /* Send power up message to the other core */
-	idc_write(IPC_IDCIETC(cpu_num), 0, IDC_MSG_POWER_UP_EXT(RAM_BASE));
+	uint32_t ietc = IDC_MSG_POWER_UP_EXT((long) z_soc_mp_asm_entry);
+	idc_write(IPC_IDCIETC(cpu_num), 0, ietc);
idc_write(IPC_IDCITC(cpu_num), 0, IDC_MSG_POWER_UP | IPC_IDCITC_BUSY); idc_write(IPC_IDCITC(cpu_num), 0, IDC_MSG_POWER_UP | IPC_IDCITC_BUSY);
/* Disable IDC interrupt on other core so IPI won't cause /* Disable IDC interrupt on other core so IPI won't cause