diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index f92b2e298b2..4c0f06fbe9c 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -78,4 +78,16 @@ config XTENSA_ENABLE_BACKTRACE
 	help
 	  Enable this config option to print backtrace on panic exception
 
+config XTENSA_CPU_IDLE_SPIN
+	bool "Use busy loop for k_cpu_idle"
+	help
+	  Use a spin loop instead of WAITI for the CPU idle state.
+
+config XTENSA_WAITI_BUG
+	bool "Enable workaround sequence for WAITI bug on LX6"
+	help
+	  SOF traditionally contains this workaround on its ADSP
+	  platforms; it prefixes a WAITI entry with 128 NOP
+	  instructions followed by an ISYNC and EXTW.
+
 endmenu
diff --git a/arch/xtensa/core/cpu_idle.c b/arch/xtensa/core/cpu_idle.c
index 7ce4ca6c341..648ad1d8abd 100644
--- a/arch/xtensa/core/cpu_idle.c
+++ b/arch/xtensa/core/cpu_idle.c
@@ -8,8 +8,36 @@ void arch_cpu_idle(void)
 {
 	sys_trace_idle();
+
+	/* Just spin forever with interrupts unmasked, for platforms
+	 * where WAITI can't be used or where its behavior is
+	 * complicated (Intel DSPs will power gate on idle entry under
+	 * some circumstances)
+	 */
+	if (IS_ENABLED(CONFIG_XTENSA_CPU_IDLE_SPIN)) {
+		__asm__ volatile("rsil a0, 0");
+		__asm__ volatile("loop_forever: j loop_forever");
+		return;
+	}
+
+	/* Cribbed from SOF: workaround for a bug in some versions of
+	 * the LX6 IP.  Preprocessor ugliness avoids the need to
+	 * figure out how to get the compiler to unroll a loop.
+	 */
+	if (IS_ENABLED(CONFIG_XTENSA_WAITI_BUG)) {
+#define NOP4 __asm__ volatile("nop; nop; nop; nop");
+#define NOP32 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4
+#define NOP128() NOP32 NOP32 NOP32 NOP32
+		NOP128();
+#undef NOP128
+#undef NOP32
+#undef NOP4
+		__asm__ volatile("isync; extw");
+	}
+
 	__asm__ volatile ("waiti 0");
 }
+
 void arch_cpu_atomic_idle(unsigned int key)
 {
 	sys_trace_idle();
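
Note: the diff above only adds the two Kconfig symbols; a platform that needs
either behavior still has to turn them on. A minimal sketch of how an SoC
might do that from its Kconfig defaults is below; the SOC_EXAMPLE_ADSP symbol
and the file path are hypothetical and not part of this patch:

    # soc/xtensa/example_adsp/Kconfig.defconfig (hypothetical path)
    if SOC_EXAMPLE_ADSP

    config XTENSA_WAITI_BUG
    	default y

    endif # SOC_EXAMPLE_ADSP

Because both symbols carry prompts, an application could also set them
directly in its configuration, e.g. CONFIG_XTENSA_CPU_IDLE_SPIN=y in prj.conf.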