arch/xtensa: Add arch_cpu_idle() workarounds
A simple WAITI isn't sufficient in all cases. The cAVS 2.5 hardware uses WAITI as the entry state for per-core power gating, which is very difficult to debug. Provide a fallback that simply spins in the idle loop waiting for interrupts to provide a stable system while this feature stabilizes. Also, the SOF code for those platforms references a known bug with the Xtensa LX6 core IP (or at least some versions), and will prefix the WAITI instruction with 128 NOP.N's followed by an ISYNC and EXTW. This bug hasn't been seen under Zephyr yet, and details are sketchy. But the code is simple enough to import and works correctly. Place both workarounds under new kconfig variables and select them both (even though they're actually mutually exclusive -- if you select both, CPU_IDLE_SPIN overrides) for cavs_v25. Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
parent
b76bc6c80d
commit
37bbe7aeea
2 changed files with 40 additions and 0 deletions
|
@ -78,4 +78,16 @@ config XTENSA_ENABLE_BACKTRACE
|
|||
help
|
||||
Enable this config option to print backtrace on panic exception
|
||||
|
||||
config XTENSA_CPU_IDLE_SPIN
	bool "Use busy loop for k_cpu_idle"
	help
	  Have the CPU idle state spin in a busy loop with interrupts
	  unmasked rather than issuing a WAITI instruction. Useful on
	  platforms where WAITI entry has unwanted side effects (e.g.
	  power gating) that are hard to debug.

config XTENSA_WAITI_BUG
	bool "Enable workaround sequence for WAITI bug on LX6"
	help
	  Workaround carried over from SOF for its ADSP platforms:
	  some versions of the LX6 core IP require a WAITI entry to be
	  preceded by 128 NOP instructions plus an ISYNC and EXTW.

endmenu
|
||||
|
|
|
@ -8,8 +8,36 @@
|
|||
void arch_cpu_idle(void)
|
||||
{
|
||||
sys_trace_idle();
|
||||
|
||||
/* Just spin forever with interrupts unmasked, for platforms
|
||||
* where WAITI can't be used or where its behavior is
|
||||
* complicated (Intel DSPs will power gate on idle entry under
|
||||
* some circumstances)
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_XTENSA_CPU_IDLE_SPIN)) {
|
||||
__asm__ volatile("rsil a0, 0");
|
||||
__asm__ volatile("loop_forever: j loop_forever");
|
||||
return;
|
||||
}
|
||||
|
||||
/* Cribbed from SOF: workaround for a bug in some versions of
|
||||
* the LX6 IP. Preprocessor ugliness avoids the need to
|
||||
* figure out how to get the compiler to unroll a loop.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_XTENSA_WAITI_BUG)) {
|
||||
#define NOP4 __asm__ volatile("nop; nop; nop; nop");
|
||||
#define NOP32 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4
|
||||
#define NOP128() NOP32 NOP32 NOP32 NOP32
|
||||
NOP128();
|
||||
#undef NOP128
|
||||
#undef NOP16
|
||||
#undef NOP4
|
||||
__asm__ volatile("isync; extw");
|
||||
}
|
||||
|
||||
__asm__ volatile ("waiti 0");
|
||||
}
|
||||
|
||||
void arch_cpu_atomic_idle(unsigned int key)
|
||||
{
|
||||
sys_trace_idle();
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue