arch: harmonize custom cpu_idle and cpu_atomic_idle implementations

Custom arch_cpu_idle and arch_cpu_atomic_idle implementations were handled
differently across architectures: RISC-V declared them as weak symbols,
Xtensa gated them behind a Kconfig option, and the other architectures
provided no override mechanism at all, even though the Kconfig option is
global and should apply to all architectures.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Anas Nashif, 2024-07-04 14:37:08 -04:00; committed by Carles Cufí
commit 7f52fc4188
12 changed files with 49 additions and 3 deletions
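With both options available on every architecture, an SoC or application that wants its own idle routines selects ARCH_CPU_IDLE_CUSTOM and/or ARCH_CPU_ATOMIC_IDLE_CUSTOM from Kconfig (as the it8xxx2 and Nordic VPR entries at the end of this commit do) and defines the functions itself. A minimal sketch of such an override, modeled on the default RISC-V implementation shown further down in this diff; the SoC-specific low-power sequencing is a placeholder and not part of this commit:

    #include <zephyr/irq.h>
    #include <zephyr/tracing/tracing.h>

    /* Replaces the arch default once ARCH_CPU_IDLE_CUSTOM is selected. */
    void arch_cpu_idle(void)
    {
            sys_trace_idle();
            /* SoC-specific low-power entry would go here; wfi mirrors the
             * default RISC-V behaviour.
             */
            __asm__ volatile("wfi");
            /* Must return with interrupts enabled; MSTATUS_IEN comes from
             * the RISC-V arch headers.
             */
            irq_unlock(MSTATUS_IEN);
    }

    /* Replaces the arch default once ARCH_CPU_ATOMIC_IDLE_CUSTOM is
     * selected; called with interrupts locked, restores the caller's key.
     */
    void arch_cpu_atomic_idle(unsigned int key)
    {
            sys_trace_idle();
            __asm__ volatile("wfi");
            irq_unlock(key);
    }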

View file

@@ -1084,3 +1084,10 @@ config ARCH_CPU_IDLE_CUSTOM
 	help
 	  This options allows applications to override the default arch idle implementation with
 	  a custom one.
+
+config ARCH_CPU_ATOMIC_IDLE_CUSTOM
+	bool "Custom arch_cpu_atomic_idle implementation"
+	default n
+	help
+	  This options allows applications to override the default arch idle implementation with
+	  a custom one.

View file

@@ -26,6 +26,7 @@ SECTION_VAR(BSS, z_arc_cpu_sleep_mode)
 	.align 4
 	.word 0
 
+#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
 /*
  * @brief Put the CPU in low-power mode
  *
@@ -48,7 +49,9 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
 	sleep r1
 	j_s [blink]
 	nop
+#endif
 
+#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
 /*
  * @brief Put the CPU in low-power mode, entered with IRQs locked
  *
@@ -56,6 +59,7 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
  *
  * void arch_cpu_atomic_idle(unsigned int key)
  */
 SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
 #ifdef CONFIG_TRACING
@@ -70,3 +74,4 @@ SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
 	sleep r1
 	j_s.d [blink]
 	seti r0
+#endif

View file

@@ -49,6 +49,7 @@ _skip_\@:
 #endif /* CONFIG_ARM_ON_ENTER_CPU_IDLE_HOOK */
 .endm
 
+#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
 SECTION_FUNC(TEXT, arch_cpu_idle)
 #ifdef CONFIG_TRACING
 	push {r0, lr}
@@ -68,6 +69,9 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
 	bx lr
+#endif
 
+#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
 SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
 #ifdef CONFIG_TRACING
 	push {r0, lr}
@@ -93,3 +97,4 @@ SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
 _irq_disabled:
 	bx lr
+#endif

View file

@@ -53,6 +53,7 @@ void z_arm_cpu_idle_init(void)
 	} while (false)
 #endif
 
+#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
 void arch_cpu_idle(void)
 {
 #if defined(CONFIG_TRACING)
@@ -96,7 +97,9 @@ void arch_cpu_idle(void)
 	__enable_irq();
 	__ISB();
 }
+#endif
 
+#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
 void arch_cpu_atomic_idle(unsigned int key)
 {
 #if defined(CONFIG_TRACING)
@@ -135,3 +138,4 @@ void arch_cpu_atomic_idle(unsigned int key)
 	__enable_irq();
 #endif
 }
+#endif

View file

@@ -13,7 +13,7 @@
 #include <zephyr/arch/cpu.h>
 
 _ASM_FILE_PROLOGUE
+#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
 GTEXT(arch_cpu_idle)
 SECTION_FUNC(TEXT, arch_cpu_idle)
 #ifdef CONFIG_TRACING
@@ -25,7 +25,9 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
 	wfi
 	msr daifclr, #(DAIFCLR_IRQ_BIT)
 	ret
+#endif
 
+#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
 GTEXT(arch_cpu_atomic_idle)
 SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
 #ifdef CONFIG_TRACING
@@ -41,3 +43,5 @@ SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
 	msr daifclr, #(DAIFCLR_IRQ_BIT)
 _irq_disabled:
 	ret
+#endif

View file

@@ -19,12 +19,16 @@ static ALWAYS_INLINE void mips_idle(unsigned int key)
 	__asm__ volatile("wait");
 }
 
+#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
 void arch_cpu_idle(void)
 {
 	mips_idle(1);
 }
+#endif
 
+#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
 void arch_cpu_atomic_idle(unsigned int key)
 {
 	mips_idle(key);
 }
+#endif

View file

@@ -7,6 +7,7 @@
 #include <zephyr/kernel.h>
 #include <zephyr/kernel_structs.h>
 
+#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
 void arch_cpu_idle(void)
 {
 	/* Do nothing but unconditionally unlock interrupts and return to the
@@ -14,7 +15,9 @@ void arch_cpu_idle(void)
 	 */
 	irq_unlock(NIOS2_STATUS_PIE_MSK);
 }
+#endif
 
+#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
 void arch_cpu_atomic_idle(unsigned int key)
 {
 	/* Do nothing but restore IRQ state. This CPU does not have any
@@ -22,3 +25,4 @@ void arch_cpu_atomic_idle(unsigned int key)
 	 */
 	irq_unlock(key);
 }
+#endif

View file

@@ -7,16 +7,20 @@
 #include <zephyr/irq.h>
 #include <zephyr/tracing/tracing.h>
 
-void __weak arch_cpu_idle(void)
+#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
+void arch_cpu_idle(void)
 {
 	sys_trace_idle();
 	__asm__ volatile("wfi");
 	irq_unlock(MSTATUS_IEN);
 }
+#endif
 
-void __weak arch_cpu_atomic_idle(unsigned int key)
+#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
+void arch_cpu_atomic_idle(unsigned int key)
 {
 	sys_trace_idle();
 	__asm__ volatile("wfi");
 	irq_unlock(key);
 }
+#endif

View file

@@ -7,6 +7,7 @@
 #include <zephyr/tracing/tracing.h>
 #include <zephyr/arch/cpu.h>
 
+#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
 __pinned_func
 void arch_cpu_idle(void)
 {
@@ -15,7 +16,9 @@ void arch_cpu_idle(void)
 		"sti\n\t"
 		"hlt\n\t");
 }
+#endif
 
+#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
 __pinned_func
 void arch_cpu_atomic_idle(unsigned int key)
 {
@@ -42,3 +45,4 @@ void arch_cpu_atomic_idle(unsigned int key)
 		__asm__ volatile("cli");
 	}
 }
+#endif

View file

@@ -14,6 +14,7 @@ void arch_cpu_idle(void)
 }
 #endif
 
+#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
 void arch_cpu_atomic_idle(unsigned int key)
 {
 	sys_trace_idle();
@@ -21,3 +22,4 @@ void arch_cpu_atomic_idle(unsigned int key)
 		      "wsr.ps %0\n\t"
 		      "rsync" :: "a"(key));
 }
+#endif

View file

@@ -4,6 +4,8 @@
 config SOC_SERIES_IT8XXX2
 	select CPU_HAS_FPU if "$(ZEPHYR_TOOLCHAIN_VARIANT)" != "zephyr" || RISCV_ISA_EXT_M
 	select HAS_PM
+	select ARCH_CPU_IDLE_CUSTOM
+	select ARCH_CPU_ATOMIC_IDLE_CUSTOM
 
 if SOC_SERIES_IT8XXX2

View file

@@ -18,6 +18,7 @@ config RISCV_CORE_NORDIC_VPR
 	select RISCV_SOC_CONTEXT_SAVE
 	select HAS_FLASH_LOAD_OFFSET
 	select ARCH_CPU_IDLE_CUSTOM
+	select ARCH_CPU_ATOMIC_IDLE_CUSTOM
 	select INCLUDE_RESET_VECTOR
 	help
 	  Enable support for the RISC-V Nordic VPR core.