arch: use same syntax for custom arch calls

Use the same Kconfig syntax for these custom arch calls.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Authored by Anas Nashif on 2024-07-05 05:52:13 -04:00; committed by Carles Cufí
commit a91c6e56c8
15 changed files with 30 additions and 32 deletions


@@ -1078,16 +1078,14 @@ config TOOLCHAIN_HAS_BUILTIN_FFS
     help
       Hidden option to signal that toolchain has __builtin_ffs*().
 
-config ARCH_CPU_IDLE_CUSTOM
-    bool "Custom arch_cpu_idle implementation"
-    default n
+config ARCH_HAS_CUSTOM_CPU_IDLE
+    bool
     help
       This options allows applications to override the default arch idle implementation with
       a custom one.
 
-config ARCH_CPU_ATOMIC_IDLE_CUSTOM
-    bool "Custom arch_cpu_atomic_idle implementation"
-    default n
+config ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
+    bool
     help
       This options allows applications to override the default arch idle implementation with
       a custom one.
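Aside (not part of the diff): the renamed options remain hidden bools, so an SoC opts in with "select ARCH_HAS_CUSTOM_CPU_IDLE" / "select ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE" (as the SoC hunks further down do) and then supplies its own implementations, which the #ifndef guards below compile in place of the generic ones. A minimal, hypothetical C sketch of the SoC side; the helper name and file are illustrative only:

/* Hypothetical SoC override -- sketch only, mirroring the mips_idle() pattern
 * visible later in this diff. Assumes the SoC Kconfig selects
 * ARCH_HAS_CUSTOM_CPU_IDLE and ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE, and that
 * my_soc_wait_for_interrupt() is an SoC-provided helper (not a Zephyr API)
 * that re-enables interrupts according to its argument and waits for one.
 */
#include <zephyr/tracing/tracing.h>

extern void my_soc_wait_for_interrupt(unsigned int key);

void arch_cpu_idle(void)
{
    sys_trace_idle();
    /* Unconditionally allow interrupts while waiting. */
    my_soc_wait_for_interrupt(1);
}

void arch_cpu_atomic_idle(unsigned int key)
{
    sys_trace_idle();
    /* Wait using the caller's interrupt-lock key, as mips_idle(key) does. */
    my_soc_wait_for_interrupt(key);
}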


@@ -26,7 +26,7 @@ SECTION_VAR(BSS, z_arc_cpu_sleep_mode)
     .align 4
     .word 0
 
-#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
 /*
  * @brief Put the CPU in low-power mode
  *
@@ -51,7 +51,7 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
     nop
 #endif
 
-#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
 /*
  * @brief Put the CPU in low-power mode, entered with IRQs locked
  *


@@ -49,7 +49,7 @@ _skip_\@:
 #endif /* CONFIG_ARM_ON_ENTER_CPU_IDLE_HOOK */
 .endm
 
-#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
 SECTION_FUNC(TEXT, arch_cpu_idle)
 #ifdef CONFIG_TRACING
     push {r0, lr}
@@ -71,7 +71,7 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
 
 #endif
 
-#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
 SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
 #ifdef CONFIG_TRACING
     push {r0, lr}


@@ -53,7 +53,7 @@ void z_arm_cpu_idle_init(void)
     } while (false)
 #endif
 
-#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
 void arch_cpu_idle(void)
 {
 #if defined(CONFIG_TRACING)
@@ -99,7 +99,7 @@ void arch_cpu_idle(void)
 }
 #endif
 
-#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
 void arch_cpu_atomic_idle(unsigned int key)
 {
 #if defined(CONFIG_TRACING)


@@ -13,7 +13,7 @@
 #include <zephyr/arch/cpu.h>
 _ASM_FILE_PROLOGUE
 
-#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
 GTEXT(arch_cpu_idle)
 SECTION_FUNC(TEXT, arch_cpu_idle)
 #ifdef CONFIG_TRACING
@@ -27,7 +27,7 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
     ret
 #endif
 
-#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
 GTEXT(arch_cpu_atomic_idle)
 SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
 #ifdef CONFIG_TRACING


@@ -19,14 +19,14 @@ static ALWAYS_INLINE void mips_idle(unsigned int key)
     __asm__ volatile("wait");
 }
 
-#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
 void arch_cpu_idle(void)
 {
     mips_idle(1);
 }
 #endif
 
-#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
 void arch_cpu_atomic_idle(unsigned int key)
 {
     mips_idle(key);


@@ -7,7 +7,7 @@
 #include <zephyr/kernel.h>
 #include <zephyr/kernel_structs.h>
 
-#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
 void arch_cpu_idle(void)
 {
     /* Do nothing but unconditionally unlock interrupts and return to the
@@ -17,7 +17,7 @@ void arch_cpu_idle(void)
 }
 #endif
 
-#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
 void arch_cpu_atomic_idle(unsigned int key)
 {
     /* Do nothing but restore IRQ state. This CPU does not have any


@@ -7,7 +7,7 @@
 #include <zephyr/irq.h>
 #include <zephyr/tracing/tracing.h>
 
-#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
 void arch_cpu_idle(void)
 {
     sys_trace_idle();
@@ -16,7 +16,7 @@ void arch_cpu_idle(void)
 }
 #endif
 
-#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
 void arch_cpu_atomic_idle(unsigned int key)
 {
     sys_trace_idle();


@@ -7,7 +7,7 @@
 #include <zephyr/tracing/tracing.h>
 #include <zephyr/arch/cpu.h>
 
-#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
 __pinned_func
 void arch_cpu_idle(void)
 {
@@ -18,7 +18,7 @@ void arch_cpu_idle(void)
 }
 #endif
 
-#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
 __pinned_func
 void arch_cpu_atomic_idle(unsigned int key)
 {


@@ -6,7 +6,7 @@
 #include <zephyr/toolchain.h>
 #include <zephyr/tracing/tracing.h>
 
-#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
 void arch_cpu_idle(void)
 {
     sys_trace_idle();
@@ -14,7 +14,7 @@ void arch_cpu_idle(void)
 }
 #endif
 
-#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
+#ifndef CONFIG_ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
 void arch_cpu_atomic_idle(unsigned int key)
 {
     sys_trace_idle();


@@ -8,7 +8,7 @@ config SOC_SERIES_INTEL_ADSP_ACE
     select ATOMIC_OPERATIONS_BUILTIN if "$(ZEPHYR_TOOLCHAIN_VARIANT)" != "xcc"
     select ARCH_HAS_COHERENCE
     select SCHED_IPI_SUPPORTED
-    select ARCH_CPU_IDLE_CUSTOM
+    select ARCH_HAS_CUSTOM_CPU_IDLE
     select DW_ICTL_ACE
     select SOC_HAS_RUNTIME_NUM_CPUS
     select HAS_PM


@@ -445,7 +445,7 @@ void pm_state_exit_post_ops(enum pm_state state, uint8_t substate_id)
 
 #endif /* CONFIG_PM */
 
-#ifdef CONFIG_ARCH_CPU_IDLE_CUSTOM
+#ifdef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
 
 __no_optimization
 void arch_cpu_idle(void)
@@ -465,4 +465,4 @@ void arch_cpu_idle(void)
 
     __asm__ volatile ("waiti 0");
 }
-#endif /* CONFIG_ARCH_CPU_IDLE_CUSTOM */
+#endif /* CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE */


@@ -194,7 +194,7 @@ void pm_state_exit_post_ops(enum pm_state state, uint8_t substate_id)
 }
 #endif /* CONFIG_PM */
 
-#ifdef CONFIG_ARCH_CPU_IDLE_CUSTOM
+#ifdef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
 /* xt-clang removes any NOPs more than 8. So we need to set
  * no optimization to avoid those NOPs from being removed.
  *
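A hedged illustration of what that comment is protecting (not code from this commit): it combines the __no_optimization and "waiti 0" pieces shown in the previous file with the NOP padding described above.

/* Sketch only -- the real routine is not shown in full in this diff.
 * __no_optimization (a Zephyr toolchain attribute, pulled in via
 * <zephyr/toolchain.h>) keeps xt-clang from dropping the NOP padding
 * mentioned in the comment above.
 */
#include <zephyr/toolchain.h>

__no_optimization
void arch_cpu_idle(void)
{
    /* Wait for an interrupt at interrupt level 0. */
    __asm__ volatile ("waiti 0");

    /* Padding NOPs after the wait; kept only because optimization is
     * disabled for this function.
     */
    __asm__ volatile ("nop; nop; nop; nop; nop; nop; nop; nop; nop");
}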


@@ -4,8 +4,8 @@
 
 config SOC_SERIES_IT8XXX2
     select CPU_HAS_FPU if "$(ZEPHYR_TOOLCHAIN_VARIANT)" != "zephyr" || RISCV_ISA_EXT_M
     select HAS_PM
-    select ARCH_CPU_IDLE_CUSTOM
-    select ARCH_CPU_ATOMIC_IDLE_CUSTOM
+    select ARCH_HAS_CUSTOM_CPU_IDLE
+    select ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
 
 if SOC_SERIES_IT8XXX2


@@ -17,8 +17,8 @@ config RISCV_CORE_NORDIC_VPR
     select RISCV_HAS_CLIC
     select RISCV_SOC_CONTEXT_SAVE
     select HAS_FLASH_LOAD_OFFSET
-    select ARCH_CPU_IDLE_CUSTOM
-    select ARCH_CPU_ATOMIC_IDLE_CUSTOM
+    select ARCH_HAS_CUSTOM_CPU_IDLE
+    select ARCH_HAS_CUSTOM_CPU_ATOMIC_IDLE
     select INCLUDE_RESET_VECTOR
     help
       Enable support for the RISC-V Nordic VPR core.