From e69465634589a2b13a8662e264df48dc923f02d2 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Thu, 25 Jan 2018 16:39:35 -0800 Subject: [PATCH] kernel: Move per-cpu _kernel_t fields into separate struct When in SMP mode, the nested/irq_stack/current fields are specific to the current CPU and not to the kernel as a whole, so we need an array of these. Place them in a _cpu_t struct and implement a _arch_curr_cpu() function to retrieve the pointer. When not in SMP mode, the first CPU's fields are defined as a union with the first _cpu_t record. This permits compatibility with legacy assembly on other platforms. Long term, all users, including uniprocessor architectures, should be updated to use the new scheme. Fundamentally this is just renaming: the structure layout and runtime code do not change on any existing platforms and won't until someone defines a second CPU. Signed-off-by: Andy Ross --- arch/xtensa/core/xtensa-asm2-util.S | 2 +- arch/xtensa/core/xtensa-asm2.c | 8 +++---- arch/xtensa/include/kernel_arch_func.h | 22 +++++++++++++----- arch/xtensa/soc/esp32/soc.c | 8 +++++++ kernel/include/kernel_offsets.h | 10 ++++++-- kernel/include/kernel_structs.h | 32 ++++++++++++++++++++++++-- kernel/include/kswap.h | 6 ++--- 7 files changed, 70 insertions(+), 18 deletions(-) diff --git a/arch/xtensa/core/xtensa-asm2-util.S b/arch/xtensa/core/xtensa-asm2-util.S index f2c6750313f..33b41370010 100644 --- a/arch/xtensa/core/xtensa-asm2-util.S +++ b/arch/xtensa/core/xtensa-asm2-util.S @@ -232,7 +232,7 @@ _switch_restore_pc: */ .align 4 _handle_excint: - EXCINT_HANDLER MISC0, ___kernel_t_nested_OFFSET, ___kernel_t_irq_stack_OFFSET + EXCINT_HANDLER MISC0, ___cpu_t_nested_OFFSET, ___cpu_t_irq_stack_OFFSET /* Define the actual vectors for the hardware-defined levels with * DEF_EXCINT. 
These load a C handler address and jump to our handler diff --git a/arch/xtensa/core/xtensa-asm2.c b/arch/xtensa/core/xtensa-asm2.c index 05a3617e30b..8f2412cb0d4 100644 --- a/arch/xtensa/core/xtensa-asm2.c +++ b/arch/xtensa/core/xtensa-asm2.c @@ -140,16 +140,16 @@ static void dump_stack(int *stack) #if CONFIG_XTENSA_ASM2 static inline void *restore_stack(void *interrupted_stack) { - if (!_is_preempt(_kernel.current)) { + if (!_is_preempt(_current)) { return interrupted_stack; } int key = irq_lock(); - _kernel.current->switch_handle = interrupted_stack; - _kernel.current = _get_next_ready_thread(); + _current->switch_handle = interrupted_stack; + _current = _get_next_ready_thread(); - void *ret = _kernel.current->switch_handle; + void *ret = _current->switch_handle; irq_unlock(key); diff --git a/arch/xtensa/include/kernel_arch_func.h b/arch/xtensa/include/kernel_arch_func.h index fd8792ca40d..b436bed9ee6 100644 --- a/arch/xtensa/include/kernel_arch_func.h +++ b/arch/xtensa/include/kernel_arch_func.h @@ -28,6 +28,15 @@ extern void _xt_coproc_init(void); extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE); +static ALWAYS_INLINE _cpu_t *_arch_curr_cpu(void) +{ + void *val; + + __asm__ volatile("rsr.misc0 %0" : "=r"(val)); + + return val; +} + /** * * @brief Performs architecture-specific initialization @@ -40,11 +49,13 @@ extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE); */ static ALWAYS_INLINE void kernel_arch_init(void) { - _kernel.nested = 0; + _cpu_t *cpu0 = &_kernel.cpus[0]; + + cpu0->nested = 0; #if CONFIG_XTENSA_ASM2 - _kernel.irq_stack = (K_THREAD_STACK_BUFFER(_interrupt_stack) + - CONFIG_ISR_STACK_SIZE); + cpu0->irq_stack = (K_THREAD_STACK_BUFFER(_interrupt_stack) + + CONFIG_ISR_STACK_SIZE); /* The asm2 scheme keeps the kernel pointer in MISC0 for easy * access. 
That saves 4 bytes of immediate value to store the @@ -52,9 +63,8 @@ static ALWAYS_INLINE void kernel_arch_init(void) * this record is a per-CPU thing and having it stored in a SR * already is a big win. */ - void *cpuptr = &_kernel; + __asm__ volatile("wsr.MISC0 %0; rsync" : : "r"(cpu0)); - __asm__ volatile("wsr.MISC0 %0; rsync" : : "r"(cpuptr)); #endif #if !defined(CONFIG_XTENSA_ASM2) && XCHAL_CP_NUM > 0 @@ -102,7 +112,7 @@ static inline void _IntLibInit(void) } #endif -#define _is_in_isr() (_kernel.nested != 0) +#define _is_in_isr() (_arch_curr_cpu()->nested != 0) #endif /* _ASMLANGUAGE */ diff --git a/arch/xtensa/soc/esp32/soc.c b/arch/xtensa/soc/esp32/soc.c index 697cccba12d..80bacfae851 100644 --- a/arch/xtensa/soc/esp32/soc.c +++ b/arch/xtensa/soc/esp32/soc.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -60,6 +61,13 @@ void __attribute__((section(".iram1"))) __start(void) /* Disable CPU1 while we figure out how to have SMP in Zephyr. */ *app_cpu_config_reg &= ~DPORT_APPCPU_CLKGATE_EN; + /* Initialize the architecture CPU pointer. Some of the + * initialization code wants a valid _current before + * kernel_arch_init() is invoked. 
+ */ + __asm__ volatile("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[0])); + + /* Start Zephyr */ _Cstart(); diff --git a/kernel/include/kernel_offsets.h b/kernel/include/kernel_offsets.h index cdb41c86601..9db64b1a249 100644 --- a/kernel/include/kernel_offsets.h +++ b/kernel/include/kernel_offsets.h @@ -17,14 +17,20 @@ GEN_ABS_SYM_BEGIN(_OffsetAbsSyms) +#ifndef CONFIG_SMP GEN_OFFSET_SYM(_kernel_t, current); +GEN_OFFSET_SYM(_kernel_t, nested); +GEN_OFFSET_SYM(_kernel_t, irq_stack); +#endif + +GEN_OFFSET_SYM(_cpu_t, current); +GEN_OFFSET_SYM(_cpu_t, nested); +GEN_OFFSET_SYM(_cpu_t, irq_stack); #if defined(CONFIG_THREAD_MONITOR) GEN_OFFSET_SYM(_kernel_t, threads); #endif -GEN_OFFSET_SYM(_kernel_t, nested); -GEN_OFFSET_SYM(_kernel_t, irq_stack); #ifdef CONFIG_SYS_POWER_MANAGEMENT GEN_OFFSET_SYM(_kernel_t, idle); #endif diff --git a/kernel/include/kernel_structs.h b/kernel/include/kernel_structs.h index dbe3f104362..5716822bf93 100644 --- a/kernel/include/kernel_structs.h +++ b/kernel/include/kernel_structs.h @@ -79,8 +79,7 @@ struct _ready_q { typedef struct _ready_q _ready_q_t; -struct _kernel { - +struct _cpu { /* nested interrupt count */ u32_t nested; @@ -89,6 +88,30 @@ struct _kernel { /* currently scheduled thread */ struct k_thread *current; +}; + +typedef struct _cpu _cpu_t; + +struct _kernel { + /* For compatibility with pre-SMP code, union the first CPU + * record with the legacy fields so code can continue to use + * the "_kernel.XXX" expressions and assembly offsets. 
+ */ + union { + struct _cpu cpus[CONFIG_MP_NUM_CPUS]; +#ifndef CONFIG_SMP + struct { + /* nested interrupt count */ + u32_t nested; + + /* interrupt stack pointer base */ + char *irq_stack; + + /* currently scheduled thread */ + struct k_thread *current; + }; +#endif + }; #ifdef CONFIG_SYS_CLOCK_EXISTS /* queue of timeouts */ @@ -131,7 +154,12 @@ typedef struct _kernel _kernel_t; extern struct _kernel _kernel; +#ifdef CONFIG_SMP +#define _current (_arch_curr_cpu()->current) +#else #define _current _kernel.current +#endif + #define _ready_q _kernel.ready_q #define _timeout_q _kernel.timeout_q #define _threads _kernel.threads diff --git a/kernel/include/kswap.h b/kernel/include/kswap.h index a8be6169d83..05baf631fc4 100644 --- a/kernel/include/kswap.h +++ b/kernel/include/kswap.h @@ -34,7 +34,7 @@ static inline unsigned int _Swap(unsigned int key) struct k_thread *new_thread, *old_thread; int ret; - old_thread = _kernel.current; + old_thread = _current; _check_stack_sentinel(); _update_time_slice_before_swap(); @@ -43,11 +43,11 @@ static inline unsigned int _Swap(unsigned int key) old_thread->swap_retval = -EAGAIN; - _kernel.current = new_thread; + _current = new_thread; _arch_switch(new_thread->switch_handle, &old_thread->switch_handle); - ret =_kernel.current->swap_retval; + ret = _current->swap_retval; irq_unlock(key);