kernel: interrupt/idle stacks/threads as array

The set of interrupt stacks is now expressed as an array. We
also define the idle threads and their associated stacks this
way. This allows for iteration in cases where we have multiple
CPUs.

There is now a centralized declaration in kernel_internal.h.

On uniprocessor systems, z_interrupt_stacks has one element
and can be used in the same way as _interrupt_stack.
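
For reference, the centralized declarations now look like this (condensed from
the kernel_internal.h and kernel/init.c hunks below); on uniprocessor builds
CONFIG_MP_NUM_CPUS is 1, so element 0 is the only entry:

    /* kernel_internal.h: one interrupt stack and one idle thread per CPU */
    extern K_THREAD_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
                                       CONFIG_ISR_STACK_SIZE);
    #ifdef CONFIG_MULTITHREADING
    extern struct k_thread z_idle_threads[CONFIG_MP_NUM_CPUS];
    #endif

    /* kernel/init.c: the storage behind those externs */
    K_THREAD_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
                                CONFIG_ISR_STACK_SIZE);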

The IRQ stack for CPU 0 is now set in init.c instead of in
arch code.
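
Concretely, per-CPU bookkeeping, including the CPU 0 IRQ stack pointer, is now
set up by a single loop in prepare_multithreading(); condensed from the
kernel/init.c hunk below:

    for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
        init_idle_thread(&z_idle_threads[i], z_idle_stacks[i]);
        _kernel.cpus[i].idle_thread = &z_idle_threads[i];
        _kernel.cpus[i].id = i;
        _kernel.cpus[i].irq_stack =
            (Z_THREAD_STACK_BUFFER(z_interrupt_stacks[i]) +
             K_THREAD_STACK_SIZEOF(z_interrupt_stacks[i]));
    }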

The extern declaration of the main thread stack has been removed; it does not
need to be in a header.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
commit 80a0d9d16b
Author:    Andrew Boie <andrew.p.boie@intel.com>
Committed: Andrew Boie, 2020-03-12 15:37:29 -07:00

29 changed files with 110 additions and 165 deletions


@@ -114,9 +114,6 @@ static int arc_smp_init(struct device *dev)
 	struct arc_connect_bcr bcr;
 
 	/* necessary master core init */
-	_kernel.cpus[0].id = 0;
-	_kernel.cpus[0].irq_stack = Z_THREAD_STACK_BUFFER(_interrupt_stack)
-				    + CONFIG_ISR_STACK_SIZE;
 	_curr_cpu[0] = &(_kernel.cpus[0]);
 
 	bcr.val = z_arc_v2_aux_reg_read(_ARC_V2_CONNECT_BCR);


@@ -16,14 +16,14 @@
 #include <arch/cpu.h>
 #include <swap_macros.h>
 
-GDATA(_interrupt_stack)
+GDATA(z_interrupt_stacks)
 GDATA(z_main_stack)
 GDATA(_VectorTable)
 
 /* use one of the available interrupt stacks during init */
-#define INIT_STACK _interrupt_stack
+#define INIT_STACK z_interrupt_stacks
 #define INIT_STACK_SIZE CONFIG_ISR_STACK_SIZE
 
 GTEXT(__reset)
@@ -161,7 +161,7 @@ _master_core_startup:
 	mov_s sp, z_main_stack
 	add sp, sp, CONFIG_MAIN_STACK_SIZE
 
-	mov_s r0, _interrupt_stack
+	mov_s r0, z_interrupt_stacks
 	mov_s r1, 0xaa
 	mov_s r2, CONFIG_ISR_STACK_SIZE
 	jl memset


@@ -36,8 +36,6 @@ extern "C" {
 static ALWAYS_INLINE void arch_kernel_init(void)
 {
 	z_irq_setup();
-	_current_cpu->irq_stack =
-		Z_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE;
 }


@@ -46,9 +46,6 @@ extern "C" {
 #define _ARC_V2_INIT_IRQ_LOCK_KEY (0x10 | _ARC_V2_DEF_IRQ_LEVEL)
 
 #ifndef _ASMLANGUAGE
-
-extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
-
 /*
  * z_irq_setup
  *


@@ -20,7 +20,7 @@ _ASM_FILE_PROLOGUE
 GTEXT(z_arm_reset)
 GTEXT(memset)
-GDATA(_interrupt_stack)
+GDATA(z_interrupt_stacks)
 #if defined(CONFIG_PLATFORM_SPECIFIC_INIT)
 GTEXT(z_platform_init)
 #endif
@@ -78,7 +78,7 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
 #endif
 
 #ifdef CONFIG_INIT_STACKS
-	ldr r0, =_interrupt_stack
+	ldr r0, =z_interrupt_stacks
 	ldr r1, =0xaa
 	ldr r2, =CONFIG_ISR_STACK_SIZE
 	bl memset
@@ -86,9 +86,9 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
 	/*
 	 * Set PSP and use it to boot without using MSP, so that it
-	 * gets set to _interrupt_stack during initialization.
+	 * gets set to z_interrupt_stacks during initialization.
 	 */
-	ldr r0, =_interrupt_stack
+	ldr r0, =z_interrupt_stacks
 	ldr r1, =CONFIG_ISR_STACK_SIZE
 	adds r0, r0, r1
 	msr PSP, r0


@@ -21,7 +21,7 @@
 _ASM_FILE_PROLOGUE
 
 GTEXT(z_arm_reset)
-GDATA(_interrupt_stack)
+GDATA(z_interrupt_stacks)
 GDATA(z_arm_svc_stack)
 GDATA(z_arm_sys_stack)
 GDATA(z_arm_fiq_stack)
@@ -156,7 +156,7 @@ SECTION_SUBSEC_FUNC(TEXT, _reset_section, __start)
 	/* IRQ mode stack */
 	msr CPSR_c, #(MODE_IRQ | I_BIT | F_BIT)
-	ldr sp, =(_interrupt_stack + CONFIG_ISR_STACK_SIZE)
+	ldr sp, =(z_interrupt_stacks + CONFIG_ISR_STACK_SIZE)
 
 	/* ABT mode stack */
 	msr CPSR_c, #(MODE_ABT | I_BIT | F_BIT)


@@ -7,6 +7,7 @@
 #include <kernel.h>
 #include <aarch32/cortex_r/stack.h>
 #include <string.h>
+#include <kernel_internal.h>
 
 K_THREAD_STACK_DEFINE(z_arm_fiq_stack, CONFIG_ARMV7_FIQ_STACK_SIZE);
 K_THREAD_STACK_DEFINE(z_arm_abort_stack, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
@@ -21,6 +22,7 @@ void z_arm_init_stacks(void)
 	memset(z_arm_svc_stack, 0xAA, CONFIG_ARMV7_SVC_STACK_SIZE);
 	memset(z_arm_abort_stack, 0xAA, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
 	memset(z_arm_undef_stack, 0xAA, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
-	memset(&_interrupt_stack, 0xAA, CONFIG_ISR_STACK_SIZE);
+	memset(Z_THREAD_STACK_BUFFER(z_interrupt_stacks[0]), 0xAA,
+	       CONFIG_ISR_STACK_SIZE);
 }
 #endif


@@ -140,7 +140,7 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
 	/* Switch to SP_ELn and setup the stack */
 	msr spsel, #1
-	ldr x0, =(_interrupt_stack)
+	ldr x0, =(z_interrupt_stacks)
 	add x0, x0, #(CONFIG_ISR_STACK_SIZE)
 	mov sp, x0


@@ -26,7 +26,8 @@
 extern "C" {
 #endif
 
-extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
+extern K_THREAD_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
+				   CONFIG_ISR_STACK_SIZE);
 
 /**
  *
@@ -39,13 +40,13 @@ extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
  */
 static ALWAYS_INLINE void z_arm_interrupt_stack_setup(void)
 {
-	u32_t msp = (u32_t)(Z_THREAD_STACK_BUFFER(_interrupt_stack)) +
-		    K_THREAD_STACK_SIZEOF(_interrupt_stack);
+	u32_t msp = (u32_t)(Z_THREAD_STACK_BUFFER(z_interrupt_stacks[0])) +
+		    K_THREAD_STACK_SIZEOF(z_interrupt_stacks[0]);
 
 	__set_MSP(msp);
 #if defined(CONFIG_BUILTIN_STACK_GUARD)
 #if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM)
-	__set_MSPLIM((u32_t)_interrupt_stack);
+	__set_MSPLIM((u32_t)z_interrupt_stacks[0]);
 #else
 #error "Built-in MSP limit checks not supported by HW"
 #endif


@@ -24,8 +24,6 @@ extern "C" {
 #else
 
-extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
-
 extern void z_arm_init_stacks(void);
 
 /**


@@ -13,7 +13,7 @@ GTEXT(__reset)
 /* imports */
 GTEXT(_PrepC)
-GTEXT(_interrupt_stack)
+GTEXT(z_interrupt_stacks)
 
 /* Allow use of r1/at (the assembler temporary register) in this
  * code, normally reserved for internal assembler use
@@ -100,18 +100,18 @@ SECTION_FUNC(TEXT, __start)
 #endif /* ALT_CPU_DCACHE_SIZE && defined(CONFIG_INCLUDE_RESET_VECTOR) */
 
 #ifdef CONFIG_INIT_STACKS
-	/* Pre-populate all bytes in _interrupt_stack with 0xAA
-	 * init.c enforces that the _interrupt_stack pointer
+	/* Pre-populate all bytes in z_interrupt_stacks with 0xAA
+	 * init.c enforces that the z_interrupt_stacks pointer
 	 * and CONFIG_ISR_STACK_SIZE are a multiple of STACK_ALIGN (4) */
-	movhi r1, %hi(_interrupt_stack)
-	ori r1, r1, %lo(_interrupt_stack)
+	movhi r1, %hi(z_interrupt_stacks)
+	ori r1, r1, %lo(z_interrupt_stacks)
 	movhi r2, %hi(CONFIG_ISR_STACK_SIZE)
 	ori r2, r2, %lo(CONFIG_ISR_STACK_SIZE)
 
 	/* Put constant 0xaaaaaaaa in r3 */
 	movhi r3, 0xaaaa
 	ori r3, r3, 0xaaaa
 1:
-	/* Loop through the _interrupt_stack treating it as an array of
+	/* Loop through the z_interrupt_stacks treating it as an array of
 	 * u32_t, setting each element to r3 */
 	stw r3, (r1)
 	subi r2, r2, 4
@@ -123,8 +123,8 @@ SECTION_FUNC(TEXT, __start)
 	 * to use this as the CPU boots up with interrupts disabled and we
 	 * don't turn them on until much later, when the kernel is on
 	 * the main stack */
-	movhi sp, %hi(_interrupt_stack)
-	ori sp, sp, %lo(_interrupt_stack)
+	movhi sp, %hi(z_interrupt_stacks)
+	ori sp, sp, %lo(z_interrupt_stacks)
 	addi sp, sp, CONFIG_ISR_STACK_SIZE
 
 #if defined(CONFIG_GP_LOCAL) || defined(CONFIG_GP_GLOBAL) || \


@@ -43,8 +43,6 @@
 extern "C" {
 #endif
 
-extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
-
 #ifdef __cplusplus
 }
 #endif


@@ -30,8 +30,6 @@ extern "C" {
 static ALWAYS_INLINE void arch_kernel_init(void)
 {
-	_kernel.irq_stack =
-		Z_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE;
 }
 
 static ALWAYS_INLINE void


@@ -47,12 +47,12 @@ loop_slave_core:
 boot_master_core:
 
 #ifdef CONFIG_INIT_STACKS
-	/* Pre-populate all bytes in _interrupt_stack with 0xAA */
-	la t0, _interrupt_stack
+	/* Pre-populate all bytes in z_interrupt_stacks with 0xAA */
+	la t0, z_interrupt_stacks
 	li t1, CONFIG_ISR_STACK_SIZE
 	add t1, t1, t0
 
-	/* Populate _interrupt_stack with 0xaaaaaaaa */
+	/* Populate z_interrupt_stacks with 0xaaaaaaaa */
 	li t2, 0xaaaaaaaa
 aa_loop:
 	sw t2, 0x00(t0)
@@ -62,9 +62,9 @@ aa_loop:
 	/*
 	 * Initially, setup stack pointer to
-	 * _interrupt_stack + CONFIG_ISR_STACK_SIZE
+	 * z_interrupt_stacks + CONFIG_ISR_STACK_SIZE
 	 */
-	la sp, _interrupt_stack
+	la sp, z_interrupt_stacks
 	li t0, CONFIG_ISR_STACK_SIZE
 	add sp, sp, t0


@@ -24,8 +24,6 @@ extern "C" {
 #ifndef _ASMLANGUAGE
 static ALWAYS_INLINE void arch_kernel_init(void)
 {
-	_kernel.irq_stack =
-		Z_THREAD_STACK_BUFFER(_interrupt_stack) + CONFIG_ISR_STACK_SIZE;
 }
 
 static ALWAYS_INLINE void


@@ -49,7 +49,15 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, u16_t cs)
 	if (arch_is_in_isr()) {
 		/* We were servicing an interrupt */
-		start = (uintptr_t)ARCH_THREAD_STACK_BUFFER(_interrupt_stack);
+		int cpu_id;
+
+#ifdef CONFIG_SMP
+		cpu_id = arch_curr_cpu()->id;
+#else
+		cpu_id = 0;
+#endif
+		start = (uintptr_t)Z_THREAD_STACK_BUFFER(
+			z_interrupt_stacks[cpu_id]);
 		end = start + CONFIG_ISR_STACK_SIZE;
 	} else if ((cs & 0x3U) != 0U ||
 		   (_current->base.user_options & K_USER) == 0) {


@@ -26,7 +26,7 @@
 	GTEXT(z_x86_prep_c)
 
 	GDATA(_idt_base_address)
-	GDATA(_interrupt_stack)
+	GDATA(z_interrupt_stacks)
 	GDATA(z_x86_idt)
 #ifndef CONFIG_GDT_DYNAMIC
 	GDATA(_gdt)
@@ -135,7 +135,7 @@ __csSet:
 	 * dual-purposing of this area of memory is safe since
 	 * interrupts are disabled until the first context switch.
 	 *
-	 * kernel/init.c enforces that the _interrupt_stack pointer and
+	 * kernel/init.c enforces that the z_interrupt_stacks pointer and
 	 * the ISR stack size are some multiple of STACK_ALIGN, which
 	 * is at least 4.
 	 *
@@ -150,7 +150,7 @@ __csSet:
 	 */
 #ifdef CONFIG_INIT_STACKS
 	movl $0xAAAAAAAA, %eax
-	leal _interrupt_stack, %edi
+	leal z_interrupt_stacks, %edi
 #ifdef CONFIG_X86_STACK_PROTECTION
 	addl $4096, %edi
 #endif
@@ -159,7 +159,7 @@ __csSet:
 	rep stosl
 #endif
 
-	movl $_interrupt_stack, %esp
+	movl $z_interrupt_stacks, %esp
 
 #ifdef CONFIG_X86_STACK_PROTECTION
 	/* In this configuration, all stacks, including IRQ stack, are declared
 	 * with a 4K non-present guard page preceding the stack buffer


@@ -181,8 +181,8 @@ static FUNC_NORETURN __used void df_handler_top(void)
 	_df_esf.eflags = _main_tss.eflags;
 
 	/* Restore the main IA task to a runnable state */
-	_main_tss.esp = (u32_t)(ARCH_THREAD_STACK_BUFFER(_interrupt_stack) +
-				CONFIG_ISR_STACK_SIZE);
+	_main_tss.esp = (u32_t)(ARCH_THREAD_STACK_BUFFER(
+		z_interrupt_stacks[0]) + CONFIG_ISR_STACK_SIZE);
 	_main_tss.cs = CODE_SEG;
 	_main_tss.ds = DATA_SEG;
 	_main_tss.es = DATA_SEG;


@@ -7,6 +7,7 @@
 #include <kernel_arch_data.h>
 #include <kernel_arch_func.h>
 #include <kernel_structs.h>
+#include <kernel_internal.h>
 #include <arch/x86/multiboot.h>
 #include <arch/x86/mmustructs.h>
 #include <drivers/interrupt_controller/loapic.h>
@@ -85,7 +86,7 @@ struct x86_cpuboot x86_cpuboot[] = {
 	{
 		.tr = X86_KERNEL_CPU0_TR,
 		.gs_base = &tss0,
-		.sp = (u64_t) _interrupt_stack + CONFIG_ISR_STACK_SIZE,
+		.sp = (u64_t) z_interrupt_stacks[0] + CONFIG_ISR_STACK_SIZE,
 		.fn = z_x86_prep_c,
 #ifdef CONFIG_X86_MMU
 		.ptables = &z_x86_flat_ptables,


@@ -19,8 +19,6 @@ FUNC_NORETURN void z_x86_prep_c(void *arg)
 	struct multiboot_info *info = arg;
 
 	_kernel.cpus[0].nested = 0;
-	_kernel.cpus[0].irq_stack = Z_THREAD_STACK_BUFFER(_interrupt_stack) +
-				    CONFIG_ISR_STACK_SIZE;
 
 #ifdef CONFIG_X86_VERY_EARLY_CONSOLE
 	z_x86_early_serial_init();
@@ -41,7 +39,7 @@ FUNC_NORETURN void z_x86_prep_c(void *arg)
 #endif
 
 #if CONFIG_X86_STACK_PROTECTION
-	z_x86_mmu_set_flags(&z_x86_kernel_ptables, _interrupt_stack,
+	z_x86_mmu_set_flags(&z_x86_kernel_ptables, z_interrupt_stacks[0],
 			    MMU_PAGE_SIZE, MMU_ENTRY_READ, Z_X86_MMU_RW,
 			    true);
 #endif


@@ -39,11 +39,6 @@ static inline bool arch_is_in_isr(void)
 #define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN)
 #define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN)
 
-extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
-extern K_THREAD_STACK_DEFINE(_interrupt_stack1, CONFIG_ISR_STACK_SIZE);
-extern K_THREAD_STACK_DEFINE(_interrupt_stack2, CONFIG_ISR_STACK_SIZE);
-extern K_THREAD_STACK_DEFINE(_interrupt_stack3, CONFIG_ISR_STACK_SIZE);
-
 struct multiboot_info;
 
 extern FUNC_NORETURN void z_x86_prep_c(void *arg);


@@ -10,6 +10,7 @@
 #define ZEPHYR_ARCH_XTENSA_INCLUDE_KERNEL_ARCH_FUNC_H_
 
 #ifndef _ASMLANGUAGE
+#include <kernel_internal.h>
 #include <kernel_arch_data.h>
 #include <string.h>
 
@@ -29,15 +30,14 @@ extern void z_xtensa_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
 /* Defined in xtensa_context.S */
 extern void z_xt_coproc_init(void);
 
-extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
+extern K_THREAD_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
+				   CONFIG_ISR_STACK_SIZE);
 
 static ALWAYS_INLINE void arch_kernel_init(void)
 {
 	_cpu_t *cpu0 = &_kernel.cpus[0];
 
 	cpu0->nested = 0;
-	cpu0->irq_stack = (Z_THREAD_STACK_BUFFER(_interrupt_stack) +
-			   CONFIG_ISR_STACK_SIZE);
 
 	/* The asm2 scheme keeps the kernel pointer in MISC0 for easy
 	 * access. That saves 4 bytes of immediate value to store the
@@ -48,7 +48,7 @@ static ALWAYS_INLINE void arch_kernel_init(void)
 	WSR(CONFIG_XTENSA_KERNEL_CPU_PTR_SR, cpu0);
 
 #ifdef CONFIG_INIT_STACKS
-	memset(Z_THREAD_STACK_BUFFER(_interrupt_stack), 0xAA,
+	memset(Z_THREAD_STACK_BUFFER(z_interrupt_stacks[0]), 0xAA,
 	       CONFIG_ISR_STACK_SIZE);
 #endif
 }


@@ -126,9 +126,13 @@ extern u32_t z_timestamp_idle; /* timestamp when CPU goes idle */
 #endif
 
 extern struct k_thread z_main_thread;
-extern struct k_thread z_idle_thread;
-extern K_THREAD_STACK_DEFINE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
-extern K_THREAD_STACK_DEFINE(z_idle_stack, CONFIG_IDLE_STACK_SIZE);
+
+#ifdef CONFIG_MULTITHREADING
+extern struct k_thread z_idle_threads[CONFIG_MP_NUM_CPUS];
+#endif
+
+extern K_THREAD_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
+				   CONFIG_ISR_STACK_SIZE);
 
 #ifdef __cplusplus
 }


@@ -91,11 +91,15 @@ static inline bool z_is_idle_thread_entry(void *entry_point)
 static inline bool z_is_idle_thread_object(struct k_thread *thread)
 {
+#ifdef CONFIG_MULTITHREADING
 #ifdef CONFIG_SMP
 	return thread->base.is_idle;
 #else
-	return thread == &z_idle_thread;
+	return thread == &z_idle_threads[0];
 #endif
+#else
+	return false;
+#endif /* CONFIG_MULTITHREADING */
 }
 
 static inline bool z_is_thread_pending(struct k_thread *thread)


@@ -58,10 +58,13 @@ u32_t __noinit z_timestamp_idle; /* timestamp when CPU goes idle */
 /* init/main and idle threads */
 K_THREAD_STACK_DEFINE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
-K_THREAD_STACK_DEFINE(z_idle_stack, CONFIG_IDLE_STACK_SIZE);
 struct k_thread z_main_thread;
-struct k_thread z_idle_thread;
+
+#ifdef CONFIG_MULTITHREADING
+struct k_thread z_idle_threads[CONFIG_MP_NUM_CPUS];
+static K_THREAD_STACK_ARRAY_DEFINE(z_idle_stacks, CONFIG_MP_NUM_CPUS,
+				   CONFIG_IDLE_STACK_SIZE);
+#endif /* CONFIG_MULTITHREADING */
 
 /*
  * storage space for the interrupt stack
@@ -71,34 +74,8 @@ struct k_thread z_idle_thread;
  * of this area is safe since interrupts are disabled until the kernel context
 * switches to the init thread.
 */
-K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
-
-/*
- * Similar idle thread & interrupt stack definitions for the
- * auxiliary CPUs. The declaration macros aren't set up to define an
- * array, so do it with a simple test for up to 4 processors. Should
- * clean this up in the future.
- */
-#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1
-K_THREAD_STACK_DEFINE(_idle_stack1, CONFIG_IDLE_STACK_SIZE);
-static struct k_thread _idle_thread1_s;
-k_tid_t const _idle_thread1 = (k_tid_t)&_idle_thread1_s;
-K_THREAD_STACK_DEFINE(_interrupt_stack1, CONFIG_ISR_STACK_SIZE);
-#endif
-
-#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 2
-K_THREAD_STACK_DEFINE(_idle_stack2, CONFIG_IDLE_STACK_SIZE);
-static struct k_thread _idle_thread2_s;
-k_tid_t const _idle_thread2 = (k_tid_t)&_idle_thread2_s;
-K_THREAD_STACK_DEFINE(_interrupt_stack2, CONFIG_ISR_STACK_SIZE);
-#endif
-
-#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 3
-K_THREAD_STACK_DEFINE(_idle_stack3, CONFIG_IDLE_STACK_SIZE);
-static struct k_thread _idle_thread3_s;
-k_tid_t const _idle_thread3 = (k_tid_t)&_idle_thread3_s;
-K_THREAD_STACK_DEFINE(_interrupt_stack3, CONFIG_ISR_STACK_SIZE);
-#endif
+K_THREAD_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
+			    CONFIG_ISR_STACK_SIZE);
 
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 #define initialize_timeouts() do { \
@@ -375,35 +352,16 @@ static void prepare_multithreading(struct k_thread *dummy_thread)
 	z_mark_thread_as_started(&z_main_thread);
 	z_ready_thread(&z_main_thread);
 
-	init_idle_thread(&z_idle_thread, z_idle_stack);
-	_kernel.cpus[0].idle_thread = &z_idle_thread;
-
-#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1
-	init_idle_thread(_idle_thread1, _idle_stack1);
-	_kernel.cpus[1].idle_thread = _idle_thread1;
-	_kernel.cpus[1].id = 1;
-	_kernel.cpus[1].irq_stack = Z_THREAD_STACK_BUFFER(_interrupt_stack1)
-		+ CONFIG_ISR_STACK_SIZE;
-#endif
-
-#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 2
-	init_idle_thread(_idle_thread2, _idle_stack2);
-	_kernel.cpus[2].idle_thread = _idle_thread2;
-	_kernel.cpus[2].id = 2;
-	_kernel.cpus[2].irq_stack = Z_THREAD_STACK_BUFFER(_interrupt_stack2)
-		+ CONFIG_ISR_STACK_SIZE;
-#endif
-
-#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 3
-	init_idle_thread(_idle_thread3, _idle_stack3);
-	_kernel.cpus[3].idle_thread = _idle_thread3;
-	_kernel.cpus[3].id = 3;
-	_kernel.cpus[3].irq_stack = Z_THREAD_STACK_BUFFER(_interrupt_stack3)
-		+ CONFIG_ISR_STACK_SIZE;
-#endif
+	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+		init_idle_thread(&z_idle_threads[i], z_idle_stacks[i]);
+		_kernel.cpus[i].idle_thread = &z_idle_threads[i];
+		_kernel.cpus[i].id = i;
+		_kernel.cpus[i].irq_stack =
+			(Z_THREAD_STACK_BUFFER(z_interrupt_stacks[i]) +
+			 K_THREAD_STACK_SIZEOF(z_interrupt_stacks[i]));
+	}
 
 	initialize_timeouts();
 }
 
 static FUNC_NORETURN void switch_to_main_thread(void)


@@ -60,10 +60,6 @@ void z_smp_release_global_lock(struct k_thread *thread)
 	}
 }
 
-extern k_thread_stack_t _interrupt_stack1[];
-extern k_thread_stack_t _interrupt_stack2[];
-extern k_thread_stack_t _interrupt_stack3[];
-
 #if CONFIG_MP_NUM_CPUS > 1
 static FUNC_NORETURN void smp_init_top(void *arg)
 {
@@ -93,19 +89,11 @@ void z_smp_init(void)
 {
 	(void)atomic_clear(&start_flag);
 
-#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1
-	arch_start_cpu(1, _interrupt_stack1, CONFIG_ISR_STACK_SIZE,
-		       smp_init_top, &start_flag);
-#endif
-
-#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 2
-	arch_start_cpu(2, _interrupt_stack2, CONFIG_ISR_STACK_SIZE,
-		       smp_init_top, &start_flag);
-#endif
-
-#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 3
-	arch_start_cpu(3, _interrupt_stack3, CONFIG_ISR_STACK_SIZE,
-		       smp_init_top, &start_flag);
+#if defined(CONFIG_SMP) && (CONFIG_MP_NUM_CPUS > 1)
+	for (int i = 1; i < CONFIG_MP_NUM_CPUS; i++) {
+		arch_start_cpu(i, z_interrupt_stacks[i], CONFIG_ISR_STACK_SIZE,
+			       smp_init_top, &start_flag);
+	}
 #endif
 
 	(void)atomic_set(&start_flag, 1);


@@ -11,7 +11,7 @@
 _ASM_FILE_PROLOGUE
 
-GDATA(_interrupt_stack)
+GDATA(z_interrupt_stacks)
 
 GTEXT(z_do_software_reboot)
 
 SECTION_FUNC(TEXT,z_do_software_reboot)
@@ -45,7 +45,7 @@ SECTION_FUNC(TEXT,z_force_exit_one_nested_irq)
 	ldrne lr, =0xfffffffd
 	ldrne r2, =z_do_software_reboot
 
-	ldr ip, =_interrupt_stack
+	ldr ip, =z_interrupt_stacks
 	add.w ip, ip, #(___esf_t_SIZEOF * 2) /* enough for a stack frame */
 	ldr r1, =0xfffffffe
 	and.w r2, r1


@@ -131,7 +131,8 @@ static void shell_stack_dump(const struct k_thread *thread, void *user_data)
 		      size, unused, size - unused, size, pcnt);
 }
 
-extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
+extern K_THREAD_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
+				   CONFIG_ISR_STACK_SIZE);
 
 static int cmd_kernel_stacks(const struct shell *shell,
 			     size_t argc, char **argv)
@@ -144,26 +145,27 @@ static int cmd_kernel_stacks(const struct shell *shell,
 	k_thread_foreach(shell_stack_dump, (void *)shell);
 
 	/* Placeholder logic for interrupt stack until we have better
-	 * kernel support, including dumping all IRQ stacks for SMP systems
-	 * and hooks to dump arch-specific exception-related stack buffers.
-	 *
-	 * For now, dump data for the first IRQ stack defined in init.c
+	 * kernel support, including dumping arch-specific exception-related
+	 * stack buffers.
 	 */
-	buf = Z_THREAD_STACK_BUFFER(_interrupt_stack);
-	size = K_THREAD_STACK_SIZEOF(_interrupt_stack);
-
-	for (size_t i = 0; i < K_THREAD_STACK_SIZEOF(_interrupt_stack); i++) {
-		if (buf[i] == 0xAAU) {
-			unused++;
-		} else {
-			break;
-		}
-	}
-
-	shell_print(shell,
-		    "%p IRQ 0 (real size %zu):\tunused %zu\tusage %zu / %zu (%zu %%)",
-		    _interrupt_stack, size, unused, size - unused, size,
-		    ((size - unused) * 100U) / size);
+	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+		buf = Z_THREAD_STACK_BUFFER(z_interrupt_stacks[i]);
+		size = K_THREAD_STACK_SIZEOF(z_interrupt_stacks[i]);
+
+		for (size_t i = 0; i < size; i++) {
+			if (buf[i] == 0xAAU) {
+				unused++;
+			} else {
+				break;
+			}
+		}
+
+		shell_print(shell,
+			    "%p IRQ %02d (real size %zu):\tunused %zu\tusage %zu / %zu (%zu %%)",
+			    &z_interrupt_stacks[i], i, size, unused,
+			    size - unused, size,
+			    ((size - unused) * 100U) / size);
+	}
 
 	return 0;
 }


@@ -55,7 +55,7 @@ void z_impl_test_arm_user_syscall(void)
 #if defined(CONFIG_BUILTIN_STACK_GUARD)
 	zassert_true(__get_PSPLIM() == _current->arch.priv_stack_start,
 		"PSPLIM not guarding the thread's privileged stack\n");
-	zassert_true(__get_MSPLIM() == (u32_t)_interrupt_stack,
+	zassert_true(__get_MSPLIM() == (u32_t)z_interrupt_stacks,
 		"MSPLIM not guarding the interrupt stack\n");
 #endif
 }
@@ -108,7 +108,7 @@ void arm_isr_handler(void *args)
 	 */
 	zassert_true(__get_PSPLIM() == 0,
 		"PSPLIM not clear\n");
-	zassert_true(__get_MSPLIM() == (u32_t)_interrupt_stack,
+	zassert_true(__get_MSPLIM() == (u32_t)z_interrupt_stacks,
 		"MSPLIM not guarding the interrupt stack\n");
 #endif
 }
@@ -176,7 +176,7 @@ void test_arm_syscalls(void)
 #if defined(CONFIG_BUILTIN_STACK_GUARD)
 	zassert_true(__get_PSPLIM() == _current->stack_info.start,
 		"PSPLIM not guarding the default stack\n");
-	zassert_true(__get_MSPLIM() == (u32_t)_interrupt_stack,
+	zassert_true(__get_MSPLIM() == (u32_t)z_interrupt_stacks,
 		"MSPLIM not guarding the interrupt stack\n");
 #endif