smp: Move arrays to use CONFIG_MP_MAX_NUM_CPUS

Move to use CONFIG_MP_MAX_NUM_CPUS for array size declarations instead
of CONFIG_MP_NUM_CPUS.

Signed-off-by: Kumar Gala <kumar.gala@intel.com>
Authored by Kumar Gala on 2022-10-12 10:55:36 -05:00; committed by Stephanos Ioannidis
commit c778eb2a56
25 changed files with 38 additions and 38 deletions
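
The pattern throughout the diff is the same: per-CPU storage is now dimensioned with the build-time upper bound CONFIG_MP_MAX_NUM_CPUS instead of CONFIG_MP_NUM_CPUS. A minimal sketch of the intended usage follows (not taken from this commit; the per_cpu_counter array and reset_counters() helper are hypothetical, and it assumes the arch_num_cpus() runtime query introduced alongside CONFIG_MP_MAX_NUM_CPUS):

#include <zephyr/kernel.h>

/* Storage is sized for the largest CPU count the image can support. */
static uint32_t per_cpu_counter[CONFIG_MP_MAX_NUM_CPUS];

static void reset_counters(void)
{
	/* Assumed: arch_num_cpus() returns the number of CPUs actually
	 * brought up at runtime, which is never larger than
	 * CONFIG_MP_MAX_NUM_CPUS.
	 */
	unsigned int num_cpus = arch_num_cpus();

	for (unsigned int i = 0; i < num_cpus; i++) {
		per_cpu_counter[i] = 0U;
	}
}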


@@ -20,7 +20,7 @@
 volatile struct {
 	arch_cpustart_t fn;
 	void *arg;
-} arc_cpu_init[CONFIG_MP_NUM_CPUS];
+} arc_cpu_init[CONFIG_MP_MAX_NUM_CPUS];
 /*
  * arc_cpu_wake_flag is used to sync up master core and slave cores
@@ -36,7 +36,7 @@ volatile char *arc_cpu_sp;
  * _curr_cpu is used to record the struct of _cpu_t of each cpu.
  * for efficient usage in assembly
  */
-volatile _cpu_t *_curr_cpu[CONFIG_MP_NUM_CPUS];
+volatile _cpu_t *_curr_cpu[CONFIG_MP_MAX_NUM_CPUS];
 /* Called from Zephyr initialization */
 void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,


@@ -32,7 +32,7 @@
  */
 #if defined(CONFIG_ARC_FIRQ_STACK)
 #if defined(CONFIG_SMP)
-K_KERNEL_STACK_ARRAY_DEFINE(_firq_interrupt_stack, CONFIG_MP_NUM_CPUS,
+K_KERNEL_STACK_ARRAY_DEFINE(_firq_interrupt_stack, CONFIG_MP_MAX_NUM_CPUS,
			    CONFIG_ARC_FIRQ_STACK_SIZE);
 #else
 K_KERNEL_STACK_DEFINE(_firq_interrupt_stack, CONFIG_ARC_FIRQ_STACK_SIZE);


@@ -265,7 +265,7 @@ int arch_float_enable(struct k_thread *thread, unsigned int options)
 #if !defined(CONFIG_MULTITHREADING)
-K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS, CONFIG_ISR_STACK_SIZE);
+K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ISR_STACK_SIZE);
 K_THREAD_STACK_DECLARE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
 extern void z_main_no_multithreading_entry_wrapper(void *p1, void *p2, void *p3,


@@ -26,7 +26,7 @@
 extern "C" {
 #endif
-K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
+K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
			     CONFIG_ISR_STACK_SIZE);
 /**


@@ -11,7 +11,7 @@
 volatile struct {
 	arch_cpustart_t fn;
 	void *arg;
-} riscv_cpu_init[CONFIG_MP_NUM_CPUS];
+} riscv_cpu_init[CONFIG_MP_MAX_NUM_CPUS];
 volatile uintptr_t riscv_cpu_wake_flag;
 volatile void *riscv_cpu_sp;


@@ -12,7 +12,7 @@
 static struct {
 	irq_offload_routine_t fn;
 	const void *arg;
-} offload_params[CONFIG_MP_NUM_CPUS];
+} offload_params[CONFIG_MP_MAX_NUM_CPUS];
 static void irq_offload_isr(const void *param)
 {


@@ -22,7 +22,7 @@ extern "C" {
 extern void z_xtensa_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
-K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
+K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
			     CONFIG_ISR_STACK_SIZE);
 static ALWAYS_INLINE void arch_kernel_init(void)


@@ -78,7 +78,7 @@ void default_intr_handler(void *arg)
 	printk("Unhandled interrupt %d on cpu %d!\n", (int)arg, esp_core_id());
 }
-static struct intr_alloc_table_entry intr_alloc_table[ESP_INTC_INTS_NUM * CONFIG_MP_NUM_CPUS];
+static struct intr_alloc_table_entry intr_alloc_table[ESP_INTC_INTS_NUM * CONFIG_MP_MAX_NUM_CPUS];
 static void set_interrupt_handler(int n, intc_handler_t f, void *arg)
 {
@@ -92,10 +92,10 @@ static void set_interrupt_handler(int n, intc_handler_t f, void *arg)
 static struct vector_desc_t *vector_desc_head; /* implicitly initialized to NULL */
 /* This bitmask has an 1 if the int should be disabled when the flash is disabled. */
-static uint32_t non_iram_int_mask[CONFIG_MP_NUM_CPUS];
+static uint32_t non_iram_int_mask[CONFIG_MP_MAX_NUM_CPUS];
 /* This bitmask has 1 in it if the int was disabled using esp_intr_noniram_disable. */
-static uint32_t non_iram_int_disabled[CONFIG_MP_NUM_CPUS];
-static bool non_iram_int_disabled_flag[CONFIG_MP_NUM_CPUS];
+static uint32_t non_iram_int_disabled[CONFIG_MP_MAX_NUM_CPUS];
+static bool non_iram_int_disabled_flag[CONFIG_MP_MAX_NUM_CPUS];
 /*
  * Inserts an item into vector_desc list so that the list is sorted


@@ -15,7 +15,7 @@
 #include <string.h>
 /* Redistributor base addresses for each core */
-mem_addr_t gic_rdists[CONFIG_MP_NUM_CPUS];
+mem_addr_t gic_rdists[CONFIG_MP_MAX_NUM_CPUS];
 #if defined(CONFIG_ARMV8_A_NS) || defined(CONFIG_GIC_SINGLE_SECURITY_STATE)
 #define IGROUPR_VAL 0xFFFFFFFFU


@@ -24,7 +24,7 @@ LOG_MODULE_REGISTER(intc_gicv3_its, LOG_LEVEL_ERR);
 #define GITS_BASER_NR_REGS 8
 /* convenient access to all redistributors base address */
-extern mem_addr_t gic_rdists[CONFIG_MP_NUM_CPUS];
+extern mem_addr_t gic_rdists[CONFIG_MP_MAX_NUM_CPUS];
 #define SIZE_256 256
 #define SIZE_4K KB(4)


@@ -156,7 +156,7 @@ struct _cpu {
 typedef struct _cpu _cpu_t;
 struct z_kernel {
-	struct _cpu cpus[CONFIG_MP_NUM_CPUS];
+	struct _cpu cpus[CONFIG_MP_MAX_NUM_CPUS];
 #ifdef CONFIG_PM
 	int32_t idle; /* Number of ticks for kernel idling */


@@ -154,9 +154,9 @@ extern struct k_thread z_main_thread;
 #ifdef CONFIG_MULTITHREADING
-extern struct k_thread z_idle_threads[CONFIG_MP_NUM_CPUS];
+extern struct k_thread z_idle_threads[CONFIG_MP_MAX_NUM_CPUS];
 #endif
-K_KERNEL_PINNED_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
+K_KERNEL_PINNED_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
				    CONFIG_ISR_STACK_SIZE);
 #ifdef CONFIG_GEN_PRIV_STACKS


@@ -45,10 +45,10 @@ struct k_thread z_main_thread;
 #ifdef CONFIG_MULTITHREADING
 __pinned_bss
-struct k_thread z_idle_threads[CONFIG_MP_NUM_CPUS];
+struct k_thread z_idle_threads[CONFIG_MP_MAX_NUM_CPUS];
 static K_KERNEL_PINNED_STACK_ARRAY_DEFINE(z_idle_stacks,
-					  CONFIG_MP_NUM_CPUS,
+					  CONFIG_MP_MAX_NUM_CPUS,
					  CONFIG_IDLE_STACK_SIZE);
 #endif /* CONFIG_MULTITHREADING */
@@ -84,7 +84,7 @@ extern const struct init_entry __init_SMP_start[];
  * switches to the init thread.
  */
 K_KERNEL_PINNED_STACK_ARRAY_DEFINE(z_interrupt_stacks,
-				   CONFIG_MP_NUM_CPUS,
+				   CONFIG_MP_MAX_NUM_CPUS,
				   CONFIG_ISR_STACK_SIZE);
 extern void idle(void *unused1, void *unused2, void *unused3);


@@ -14,7 +14,7 @@
 #define QUEUE_NUM 2
 /* Amount of execution threads per pair of queues*/
-#define THREADS_NUM (CONFIG_MP_NUM_CPUS+1)
+#define THREADS_NUM (CONFIG_MP_MAX_NUM_CPUS+1)
 /* Amount of packet headers in a queue */
 #define SIZE_OF_QUEUE 5000


@@ -38,7 +38,7 @@ struct cpustart_rec {
 volatile struct cpustart_rec *start_rec;
 static void *appcpu_top;
-static bool cpus_active[CONFIG_MP_NUM_CPUS];
+static bool cpus_active[CONFIG_MP_MAX_NUM_CPUS];
 static struct k_spinlock loglock;
 extern void z_sched_ipi(void);


@@ -67,7 +67,7 @@ struct core_state {
 	uint32_t intenable;
 };
-static struct core_state core_desc[CONFIG_MP_NUM_CPUS] = { 0 };
+static struct core_state core_desc[CONFIG_MP_MAX_NUM_CPUS] = { 0 };
 struct lpsram_header {
 	uint32_t alt_reset_vector;


@@ -13,7 +13,7 @@
 #include <adsp-clk.h>
 #include <adsp_shim.h>
-static struct cavs_clock_info platform_clocks[CONFIG_MP_NUM_CPUS];
+static struct cavs_clock_info platform_clocks[CONFIG_MP_MAX_NUM_CPUS];
 static struct k_spinlock lock;
 int cavs_clock_freq_enc[] = CAVS_CLOCK_FREQ_ENC;


@@ -25,7 +25,7 @@ extern void z_soc_mp_asm_entry(void);
 extern void soc_mp_startup(uint32_t cpu);
 extern void soc_start_core(int cpu_num);
-extern bool soc_cpus_active[CONFIG_MP_NUM_CPUS];
+extern bool soc_cpus_active[CONFIG_MP_MAX_NUM_CPUS];
 /* Legacy cache APIs still used in a few places */
 #define SOC_DCACHE_FLUSH(addr, size) \


@@ -50,7 +50,7 @@ uint32_t _loader_storage_manifest_start;
  * to be absolutely sure we don't try to IPI a CPU that isn't ready to
  * start, or else we'll launch it into garbage and crash the DSP.
  */
-bool soc_cpus_active[CONFIG_MP_NUM_CPUS];
+bool soc_cpus_active[CONFIG_MP_MAX_NUM_CPUS];
 #define NOP4 "nop; nop; nop; nop;"
 #define NOP32 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4


@@ -126,7 +126,7 @@ static void thread_analyze_cb(const struct k_thread *cthread, void *user_data)
 	cb(&info);
 }
-K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
+K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
			     CONFIG_ISR_STACK_SIZE);
 static void isr_stacks(void)


@@ -24,12 +24,12 @@ STATS_NAME(pm_stats, state_last_cycles)
 STATS_NAME(pm_stats, state_total_cycles)
 STATS_NAME_END(pm_stats);
-static STATS_SECT_DECL(pm_stats) stats[CONFIG_MP_NUM_CPUS][PM_STATE_COUNT];
+static STATS_SECT_DECL(pm_stats) stats[CONFIG_MP_MAX_NUM_CPUS][PM_STATE_COUNT];
 #define PM_STAT_NAME_LEN sizeof("pm_cpu_XXX_state_X_stats")
-static char names[CONFIG_MP_NUM_CPUS][PM_STATE_COUNT][PM_STAT_NAME_LEN];
-static uint32_t time_start[CONFIG_MP_NUM_CPUS];
-static uint32_t time_stop[CONFIG_MP_NUM_CPUS];
+static char names[CONFIG_MP_MAX_NUM_CPUS][PM_STATE_COUNT][PM_STAT_NAME_LEN];
+static uint32_t time_start[CONFIG_MP_MAX_NUM_CPUS];
+static uint32_t time_stop[CONFIG_MP_MAX_NUM_CPUS];
 static int pm_stats_init(const struct device *dev)
 {


@@ -193,7 +193,7 @@ static void shell_stack_dump(const struct k_thread *thread, void *user_data)
		      thread, tname ? tname : "NA", size, unused, size - unused, size, pcnt);
 }
-K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
+K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
			     CONFIG_ISR_STACK_SIZE);
 static int cmd_kernel_stacks(const struct shell *shell,


@@ -46,7 +46,7 @@ static uint32_t exec_cnt[CONFIG_ZTRESS_MAX_THREADS];
 static k_timeout_t backoff[CONFIG_ZTRESS_MAX_THREADS];
 static k_timeout_t init_backoff[CONFIG_ZTRESS_MAX_THREADS];
 K_THREAD_STACK_ARRAY_DEFINE(stacks, CONFIG_ZTRESS_MAX_THREADS, CONFIG_ZTRESS_STACK_SIZE);
-static k_tid_t idle_tid[CONFIG_MP_NUM_CPUS];
+static k_tid_t idle_tid[CONFIG_MP_MAX_NUM_CPUS];
 #define THREAD_NAME(i, _) STRINGIFY(ztress_##i)


@@ -10,7 +10,7 @@
 #include <zephyr/kernel_structs.h>
 #include <ksched.h>
-static int nested_interrupts[CONFIG_MP_NUM_CPUS];
+static int nested_interrupts[CONFIG_MP_MAX_NUM_CPUS];
 void __weak sys_trace_thread_create_user(struct k_thread *thread) {}
 void __weak sys_trace_thread_abort_user(struct k_thread *thread) {}


@@ -33,11 +33,11 @@ static void run_on_cpu_threadfn(void *a, void *b, void *c)
 static struct k_thread thread_har;
 static K_THREAD_STACK_DEFINE(tstack_har, HAR_STACKSZ);
-static struct k_thread run_on_threads[CONFIG_MP_NUM_CPUS];
-static K_THREAD_STACK_ARRAY_DEFINE(run_on_stacks, CONFIG_MP_NUM_CPUS, RUN_ON_STACKSZ);
-static volatile bool run_on_flags[CONFIG_MP_NUM_CPUS];
-static uint32_t clk_ratios[CONFIG_MP_NUM_CPUS];
+static struct k_thread run_on_threads[CONFIG_MP_MAX_NUM_CPUS];
+static K_THREAD_STACK_ARRAY_DEFINE(run_on_stacks, CONFIG_MP_MAX_NUM_CPUS, RUN_ON_STACKSZ);
+static volatile bool run_on_flags[CONFIG_MP_MAX_NUM_CPUS];
+static uint32_t clk_ratios[CONFIG_MP_MAX_NUM_CPUS];
 static void run_on_cpu(int cpu, void (*fn)(void *), void *arg, bool wait)
 {