diff --git a/boards/xtensa/up_squared_adsp/tools/lib/loader.py b/boards/xtensa/up_squared_adsp/tools/lib/loader.py
index eccbf5724b9..0590240895f 100644
--- a/boards/xtensa/up_squared_adsp/tools/lib/loader.py
+++ b/boards/xtensa/up_squared_adsp/tools/lib/loader.py
@@ -111,6 +111,7 @@ class FirmwareLoader():
         self.dev.core_power_up(plat_def.CORE_MASK)
         self.dev.core_run(plat_def.CORE_0)
+        self.dev.core_run(plat_def.CORE_1)
 
         logging.debug("Wait for IPC DONE bit from ROM")
         while True:
             ipc_ack = self.dev.dsp_hipcie.value
diff --git a/boards/xtensa/up_squared_adsp/up_squared_adsp_defconfig b/boards/xtensa/up_squared_adsp/up_squared_adsp_defconfig
index 9b949677d08..74469094fb7 100644
--- a/boards/xtensa/up_squared_adsp/up_squared_adsp_defconfig
+++ b/boards/xtensa/up_squared_adsp/up_squared_adsp_defconfig
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: Apache-2.0
 
 CONFIG_MAIN_STACK_SIZE=2048
 
-CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC=400000000
 CONFIG_SOC_INTEL_APL_ADSP=y
 CONFIG_BOARD_UP_SQUARED_ADSP=y
diff --git a/dts/xtensa/intel/intel_apl_adsp.dtsi b/dts/xtensa/intel/intel_apl_adsp.dtsi
index 630f677ecdc..bda5029fcf1 100644
--- a/dts/xtensa/intel/intel_apl_adsp.dtsi
+++ b/dts/xtensa/intel/intel_apl_adsp.dtsi
@@ -81,5 +81,13 @@
 			interrupts = <0x10 0 0>;
 			interrupt-parent = <&core_intc>;
 		};
+
+		idc: idc@1200 {
+			compatible = "intel,cavs-idc";
+			label = "CAVS_IDC";
+			reg = <0x1200 0x80>;
+			interrupts = <8 0 0>;
+			interrupt-parent = <&cavs0>;
+		};
 	};
 };
diff --git a/soc/xtensa/intel_apl_adsp/CMakeLists.txt b/soc/xtensa/intel_apl_adsp/CMakeLists.txt
index b1d80f2bdb0..8f9300b9c95 100644
--- a/soc/xtensa/intel_apl_adsp/CMakeLists.txt
+++ b/soc/xtensa/intel_apl_adsp/CMakeLists.txt
@@ -5,3 +5,5 @@ zephyr_library_include_directories(${ZEPHYR_BASE}/drivers)
 zephyr_library_sources(adsp.c)
 zephyr_library_sources(soc.c)
 zephyr_library_sources(main_entry.S)
+
+zephyr_library_sources_ifdef(CONFIG_SMP soc_mp.c)
diff --git a/soc/xtensa/intel_apl_adsp/Kconfig.defconfig b/soc/xtensa/intel_apl_adsp/Kconfig.defconfig
index d62d6e07386..7736d86009b 100644
--- a/soc/xtensa/intel_apl_adsp/Kconfig.defconfig
+++ b/soc/xtensa/intel_apl_adsp/Kconfig.defconfig
@@ -9,6 +9,10 @@ config SOC
 	string
 	default "intel_apl_adsp"
 
+config SYS_CLOCK_HW_CYCLES_PER_SEC
+	default 400000000 if XTENSA_TIMER
+	default 19200000 if CAVS_TIMER
+
 config IRQ_OFFLOAD_INTNUM
 	default 0
 
@@ -53,4 +57,27 @@ config LOG_BACKEND_RB_MEM_SIZE
 
 endif # LOG
 
+
+if SMP
+
+config MP_NUM_CPUS
+	default 2
+
+config XTENSA_TIMER
+	default n
+
+config CAVS_TIMER
+	default y
+
+config IPM
+	default y
+
+config IPM_CAVS_IDC
+	default y if IPM
+
+config SCHED_IPI_SUPPORTED
+	default y if IPM_CAVS_IDC
+
+endif
+
 endif
diff --git a/soc/xtensa/intel_apl_adsp/include/platform/memory.h b/soc/xtensa/intel_apl_adsp/include/platform/memory.h
index 57899d7fdc5..88dba2f83f5 100644
--- a/soc/xtensa/intel_apl_adsp/include/platform/memory.h
+++ b/soc/xtensa/intel_apl_adsp/include/platform/memory.h
@@ -44,6 +44,10 @@
 #define IPC_HOST_BASE		0x00001180
 #define IPC_HOST_SIZE		0x00000020
 
+/* Intra DSP IPC */
+#define IPC_DSP_SIZE		0x00000080
+#define IPC_DSP_BASE(x)		(0x00001200 + x * IPC_DSP_SIZE)
+
 /* SRAM window for HOST */
 #define HOST_WIN_SIZE		0x00000008
 #define HOST_WIN_BASE(x)	(0x00001580 + x * HOST_WIN_SIZE)
diff --git a/soc/xtensa/intel_apl_adsp/linker.ld b/soc/xtensa/intel_apl_adsp/linker.ld
index 45e2c2b20ff..bbc6d4e94d8 100644
--- a/soc/xtensa/intel_apl_adsp/linker.ld
+++ b/soc/xtensa/intel_apl_adsp/linker.ld
@@ -171,7 +171,16 @@ _memmap_cacheattr_bp_allvalid = 0x22222222;
  * as cacheattr_set macro sets them both to the same set of
  * attributes.
  */
+#ifndef CONFIG_SMP
 _memmap_cacheattr_intel_apl_adsp = 0xFF42FFF2;
+#else
+/*
+ * FIXME: Make 0xA0000000 - 0xBFFFFFFF bypass the cache under SMP,
+ * since there is no data cache manipulation for spinlocks, kernel
+ * objects, the scheduler, etc.
+ */
+_memmap_cacheattr_intel_apl_adsp = 0xFF22FFF2;
+#endif
 PROVIDE(_memmap_cacheattr_reset = _memmap_cacheattr_intel_apl_adsp);
 
 SECTIONS
diff --git a/soc/xtensa/intel_apl_adsp/soc.h b/soc/xtensa/intel_apl_adsp/soc.h
index 72503265299..f0bb975b7bd 100644
--- a/soc/xtensa/intel_apl_adsp/soc.h
+++ b/soc/xtensa/intel_apl_adsp/soc.h
@@ -42,6 +42,8 @@
 #define CAVS_L2_AGG_INT_LEVEL4		DT_CAVS_ICTL_2_IRQ
 #define CAVS_L2_AGG_INT_LEVEL5		DT_CAVS_ICTL_3_IRQ
 
+#define CAVS_ICTL_INT_CPU_OFFSET(x)	(0x40 * x)
+
 #define IOAPIC_EDGE			0
 #define IOAPIC_HIGH			0
 
@@ -198,22 +200,48 @@ struct soc_dmic_shim_regs {
 #define SOC_PWRCTL_DISABLE_PWR_GATING_DSP0	BIT(0)
 #define SOC_PWRCTL_DISABLE_PWR_GATING_DSP1	BIT(1)
 
+/* DSP Wall Clock Timers (0 and 1) */
+#define DSP_WCT_IRQ(x) \
+	SOC_AGGREGATE_IRQ((22 + x), CAVS_L2_AGG_INT_LEVEL2)
+
+#define DSP_WCT_CS_TA(x)		BIT(x)
+#define DSP_WCT_CS_TT(x)		BIT(4 + x)
+
 struct soc_dsp_shim_regs {
 	u32_t	reserved[8];
-	u64_t	walclk;
-	u64_t	dspwctcs;
-	u64_t	dspwct0c;
-	u64_t	dspwct1c;
-	u32_t	reserved1[14];
+	union {
+		struct {
+			u32_t	walclk32_lo;
+			u32_t	walclk32_hi;
+		};
+		u64_t	walclk;
+	};
+	u32_t	dspwctcs;
+	u32_t	reserved1[1];
+	union {
+		struct {
+			u32_t	dspwct0c32_lo;
+			u32_t	dspwct0c32_hi;
+		};
+		u64_t	dspwct0c;
+	};
+	union {
+		struct {
+			u32_t	dspwct1c32_lo;
+			u32_t	dspwct1c32_hi;
+		};
+		u64_t	dspwct1c;
+	};
+	u32_t	reserved2[14];
 	u32_t	clkctl;
 	u32_t	clksts;
-	u32_t	reserved2[4];
+	u32_t	reserved3[4];
 	u16_t	pwrctl;
 	u16_t	pwrsts;
 	u32_t	lpsctl;
 	u32_t	lpsdmas0;
 	u32_t	lpsdmas1;
-	u32_t	reserved3[22];
+	u32_t	reserved4[22];
 };
 
 /* macros for data cache operations */
diff --git a/soc/xtensa/intel_apl_adsp/soc_mp.c b/soc/xtensa/intel_apl_adsp/soc_mp.c
new file mode 100644
index 00000000000..5cc592b1c3f
--- /dev/null
+++ b/soc/xtensa/intel_apl_adsp/soc_mp.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2019 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+LOG_MODULE_REGISTER(soc_mp, CONFIG_SOC_LOG_LEVEL);
+
+#include "soc.h"
+#include "memory.h"
+
+#include
+
+#ifdef CONFIG_SCHED_IPI_SUPPORTED
+#include
+#include
+#include
+
+/* ROM wake version parsed by ROM during core wake up. */
+#define IDC_ROM_WAKE_VERSION	0x2
+
+/* IDC message type. */
+#define IDC_TYPE_SHIFT		24
+#define IDC_TYPE_MASK		0x7f
+#define IDC_TYPE(x)		(((x) & IDC_TYPE_MASK) << IDC_TYPE_SHIFT)
+
+/* IDC message header. */
+#define IDC_HEADER_MASK		0xffffff
+#define IDC_HEADER(x)		((x) & IDC_HEADER_MASK)
+
+/* IDC message extension. */
+#define IDC_EXTENSION_MASK	0x3fffffff
+#define IDC_EXTENSION(x)	((x) & IDC_EXTENSION_MASK)
+
+/* IDC power up message. */
+#define IDC_MSG_POWER_UP \
+	(IDC_TYPE(0x1) | IDC_HEADER(IDC_ROM_WAKE_VERSION))
+
+#define IDC_MSG_POWER_UP_EXT(x)	IDC_EXTENSION((x) >> 2)
+
+static struct device *idc;
+#endif
+
+extern void __start(void);
+
+struct cpustart_rec {
+	u32_t		cpu;
+
+	arch_cpustart_t	fn;
+	char		*stack_top;
+	void		*arg;
+	u32_t		vecbase;
+
+	u32_t		alive;
+
+	/* padding to cache line */
+	u8_t		padding[XCHAL_DCACHE_LINESIZE - 6 * 4];
+};
+
+static __aligned(XCHAL_DCACHE_LINESIZE)
+struct cpustart_rec start_rec;
+
+static void *mp_top;
+
+static void mp_entry2(void)
+{
+	volatile int ps, ie;
+	u32_t idc_reg;
+
+	/* Copy over VECBASE from the main CPU for an initial value
+	 * (will need to revisit this if we ever allow a user API to
+	 * change interrupt vectors at runtime). Make sure interrupts
+	 * are locally disabled, then synthesize a PS value that will
+	 * enable them for the user code to pass to irq_unlock()
+	 * later.
+	 */
+	__asm__ volatile("rsr.PS %0" : "=r"(ps));
+	ps &= ~(PS_EXCM_MASK | PS_INTLEVEL_MASK);
+	__asm__ volatile("wsr.PS %0" : : "r"(ps));
+
+	ie = 0;
+	__asm__ volatile("wsr.INTENABLE %0" : : "r"(ie));
+	__asm__ volatile("wsr.VECBASE %0" : : "r"(start_rec.vecbase));
+	__asm__ volatile("rsync");
+
+	/* Set up the CPU pointer. */
+	_cpu_t *cpu = &_kernel.cpus[start_rec.cpu];
+
+	__asm__ volatile(
+		"wsr." CONFIG_XTENSA_KERNEL_CPU_PTR_SR " %0" : : "r"(cpu));
+
+	/* Clear busy bit set by power up message */
+	idc_reg = idc_read(REG_IDCTFC(0), start_rec.cpu) | REG_IDCTFC_BUSY;
+	idc_write(REG_IDCTFC(0), start_rec.cpu, idc_reg);
+
+#ifdef CONFIG_IPM_CAVS_IDC
+	/* Interrupt must be enabled while running on current core */
+	irq_enable(XTENSA_IRQ_NUMBER(DT_INST_0_INTEL_CAVS_IDC_IRQ_0));
+#endif /* CONFIG_IPM_CAVS_IDC */
+
+	start_rec.alive = 1;
+	SOC_DCACHE_FLUSH(&start_rec, sizeof(start_rec));
+
+	start_rec.fn(start_rec.arg);
+
+#if CONFIG_MP_NUM_CPUS == 1
+	/* CPU#1 can be under manual control running custom functions
+	 * instead of participating in general thread execution.
+	 * Put the CPU into idle after those functions return
+	 * so this won't return.
+	 */
+	for (;;) {
+		k_cpu_idle();
+	}
+#endif
+}
+
+/* Defines a locally callable "function" named mp_stack_switch(). The
+ * first argument (in register a2 post-ENTRY) is the new stack pointer
+ * to go into register a1. The second (a3) is the entry point.
+ * Because this never returns, a0 is used as a scratch register then
+ * set to zero for the called function (a null return value is the
+ * signal for "top of stack" to the debugger).
+ */
+void mp_stack_switch(void *stack, void *entry);
+__asm__("\n"
+	".align 4		\n"
+	"mp_stack_switch:	\n\t"
+
+	"entry a1, 16		\n\t"
+
+	"movi a0, 0		\n\t"
+
+	"jx a3			\n\t");
+
+/* Carefully constructed to use no stack beyond compiler-generated ABI
+ * instructions. Stack pointer is pointing to __stack at this point.
+ */
+void z_mp_entry(void)
+{
+	*(u32_t *)CONFIG_SRAM_BASE_ADDRESS = 0xDEADBEEF;
+	SOC_DCACHE_FLUSH((u32_t *)CONFIG_SRAM_BASE_ADDRESS, 64);
+
+	mp_stack_switch(mp_top, mp_entry2);
+}
+
+void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
+		    arch_cpustart_t fn, void *arg)
+{
+	u32_t vecbase;
+	u32_t idc_reg;
+
+	__ASSERT(cpu_num == 1, "Only supports two CPUs!");
+
+	/* Setup data to boot core #1 */
+	__asm__ volatile("rsr.VECBASE %0\n\t" : "=r"(vecbase));
+
+	start_rec.cpu = cpu_num;
+	start_rec.fn = fn;
+	start_rec.stack_top = Z_THREAD_STACK_BUFFER(stack) + sz;
+	start_rec.arg = arg;
+	start_rec.vecbase = vecbase;
+	start_rec.alive = 0;
+
+	mp_top = Z_THREAD_STACK_BUFFER(stack) + sz;
+
+	SOC_DCACHE_FLUSH(&start_rec, sizeof(start_rec));
+
+#ifdef CONFIG_SCHED_IPI_SUPPORTED
+	idc = device_get_binding(DT_INST_0_INTEL_CAVS_IDC_LABEL);
+#endif
+
+	/* Enable IDC interrupt on the other core */
+	idc_reg = idc_read(REG_IDCCTL, cpu_num);
+	idc_reg |= REG_IDCCTL_IDCTBIE(0);
+	idc_write(REG_IDCCTL, cpu_num, idc_reg);
+	sys_set_bit(DT_CAVS_ICTL_BASE_ADDR + 0x04 +
+		    CAVS_ICTL_INT_CPU_OFFSET(cpu_num), 8);
+
+	/* Send power up message to the other core */
+	idc_write(REG_IDCIETC(cpu_num), 0, IDC_MSG_POWER_UP_EXT(RAM_BASE));
+	idc_write(REG_IDCITC(cpu_num), 0, IDC_MSG_POWER_UP | REG_IDCITC_BUSY);
+
+	/* Disable the IDC interrupt on the other core so an IPI won't cause
+	 * it to jump to the ISR until the core is fully initialized.
+	 */
+	idc_reg = idc_read(REG_IDCCTL, cpu_num);
+	idc_reg &= ~REG_IDCCTL_IDCTBIE(0);
+	idc_write(REG_IDCCTL, cpu_num, idc_reg);
+	sys_clear_bit(DT_CAVS_ICTL_BASE_ADDR + 0x04 +
+		      CAVS_ICTL_INT_CPU_OFFSET(cpu_num), 8);
+
+	do {
+		SOC_DCACHE_INVALIDATE(&start_rec, sizeof(start_rec));
+	} while (start_rec.alive == 0);
+
+	/* Clear the done bit from the power up message response */
+	idc_reg = idc_read(REG_IDCIETC(cpu_num), 0) | REG_IDCIETC_DONE;
+	idc_write(REG_IDCIETC(cpu_num), 0, idc_reg);
+}
+
+#ifdef CONFIG_SCHED_IPI_SUPPORTED
+FUNC_ALIAS(soc_sched_ipi, arch_sched_ipi, void);
+void soc_sched_ipi(void)
+{
+	if (idc != NULL) {
+		ipm_send(idc, 0, IPM_CAVS_IDC_MSG_SCHED_IPI_ID,
+			 IPM_CAVS_IDC_MSG_SCHED_IPI_DATA, 0);
+	}
+}
+#endif
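
For reference, the IDC power-up request that arch_start_cpu() sends above is just two packed 32-bit values: type and header in the ITC register, and the entry address (shifted right by two and masked to 30 bits) in the IETC extension register. The standalone sketch below, which is not part of the patch, repeats the macros from soc_mp.c and prints the resulting values; the RAM base address used here is a hypothetical example value, not taken from the patch.

/* Standalone illustration of the IDC power-up message encoding used in
 * soc_mp.c above. The macros are copied from the patch; ram_base is an
 * example value only.
 */
#include <stdint.h>
#include <stdio.h>

#define IDC_ROM_WAKE_VERSION	0x2

#define IDC_TYPE_SHIFT		24
#define IDC_TYPE_MASK		0x7f
#define IDC_TYPE(x)		(((x) & IDC_TYPE_MASK) << IDC_TYPE_SHIFT)

#define IDC_HEADER_MASK		0xffffff
#define IDC_HEADER(x)		((x) & IDC_HEADER_MASK)

#define IDC_EXTENSION_MASK	0x3fffffff
#define IDC_EXTENSION(x)	((x) & IDC_EXTENSION_MASK)

#define IDC_MSG_POWER_UP \
	(IDC_TYPE(0x1) | IDC_HEADER(IDC_ROM_WAKE_VERSION))
#define IDC_MSG_POWER_UP_EXT(x)	IDC_EXTENSION((x) >> 2)

int main(void)
{
	uint32_t ram_base = 0xBE000000u;	/* example address only */

	/* Extension register: entry address >> 2, masked to 30 bits. */
	printf("IDCIETC = 0x%08x\n",
	       (unsigned int)IDC_MSG_POWER_UP_EXT(ram_base));

	/* ITC register: type 0x1 (power up) plus the ROM wake version in
	 * the header field; arch_start_cpu() ORs in REG_IDCITC_BUSY when
	 * it actually sends the message.
	 */
	printf("IDCITC  = 0x%08x\n", (unsigned int)IDC_MSG_POWER_UP);

	return 0;
}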
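The soc_dsp_shim_regs rework above keeps the overall register offsets but exposes 32-bit halves of the wall clock and comparator registers and turns dspwctcs into a 32-bit field. A quick host-side sanity check of the new layout is sketched below; the struct is transcribed with stdint types, and the expected offsets are assumptions derived from the patch's reserved-field arithmetic (reserved[8] putting walclk at 0x20, clkctl at 0x78, and so on), not quoted from hardware documentation.

/* Host-side layout check for the reworked shim register struct (C11). */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct dsp_shim_regs {
	uint32_t reserved[8];
	union {
		struct {
			uint32_t walclk32_lo;
			uint32_t walclk32_hi;
		};
		uint64_t walclk;
	};
	uint32_t dspwctcs;
	uint32_t reserved1[1];
	union {
		struct {
			uint32_t dspwct0c32_lo;
			uint32_t dspwct0c32_hi;
		};
		uint64_t dspwct0c;
	};
	union {
		struct {
			uint32_t dspwct1c32_lo;
			uint32_t dspwct1c32_hi;
		};
		uint64_t dspwct1c;
	};
	uint32_t reserved2[14];
	uint32_t clkctl;
	uint32_t clksts;
	uint32_t reserved3[4];
	uint16_t pwrctl;
	uint16_t pwrsts;
	uint32_t lpsctl;
	uint32_t lpsdmas0;
	uint32_t lpsdmas1;
	uint32_t reserved4[22];
};

/* Offsets implied by the field arithmetic in the patch. */
static_assert(offsetof(struct dsp_shim_regs, walclk) == 0x20, "walclk");
static_assert(offsetof(struct dsp_shim_regs, dspwctcs) == 0x28, "dspwctcs");
static_assert(offsetof(struct dsp_shim_regs, dspwct0c) == 0x30, "dspwct0c");
static_assert(offsetof(struct dsp_shim_regs, dspwct1c) == 0x38, "dspwct1c");
static_assert(offsetof(struct dsp_shim_regs, clkctl) == 0x78, "clkctl");
static_assert(offsetof(struct dsp_shim_regs, pwrctl) == 0x90, "pwrctl");

int main(void)
{
	/* Compiling this file is the test; the asserts fire at build time. */
	return 0;
}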