/* Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef __INTEL_ADSP_CPU_INIT_H
#define __INTEL_ADSP_CPU_INIT_H

#include <zephyr/arch/arch_inlines.h>
#include <zephyr/arch/xtensa/arch.h>
#include <xtensa/config/core-isa.h>
#include <xtensa/corebits.h>
#include <adsp_memory.h>
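
/* MEMCTL value: roughly, enable all instruction and data cache ways
 * (the way-use and way-allocatable fields), invalidate ways as they
 * are brought into service (INV_EN), and enable the L0 loop
 * instruction buffer.  See corebits.h and the Xtensa ISA reference
 * for the exact field definitions.
 */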
#define MEMCTL_VALUE (MEMCTL_INV_EN | MEMCTL_ICWU_MASK | MEMCTL_DCWA_MASK | \
                      MEMCTL_DCWU_MASK | MEMCTL_L0IBUF_EN)
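
/* ATOMCTL controls how the core implements S32C1I compare-and-swap
 * for each memory type via a two-bit field per type (bypass,
 * writethrough cacheable, writeback cacheable).  Field value 1
 * requests an RCW (read-check-write) transaction on the external
 * bus, which is what the BIT(0)/BIT(2)/BIT(4) values below encode.
 */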
#define ATOMCTL_BY_RCW BIT(0) /* RCW Transaction for Bypass Memory */
#define ATOMCTL_WT_RCW BIT(2) /* RCW Transaction for Writethrough Cacheable Memory */
#define ATOMCTL_WB_RCW BIT(4) /* RCW Transaction for Writeback Cacheable Memory */
#define ATOMCTL_VALUE (ATOMCTL_BY_RCW | ATOMCTL_WT_RCW | ATOMCTL_WB_RCW)

/* Low-level CPU initialization.  Call this immediately after entering
 * C code to initialize the cache, protection and synchronization
 * features.
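 *
 * On these platforms the boot path is unified across cores: a minimal
 * assembly stub sets WINDOW{START,BASE}, PS and a stack pointer, then
 * jumps to C code which calls this before anything else.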
 */
static ALWAYS_INLINE void cpu_early_init(void)
{
        uint32_t reg;

#ifdef CONFIG_ADSP_NEED_POWER_ON_CACHE
        /* First, we need to power the cache SRAM banks on!  Write a bit
         * for each cache way in the bottom half of the L1CCFG register
         * and poll the top half for them to turn on.
         */
        uint32_t dmask = BIT(ADSP_CxL1CCAP_DCMWC) - 1;
        uint32_t imask = BIT(ADSP_CxL1CCAP_ICMWC) - 1;
        uint32_t waymask = (imask << 8) | dmask;
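
        /* Dcache way enable bits live in the low byte and icache way
         * enable bits in the byte above (hence imask << 8); the
         * hardware reports each way's power-up status 16 bits above
         * its enable bit, which is what the loops below poll for.
         */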
        ADSP_CxL1CCFG_REG = waymask;
        while (((ADSP_CxL1CCFG_REG >> 16) & waymask) != waymask) {
        }

        /* Prefetcher also power gates, same interface */
        ADSP_CxL1PCFG_REG = 1;
        while ((ADSP_CxL1PCFG_REG & 0x10000) == 0) {
        }
#endif

        /* Now set up the Xtensa CPU to enable the cache logic.  The
         * details of the fields are somewhat complicated, but per the
         * ISA ref: "Turning on caches at power-up usually consists of
         * writing a constant with bits[31:8] all 1’s to MEMCTL.".
         * Also set bit 0 to enable the LOOP extension instruction
         * fetch buffer.
         */
#if XCHAL_USE_MEMCTL
        reg = MEMCTL_VALUE;
        XTENSA_WSR("MEMCTL", reg);
        __asm__ volatile("rsync");
#endif
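
        /* Zero THREADPTR; presumably nothing depends on it this
         * early, and clearing it guarantees no stale value survives
         * from before reset (Zephyr manages the register later, e.g.
         * as the TLS pointer on targets using thread-local storage).
         */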
#if XCHAL_HAVE_THREADPTR
        reg = 0;
        XTENSA_WUR("THREADPTR", reg);
#endif

        /* Likewise enable prefetching.  Sadly these values are not
         * architecturally defined by Xtensa (they're just documented
         * as priority hints), so this constant is just copied from
         * SOF for now.  If we care about prefetch priority tuning
         * we're supposed to ask Cadence, I guess.
         */
        reg = ADSP_L1_CACHE_PREFCTL_VALUE;
        XTENSA_WSR("PREFCTL", reg);
        __asm__ volatile("rsync");

        /* Finally we need to enable the cache in the Region
         * Protection Option "TLB" entries.  The hardware defaults
         * have this set to RW/uncached everywhere.
         *
         * If we have the MMU enabled, we don't need to do this right
         * now.  Let's use the default configuration and properly
         * configure the MMU when running from RAM.
         */
#ifndef CONFIG_MMU
        ARCH_XTENSA_SET_RPO_TLB();
#endif

        /* Initialize ATOMCTL: Hardware defaults for S32C1I use
         * "internal" operations, meaning they are atomic only WRT the
         * local CPU!  We need external transactions on the shared
         * bus.
         */
        reg = ATOMCTL_VALUE;
        XTENSA_WSR("ATOMCTL", reg);

        /* Initialize interrupts to "disabled" */
        reg = 0;
        XTENSA_WSR("INTENABLE", reg);

        /* Finally VECBASE.  Note that on core 0 startup, we're still
         * running in IMR and the vectors at this address won't be
         * copied into HP-SRAM until later.  That's OK, as interrupts
         * are still disabled at this stage and will remain so
         * consistently until Zephyr switches into the main thread.
         */
        reg = VECBASE_RESET_PADDR_SRAM;
        XTENSA_WSR("VECBASE", reg);
}

#endif /* __INTEL_ADSP_CPU_INIT_H */