2019-11-19 12:33:35 +02:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2019 Intel Corporation
|
|
|
|
*
|
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
*/
|
|
|
|
|
2022-05-06 11:11:04 +02:00
|
|
|
#include <zephyr/device.h>
|
2019-11-19 12:33:35 +02:00
|
|
|
#include <xtensa/xtruntime.h>
|
2022-05-06 11:11:04 +02:00
|
|
|
#include <zephyr/irq_nextlevel.h>
|
2019-11-19 12:33:35 +02:00
|
|
|
#include <xtensa/hal.h>
|
2022-05-06 11:11:04 +02:00
|
|
|
#include <zephyr/init.h>
|
2022-10-04 16:34:24 +02:00
|
|
|
#include <zephyr/kernel.h>
|
2023-03-01 08:13:48 +02:00
|
|
|
#include <zephyr/pm/pm.h>
|
|
|
|
#include <zephyr/device.h>
|
2023-04-26 18:42:25 +03:00
|
|
|
#include <zephyr/cache.h>
|
2023-03-01 08:13:48 +02:00
|
|
|
#include <cpu_init.h>
|
2019-11-19 12:33:35 +02:00
|
|
|
|
2023-06-13 14:17:21 +03:00
|
|
|
#include <adsp_memory.h>
|
2022-07-14 08:02:57 -04:00
|
|
|
#include <adsp_shim.h>
|
2022-11-02 15:09:25 +01:00
|
|
|
#include <adsp_clk.h>
|
2023-06-02 12:26:23 +03:00
|
|
|
#include <adsp_imr_layout.h>
|
2021-08-07 09:44:09 -07:00
|
|
|
#include <cavs-idc.h>
|
2019-11-19 12:33:35 +02:00
|
|
|
#include "soc.h"
|
|
|
|
|
|
|
|
#ifdef CONFIG_DYNAMIC_INTERRUPTS
|
2022-05-06 11:11:04 +02:00
|
|
|
#include <zephyr/sw_isr_table.h>
|
2019-11-19 12:33:35 +02:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#define LOG_LEVEL CONFIG_SOC_LOG_LEVEL
|
2022-05-06 11:11:04 +02:00
|
|
|
#include <zephyr/logging/log.h>
|
2019-11-19 12:33:35 +02:00
|
|
|
LOG_MODULE_REGISTER(soc);
|
|
|
|
|
2021-10-20 11:08:46 -07:00
|
|
|
# define SHIM_GPDMA_BASE_OFFSET 0x6500
|
|
|
|
# define SHIM_GPDMA_BASE(x) (SHIM_GPDMA_BASE_OFFSET + (x) * 0x100)
|
|
|
|
# define SHIM_GPDMA_CLKCTL(x) (SHIM_GPDMA_BASE(x) + 0x4)
|
|
|
|
# define SHIM_CLKCTL_LPGPDMAFDCGB BIT(0)
|
|
|
|
|
2023-07-19 09:40:09 +02:00
|
|
|
#ifdef CONFIG_PM
|
2023-03-01 08:13:48 +02:00
|
|
|
#define SRAM_ALIAS_BASE 0x9E000000
|
|
|
|
#define SRAM_ALIAS_MASK 0xFF000000
|
|
|
|
#define SRAM_ALIAS_OFFSET 0x20000000
|
|
|
|
|
|
|
|
#define L2_INTERRUPT_NUMBER 4
|
|
|
|
#define L2_INTERRUPT_MASK (1<<L2_INTERRUPT_NUMBER)
|
|
|
|
|
|
|
|
#define L3_INTERRUPT_NUMBER 6
|
|
|
|
#define L3_INTERRUPT_MASK (1<<L3_INTERRUPT_NUMBER)
|
|
|
|
|
|
|
|
#define ALL_USED_INT_LEVELS_MASK (L2_INTERRUPT_MASK | L3_INTERRUPT_MASK)
|
|
|
|
|
2023-06-02 12:26:23 +03:00
|
|
|
/*
|
|
|
|
* @brief FW entry point called by ROM during normal boot flow
|
|
|
|
*/
|
|
|
|
extern void rom_entry(void);
|
2023-11-17 15:45:36 +08:00
|
|
|
void mp_resume_entry(void);
|
2023-06-02 12:26:23 +03:00
|
|
|
|
2023-03-01 08:13:48 +02:00
|
|
|
/* Per-core CPU context captured before power gating and restored on wake.
 * Saved by _save_core_context() and consumed by _restore_core_context().
 */
struct core_state {
	/* Return-address register (a0) of the suspending context */
	uint32_t a0;
	/* Stack pointer (a1) of the suspending context */
	uint32_t a1;
	/* Value of the ZSR_CPU_STR special register (excsave2) */
	uint32_t excsave2;
	/* INTENABLE mask captured in pm_state_set() before interrupts
	 * are masked; re-applied in pm_state_exit_post_ops()
	 */
	uint32_t intenable;
};

/* One saved context per possible CPU; accessed across power cycles, so it
 * is explicitly flushed/invalidated via the data cache API around suspend.
 */
static struct core_state core_desc[CONFIG_MP_MAX_NUM_CPUS] = {{0}};
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief Power down procedure.
|
|
|
|
*
|
|
|
|
* Locks its code in L1 cache and shuts down memories.
|
|
|
|
* NOTE: there's no return from this function.
|
|
|
|
*
|
|
|
|
* @param disable_lpsram flag if LPSRAM is to be disabled (whole)
|
|
|
|
* @param hpsram_pg_mask pointer to memory segments power gating mask
|
|
|
|
* (each bit corresponds to one ebb)
|
|
|
|
*/
|
2023-08-29 13:51:54 +03:00
|
|
|
extern void power_down_cavs(bool disable_lpsram, uint32_t __sparse_cache * hpsram_pg_mask);
|
2023-03-01 08:13:48 +02:00
|
|
|
|
|
|
|
/* Translate an uncached SRAM pointer to its cached alias.
 *
 * The cached window lives SRAM_ALIAS_OFFSET above the uncached mapping,
 * so the conversion is a simple address-bit OR.
 */
static inline void __sparse_cache *uncache_to_cache(void *address)
{
	uintptr_t cached_addr = (uintptr_t)address | SRAM_ALIAS_OFFSET;

	return (void __sparse_cache *)cached_addr;
}
|
|
|
|
|
2023-09-12 15:14:54 +08:00
|
|
|
/* Save the minimal CPU context of the calling core into core_desc[]
 * ahead of power gating, then flush it to memory so it survives the
 * cache being lost. Must be inlined: it records the caller's a0/a1.
 */
static ALWAYS_INLINE void _save_core_context(void)
{
	uint32_t core_id = arch_proc_id();

	/* Preserve the ZSR_CPU_STR special register */
	core_desc[core_id].excsave2 = XTENSA_RSR(ZSR_CPU_STR);
	/* Capture return address and stack pointer of this context */
	__asm__ volatile("mov %0, a0" : "=r"(core_desc[core_id].a0));
	__asm__ volatile("mov %0, a1" : "=r"(core_desc[core_id].a1));
	/* Push the saved state out to RAM before caches go away */
	sys_cache_data_flush_range(&core_desc[core_id], sizeof(struct core_state));
}
|
|
|
|
|
|
|
|
/* Restore the CPU context saved by _save_core_context() on the calling
 * core. Counterpart of the save path; must be inlined since it rewrites
 * a0/a1 of the current frame.
 */
static ALWAYS_INLINE void _restore_core_context(void)
{
	uint32_t core_id = arch_proc_id();

	/* Restore the ZSR_CPU_STR special register */
	XTENSA_WSR(ZSR_CPU_STR, core_desc[core_id].excsave2);
	/* Re-establish saved return address and stack pointer */
	__asm__ volatile("mov a0, %0" :: "r"(core_desc[core_id].a0));
	__asm__ volatile("mov a1, %0" :: "r"(core_desc[core_id].a1));
	/* Ensure register writes complete before continuing */
	__asm__ volatile("rsync");
}
|
|
|
|
|
|
|
|
/* C-level resume entry reached from dsp_restore_vector after a power
 * gate exit: re-initialize the CPU, discard stale cache contents, and
 * restore the context saved before suspend. On secondary cores, hand
 * control back to the SMP resume path.
 */
void power_gate_exit(void)
{
	cpu_early_init();
	/* Memory may have changed while the cache was powered off */
	sys_cache_data_flush_and_invd_all();
	_restore_core_context();

	/* Secondary core is resumed by set_dx */
	if (arch_proc_id()) {
		mp_resume_entry();
	}
}
|
|
|
|
|
|
|
|
/* Assembly resume vector installed for power-gate exit. It rebuilds a
 * sane register-window state, derives this core's interrupt stack from
 * PRID, and jumps to power_gate_exit(). The stack is computed as
 * z_interrupt_stacks + (PRID + 1) * CONFIG_ISR_STACK_SIZE, i.e. the top
 * of the per-CPU interrupt stack.
 */
__asm__(".align 4\n\t"
	".global dsp_restore_vector\n\t"
	"dsp_restore_vector:\n\t"
	/* Reset window state: base 0, start bit 1 */
	"  movi  a0, 0\n\t"
	"  movi  a1, 1\n\t"
	"  movi  a2, 0x40020\n\t"/* PS_UM|PS_WOE */
	"  wsr   a2, PS\n\t"
	"  wsr   a1, WINDOWSTART\n\t"
	"  wsr   a0, WINDOWBASE\n\t"
	"  rsync\n\t"
	/* a1 = top of this core's interrupt stack */
	"  movi  a1, z_interrupt_stacks\n\t"
	"  rsr   a2, PRID\n\t"
	"  movi  a3, " STRINGIFY(CONFIG_ISR_STACK_SIZE) "\n\t"
	"  mull  a2, a2, a3\n\t"
	"  add   a2, a2, a3\n\t"
	"  add   a1, a1, a2\n\t"
	/* Continue resume in C */
	"  call0 power_gate_exit\n\t");
|
|
|
|
|
2023-07-17 17:03:00 +02:00
|
|
|
/* Zephyr PM hook: enter the requested power state.
 *
 * Only PM_STATE_SOFT_OFF is supported. All cores mask interrupts and
 * save context; core 0 additionally prepares the IMR restore header for
 * the ROM and calls power_down_cavs() (which does not return), while
 * secondary cores idle until powered down.
 *
 * @param state       requested power state (must be PM_STATE_SOFT_OFF)
 * @param substate_id unused
 */
void pm_state_set(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(substate_id);
	uint32_t cpu = arch_proc_id();

	if (state == PM_STATE_SOFT_OFF) {
		/* Remember interrupt mask for restore on wake, then mask all */
		core_desc[cpu].intenable = XTENSA_RSR("INTENABLE");
		z_xt_ints_off(0xffffffff);
		/* Spill live register windows to the stack before saving */
		xthal_window_spill();
		_save_core_context();
		soc_cpus_active[cpu] = false;
		sys_cache_data_flush_and_invd_all();
		if (cpu == 0) {
			uint32_t hpsram_mask[HPSRAM_SEGMENTS] = {0};

			/* IMR header lets the ROM find rom_entry on resume */
			struct imr_header hdr = {
				.adsp_imr_magic = ADSP_IMR_MAGIC_VALUE,
				.imr_restore_vector = rom_entry,
			};
			/* Written through the uncached alias so the ROM sees it */
			struct imr_layout *imr_layout =
				z_soc_uncached_ptr((__sparse_force void __sparse_cache *)
						   L3_MEM_BASE_ADDR);

			imr_layout->imr_state.header = hdr;

#ifdef CONFIG_ADSP_POWER_DOWN_HPSRAM
			/* turn off all HPSRAM banks - get a full bitmap */
			for (int i = 0; i < HPSRAM_SEGMENTS; i++)
				hpsram_mask[i] = HPSRAM_MEMMASK(i);
#endif /* CONFIG_ADSP_POWER_DOWN_HPSRAM */
			/* do power down - this function won't return */
			power_down_cavs(true, uncache_to_cache(&hpsram_mask[0]));
		} else {
			/* Secondary cores just idle; core 0 gates their power */
			k_cpu_idle();
		}
	} else {
		__ASSERT(false, "invalid argument - unsupported power state");
	}
}
|
|
|
|
|
|
|
|
/* Handle SOC specific activity after Low Power Mode Exit */
|
2023-07-17 17:03:00 +02:00
|
|
|
/* Handle SOC specific activity after Low Power Mode Exit.
 *
 * Marks the calling core active again, synchronizes the data cache and
 * re-enables the interrupts that were masked on suspend. Only
 * PM_STATE_SOFT_OFF is a valid state to exit from.
 *
 * @param state       power state being exited (must be PM_STATE_SOFT_OFF)
 * @param substate_id unused
 */
void pm_state_exit_post_ops(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(substate_id);

	const uint32_t cpu = arch_proc_id();

	if (state != PM_STATE_SOFT_OFF) {
		__ASSERT(false, "invalid argument - unsupported power state");
		return;
	}

	soc_cpus_active[cpu] = true;
	sys_cache_data_flush_and_invd_all();
	/* Restore the interrupt mask saved in pm_state_set() */
	z_xt_ints_on(core_desc[cpu].intenable);
}
|
2023-07-19 09:40:09 +02:00
|
|
|
#endif /* CONFIG_PM */
|
2022-07-13 17:52:50 -04:00
|
|
|
|
2023-11-03 14:13:37 +08:00
|
|
|
#ifdef CONFIG_ARCH_CPU_IDLE_CUSTOM
|
|
|
|
/* xt-clang removes any NOPs more than 8. So we need to set
|
|
|
|
* no optimization to avoid those NOPs from being removed.
|
|
|
|
*
|
|
|
|
* This function is simply enough and full of hand written
|
|
|
|
* assembly that optimization is not really meaningful
|
|
|
|
* anyway. So we can skip optimization unconditionally.
|
|
|
|
* Re-evalulate its use and add #ifdef if this assumption
|
|
|
|
* is no longer valid.
|
|
|
|
*/
|
|
|
|
/* Custom idle entry for this SoC. NOTE: the hand-written NOP padding and
 * the __no_optimization attribute are load-bearing (see comment above the
 * #ifdef) — do not let the compiler reorder or remove them.
 */
__no_optimization
void arch_cpu_idle(void)
{
	sys_trace_idle();

	/* Just spin forever with interrupts unmasked, for platforms
	 * where WAITI can't be used or where its behavior is
	 * complicated (Intel DSPs will power gate on idle entry under
	 * some circumstances)
	 */
	if (IS_ENABLED(CONFIG_XTENSA_CPU_IDLE_SPIN)) {
		__asm__ volatile("rsil a0, 0");
		__asm__ volatile("loop_forever: j loop_forever");
		return;
	}

	/* Cribbed from SOF: workaround for a bug in some versions of
	 * the LX6 IP.  Preprocessor ugliness avoids the need to
	 * figure out how to get the compiler to unroll a loop.
	 */
	if (IS_ENABLED(CONFIG_XTENSA_WAITI_BUG)) {
#define NOP4 __asm__ volatile("nop; nop; nop; nop");
#define NOP32 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4
#define NOP128() NOP32 NOP32 NOP32 NOP32
		/* 128 NOPs to settle the pipeline before WAITI */
		NOP128();
#undef NOP128
#undef NOP32
#undef NOP4
		__asm__ volatile("isync; extw");
	}

	/* Wait for interrupt; may power gate the core */
	__asm__ volatile ("waiti 0");
}
|
|
|
|
#endif
|
|
|
|
|
2022-07-29 19:17:21 -04:00
|
|
|
/* One-time clock/power bring-up for the DSP, run from IMR during boot:
 * selects the HP ring oscillator, configures clock gating policy and
 * claims timestamping/clock-generator ownership. Register write order
 * matters — the oscillator must be ready before it is selected.
 */
__imr void power_init(void)
{
	/* Request HP ring oscillator and
	 * wait for status to indicate it's ready.
	 */
	CAVS_SHIM.clkctl |= CAVS_CLKCTL_RHROSCC;
	while ((CAVS_SHIM.clkctl & CAVS_CLKCTL_RHROSCC) != CAVS_CLKCTL_RHROSCC) {
		k_busy_wait(10);
	}

	/* Request HP Ring Oscillator
	 * Select HP Ring Oscillator
	 * High Power Domain PLL Clock Select device by 2
	 * Low Power Domain PLL Clock Select device by 4
	 * Disable Tensilica Core(s) Prevent Local Clock Gating
	 *   - Disabling "prevent clock gating" means allowing clock gating
	 */
	CAVS_SHIM.clkctl = (CAVS_CLKCTL_RHROSCC |
			    CAVS_CLKCTL_OCS |
			    CAVS_CLKCTL_LMCS);

	/* Prevent LP GPDMA 0 & 1 clock gating */
	sys_write32(SHIM_CLKCTL_LPGPDMAFDCGB, SHIM_GPDMA_CLKCTL(0));
	sys_write32(SHIM_CLKCTL_LPGPDMAFDCGB, SHIM_GPDMA_CLKCTL(1));

	/* Disable power gating for first cores */
	CAVS_SHIM.pwrctl |= CAVS_PWRCTL_TCPDSPPG(0);

	/* On cAVS 1.8+, we must demand ownership of the timestamping
	 * and clock generator registers.  Lacking the former will
	 * prevent wall clock timer interrupts from arriving, even
	 * though the device itself is operational.
	 */
	sys_write32(GENO_MDIVOSEL | GENO_DIOPTOSEL, DSP_INIT_GENO);
	sys_write32(IOPO_DMIC_FLAG | IOPO_I2SSEL_MASK, DSP_INIT_IOPO);
}
|