/*
 * Copyright (c) 2022 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/pm/pm.h>
#include <zephyr/device.h>
#include <zephyr/debug/sparse.h>
#include <zephyr/cache.h>
#include <cpu_init.h>

#include <adsp_boot.h>
#include <adsp_power.h>
#include <adsp_memory.h>
#include <adsp_imr_layout.h>
#include <zephyr/drivers/mm/mm_drv_intel_adsp_mtl_tlb.h>

#define LPSRAM_MAGIC_VALUE 0x13579BDF
#define LPSCTL_BATTR_MASK GENMASK(16, 12)
#define SRAM_ALIAS_BASE 0xA0000000
#define SRAM_ALIAS_MASK 0xF0000000

__imr void power_init(void)
{
	/* Disable idle power gating */
	DSPCS.bootctl[0].bctl |= DSPBR_BCTL_WAITIPCG | DSPBR_BCTL_WAITIPPG;
}

#ifdef CONFIG_PM

#define uncache_to_cache(address) \
	((__typeof__(address))(((uint32_t)(address) & \
	~SRAM_ALIAS_MASK) | SRAM_ALIAS_BASE))
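
/* Illustrative example: with the alias macros above,
 * uncache_to_cache((void *)0x9E001000) evaluates to 0xAE001000, i.e. the
 * same offset re-based into the cached SRAM alias window.
 */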

#define L2_INTERRUPT_NUMBER 4
#define L2_INTERRUPT_MASK (1<<L2_INTERRUPT_NUMBER)

#define L3_INTERRUPT_NUMBER 6
#define L3_INTERRUPT_MASK (1<<L3_INTERRUPT_NUMBER)

#define ALL_USED_INT_LEVELS_MASK (L2_INTERRUPT_MASK | L3_INTERRUPT_MASK)
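
/* ALL_USED_INT_LEVELS_MASK == BIT(4) | BIT(6) == 0x50: the interrupt levels
 * re-enabled before WAITI so that a power-gated core 0 can be woken again
 * (see power_gate_entry()).
 */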

/**
 * @brief Power down procedure.
 *
 * Locks its code in L1 cache and shuts down memories.
 * NOTE: there's no return from this function.
 *
 * @param disable_lpsram flag indicating whether the whole LPSRAM is to be disabled
 * @param hpsram_pg_mask pointer to the memory segments power gating mask
 * (each bit corresponds to one EBB)
 * @param response_to_ipc flag indicating whether an IPC response should be sent during power down
 */
extern void power_down(bool disable_lpsram, uint32_t *hpsram_pg_mask,
		       bool response_to_ipc);

#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
/**
 * @brief Platform-specific context restore procedure
 *
 * Should be called when the SoC context restore is completed.
 */
extern void platform_context_restore(void);

/*
 * @brief Pointer to a persistent storage space, to be set by platform code
 */
uint8_t *global_imr_ram_storage;

/**
 * @brief D3 restore boot entry point
 */
extern void boot_entry_d3_restore(void);
#else

/*
 * @brief FW entry point called by ROM during normal boot flow
 */
extern void rom_entry(void);

#endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */

/* NOTE: This struct will grow with all values that have to be stored for
 * proper CPU restore after PG.
 */
struct core_state {
	uint32_t a0;
	uint32_t a1;
	uint32_t vecbase;
	uint32_t excsave2;
	uint32_t excsave3;
	uint32_t thread_ptr;
	uint32_t intenable;
	uint32_t bctl;
};
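/* Per-core context saved across power gating; _save_core_context() flushes
 * it to RAM explicitly since cache contents do not survive the power-down.
 */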
static struct core_state core_desc[CONFIG_MP_MAX_NUM_CPUS] = {{0}};

struct lpsram_header {
	uint32_t alt_reset_vector;
	uint32_t adsp_lpsram_magic;
	void *lp_restore_vector;
	uint32_t reserved;
	uint32_t slave_core_vector;
	uint8_t rom_bypass_vectors_reserved[0xC00 - 0x14];
};
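
/* Illustrative sanity check, assuming the ROM-bypass vector area is sized
 * so that the whole header spans exactly 0xC00 bytes.
 */
BUILD_ASSERT(sizeof(struct lpsram_header) == 0xC00);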

static ALWAYS_INLINE void _save_core_context(uint32_t core_id)
{
	core_desc[core_id].vecbase = XTENSA_RSR("VECBASE");
	core_desc[core_id].excsave2 = XTENSA_RSR("EXCSAVE2");
	core_desc[core_id].excsave3 = XTENSA_RSR("EXCSAVE3");
	core_desc[core_id].thread_ptr = XTENSA_RUR("THREADPTR");
	__asm__ volatile("mov %0, a0" : "=r"(core_desc[core_id].a0));
	__asm__ volatile("mov %0, a1" : "=r"(core_desc[core_id].a1));
	sys_cache_data_flush_range(&core_desc[core_id], sizeof(struct core_state));
}

static ALWAYS_INLINE void _restore_core_context(void)
{
	uint32_t core_id = arch_proc_id();

	XTENSA_WSR("VECBASE", core_desc[core_id].vecbase);
	XTENSA_WSR("EXCSAVE2", core_desc[core_id].excsave2);
	XTENSA_WSR("EXCSAVE3", core_desc[core_id].excsave3);
	XTENSA_WUR("THREADPTR", core_desc[core_id].thread_ptr);
	__asm__ volatile("mov a0, %0" :: "r"(core_desc[core_id].a0));
	__asm__ volatile("mov a1, %0" :: "r"(core_desc[core_id].a1));
	__asm__ volatile("rsync");
}
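/* Implemented in the inline assembly block below; installed as the LPSRAM
 * header's lp_restore_vector in power_gate_entry() so that a power-gated
 * core resumes execution here.
 */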
void dsp_restore_vector(void);

void power_gate_entry(uint32_t core_id)
{
	xthal_window_spill();
	sys_cache_data_flush_and_invd_all();
	_save_core_context(core_id);
	if (core_id == 0) {
		struct lpsram_header *lpsheader =
			(struct lpsram_header *) DT_REG_ADDR(DT_NODELABEL(sram1));

		lpsheader->adsp_lpsram_magic = LPSRAM_MAGIC_VALUE;
		lpsheader->lp_restore_vector = &dsp_restore_vector;
		sys_cache_data_flush_range(lpsheader, sizeof(struct lpsram_header));
		/* Re-enabling interrupts for core 0 because someone has to wake us
		 * up from power gating.
		 */
		z_xt_ints_on(ALL_USED_INT_LEVELS_MASK);
	}

	soc_cpus_active[core_id] = false;
	sys_cache_data_flush_range(soc_cpus_active, sizeof(soc_cpus_active));
	k_cpu_idle();
	z_xt_ints_off(0xffffffff);
}

void power_gate_exit(void)
{
	cpu_early_init();
	sys_cache_data_flush_and_invd_all();
	_restore_core_context();
}
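/* Resume point for a power-gated core: it runs with no context set up, so
 * it rebuilds PS and the register window, derives this core's interrupt
 * stack from PRID and calls power_gate_exit() to restore the saved state.
 */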
__asm__(".align 4\n\t"
	"dsp_restore_vector:\n\t"
	" movi a0, 0\n\t"
	" movi a1, 1\n\t"
	" movi a2, 0x40020\n\t"/* PS_UM|PS_WOE */
	" wsr a2, PS\n\t"
	" wsr a1, WINDOWSTART\n\t"
	" wsr a0, WINDOWBASE\n\t"
	" rsync\n\t"
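	/* a1 = z_interrupt_stacks + (PRID + 1) * CONFIG_ISR_STACK_SIZE,
	 * i.e. the top of this core's interrupt stack (stacks grow down).
	 */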
	" movi a1, z_interrupt_stacks\n\t"
	" rsr a2, PRID\n\t"
	" movi a3, " STRINGIFY(CONFIG_ISR_STACK_SIZE) "\n\t"
	" mull a2, a2, a3\n\t"
	" add a2, a2, a3\n\t"
	" add a1, a1, a2\n\t"
	" call0 power_gate_exit\n\t");

#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
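/* Resets the register window state before restoring the saved context;
 * register contents did not survive the power off.
 */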
static void ALWAYS_INLINE power_off_exit(void)
{
	__asm__(
		" movi a0, 0\n\t"
		" movi a1, 1\n\t"
		" movi a2, 0x40020\n\t"/* PS_UM|PS_WOE */
		" wsr a2, PS\n\t"
		" wsr a1, WINDOWSTART\n\t"
		" wsr a0, WINDOWBASE\n\t"
		" rsync\n\t");
	_restore_core_context();
}

__imr void pm_state_imr_restore(void)
{
	struct imr_layout *imr_layout = (struct imr_layout *)(IMR_LAYOUT_ADDRESS);
	/* restore lpsram power and contents */
	bmemcpy(z_soc_uncached_ptr((__sparse_force void __sparse_cache *)
				   UINT_TO_POINTER(LP_SRAM_BASE)),
		imr_layout->imr_state.header.imr_ram_storage,
		LP_SRAM_SIZE);

	/* restore HPSRAM contents, mapping and power states */
	adsp_mm_restore_context(imr_layout->imr_state.header.imr_ram_storage+LP_SRAM_SIZE);

	/* this function won't return, it will restore a saved state */
	power_off_exit();
}
#endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */

__weak void pm_state_set(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(substate_id);
	uint32_t cpu = arch_proc_id();

	if (state == PM_STATE_SOFT_OFF) {
		/* save interrupt state and turn off all interrupts */
		core_desc[cpu].intenable = XTENSA_RSR("INTENABLE");
		z_xt_ints_off(0xffffffff);
		core_desc[cpu].bctl = DSPCS.bootctl[cpu].bctl;
		DSPCS.bootctl[cpu].bctl &= ~DSPBR_BCTL_WAITIPCG;
		soc_cpus_active[cpu] = false;
		sys_cache_data_flush_and_invd_all();
		if (cpu == 0) {
#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
			/* save storage and restore information to imr */
			__ASSERT_NO_MSG(global_imr_ram_storage != NULL);
#endif
			struct imr_layout *imr_layout = (struct imr_layout *)(IMR_LAYOUT_ADDRESS);

			imr_layout->imr_state.header.adsp_imr_magic = ADSP_IMR_MAGIC_VALUE;
#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
			imr_layout->imr_state.header.imr_restore_vector =
					(void *)boot_entry_d3_restore;
			imr_layout->imr_state.header.imr_ram_storage = global_imr_ram_storage;
			sys_cache_data_flush_range(imr_layout, sizeof(*imr_layout));

			/* Save CPU context here.
			 * When _restore_core_context() is called, it will return directly
			 * to the caller of this procedure. Any changes to the CPU context
			 * after _save_core_context() will be lost when power_down() is
			 * executed. Only data in the IMR region survives.
			 */
			xthal_window_spill();
			_save_core_context(cpu);

			/* save LPSRAM - a simple copy */
			memcpy(global_imr_ram_storage, (void *)LP_SRAM_BASE, LP_SRAM_SIZE);

			/* Save HPSRAM - a multi-step procedure executed by the TLB driver.
			 * The TLB driver will change the memory mapping, leaving the
			 * system not operational, so it must be called directly here,
			 * just before power_down().
			 */
			const struct device *tlb_dev = DEVICE_DT_GET(DT_NODELABEL(tlb));

			__ASSERT_NO_MSG(tlb_dev != NULL);
			const struct intel_adsp_tlb_api *tlb_api =
					(struct intel_adsp_tlb_api *)tlb_dev->api;

			tlb_api->save_context(global_imr_ram_storage+LP_SRAM_SIZE);
#else
			imr_layout->imr_state.header.imr_restore_vector =
					(void *)rom_entry;
			sys_cache_data_flush_range(imr_layout, sizeof(*imr_layout));
#endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */
			/* turn off all HPSRAM banks - get a full bitmap */
			uint32_t ebb_banks = ace_hpsram_get_bank_count();
			uint32_t hpsram_mask = (1 << ebb_banks) - 1;
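			/* For example, with 16 EBB banks (an illustrative count)
			 * the mask is (1 << 16) - 1 == 0xFFFF: one bit per bank.
			 */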
			/* do power down - this function won't return */
			power_down(true, uncache_to_cache(&hpsram_mask),
				   true);
		} else {
			/* Temporarily re-enable interrupts before going to waiti. Right
			 * now secondary cores don't have a proper context restore flow,
			 * so after leaving the D3 state the core would return here and
			 * get stuck.
			 */
			z_xt_ints_on(core_desc[cpu].intenable);
			k_cpu_idle();
		}
	} else if (state == PM_STATE_RUNTIME_IDLE) {
		core_desc[cpu].intenable = XTENSA_RSR("INTENABLE");
		z_xt_ints_off(0xffffffff);
		DSPCS.bootctl[cpu].bctl &= ~DSPBR_BCTL_WAITIPPG;
		DSPCS.bootctl[cpu].bctl &= ~DSPBR_BCTL_WAITIPCG;
		ACE_PWRCTL->wpdsphpxpg &= ~BIT(cpu);
		if (cpu == 0) {
			uint32_t battr = DSPCS.bootctl[cpu].battr & (~LPSCTL_BATTR_MASK);

			battr |= (DSPBR_BATTR_LPSCTL_RESTORE_BOOT & LPSCTL_BATTR_MASK);
			DSPCS.bootctl[cpu].battr = battr;
		}
		power_gate_entry(cpu);
	} else {
		__ASSERT(false, "invalid argument - unsupported power state");
	}
}

/* Handle SoC specific activity after Low Power Mode Exit */
__weak void pm_state_exit_post_ops(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(substate_id);
	uint32_t cpu = arch_proc_id();

	if (state == PM_STATE_SOFT_OFF) {
		/* restore clock gating state */
		DSPCS.bootctl[cpu].bctl |=
			(core_desc[0].bctl & DSPBR_BCTL_WAITIPCG);

#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
		if (cpu == 0) {
			struct imr_layout *imr_layout = (struct imr_layout *)(IMR_LAYOUT_ADDRESS);

			/* clean storage and restore information */
			sys_cache_data_invd_range(imr_layout, sizeof(*imr_layout));
			imr_layout->imr_state.header.adsp_imr_magic = 0;
			imr_layout->imr_state.header.imr_restore_vector = NULL;
			imr_layout->imr_state.header.imr_ram_storage = NULL;
		}
#endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */

		soc_cpus_active[cpu] = true;
		sys_cache_data_flush_and_invd_all();
		z_xt_ints_on(core_desc[cpu].intenable);
	} else if (state == PM_STATE_RUNTIME_IDLE) {
		if (cpu != 0) {
			/* NOTE: HW should support dynamic power gating on secondary
			 * cores, but since there is no real profit from it, the
			 * functionality is not fully implemented. The SOF PM policy
			 * will not allow the primary core to enter the d0i3 state
			 * while secondary cores are active.
			 */
			__ASSERT(false, "state not supported on secondary core");
			return;
		}

		ACE_PWRCTL->wpdsphpxpg |= BIT(cpu);

		while ((ACE_PWRSTS->dsphpxpgs & BIT(cpu)) == 0) {
			k_busy_wait(HW_STATE_CHECK_DELAY);
		}

		DSPCS.bootctl[cpu].bctl |=
			DSPBR_BCTL_WAITIPCG | DSPBR_BCTL_WAITIPPG;
		if (cpu == 0) {
			DSPCS.bootctl[cpu].battr &= (~LPSCTL_BATTR_MASK);
		}

		soc_cpus_active[cpu] = true;
		sys_cache_data_flush_and_invd_all();
		z_xt_ints_on(core_desc[cpu].intenable);
	} else {
		__ASSERT(false, "invalid argument - unsupported power state");
	}
}

#endif /* CONFIG_PM */