soc: intel_adsp/cavs_v25: Add CPU halt and relaunch APIs

Add a SOC API to allow application control over deep idle power
states.  Note that hardware idle entry happens out of the WAITI
instruction, so the application is responsible for ensuring that the
CPU to be halted actually reaches idle deterministically.  There are
lots of warnings in the docs to this effect.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
commit c6d077e1bc
Author:    Andy Ross
Date:      2021-08-18 06:28:11 -07:00
Committer: Anas Nashif

4 changed files with 98 additions and 9 deletions
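
For orientation, a minimal sketch (not part of this commit) of how an
application might drive the new calls.  The wrapper function, the choice of
CPU 1, and the prototype declarations are illustrative assumptions; the
commit does not show a public header for these APIs:

/* Illustrative sketch only -- not part of this commit. */
#include <zephyr.h>

int soc_halt_cpu(int id);      /* assumed visible to the application */
int soc_relaunch_cpu(int id);  /* assumed visible to the application */

void aux_core_power_cycle(void)
{
	/* The caller must first guarantee that CPU 1 has no runnable
	 * threads: soc_halt_cpu() spins until the core executes WAITI
	 * and power-gates, and it hangs if that never happens.
	 */
	if (soc_halt_cpu(1) != 0) {
		return; /* rejected: CPU 0 or the current CPU */
	}

	/* ... CPU 1 is now power-gated ... */

	/* Bring the core back online; on success it may run any
	 * Zephyr thread again.
	 */
	(void)soc_relaunch_cpu(1);
}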


@@ -138,11 +138,8 @@ z_thread_return_value_set_with_data(struct k_thread *thread,
 #ifdef CONFIG_SMP
 extern void z_smp_init(void);
-
-#if CONFIG_MP_NUM_CPUS > 1 && !defined(CONFIG_SMP_BOOT_DELAY)
 extern void smp_timer_init(void);
-#endif
 #endif
 
 extern void z_early_boot_rand_get(uint8_t *buf, size_t length);


@@ -255,6 +255,11 @@ static void init_idle_thread(int i)
 #endif
 }
 
+void z_reinit_idle_thread(int i)
+{
+	init_idle_thread(i);
+}
+
 /**
  *
  * @brief Initializes kernel data structures


@@ -66,8 +66,7 @@ void z_smp_thread_swap(void)
 	z_swap_unlocked();
 }
 
-#ifndef CONFIG_SMP_BOOT_DELAY
-static FUNC_NORETURN void smp_init_top(void *arg)
+static inline FUNC_NORETURN void smp_init_top(void *arg)
 {
 	struct k_thread dummy_thread;
@@ -78,7 +77,15 @@ static FUNC_NORETURN void smp_init_top(void *arg)
 	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
 }
-#endif
+
+void z_smp_start_cpu(int id)
+{
+	(void)atomic_clear(&start_flag);
+	arch_start_cpu(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE,
+		       smp_init_top, &start_flag);
+	(void)atomic_set(&start_flag, 1);
+}
+
 #endif
 
 void z_smp_init(void)
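
Reading aid, not part of the diff: the start_flag handshake above relies on
the launched core parking until the launcher releases it, assumed here to
happen in smp.c's pre-existing wait_for_start_signal() spin helper.  A
commented timeline of that assumption:

/* Launcher (calling CPU)             Target CPU `id`
 *
 * atomic_clear(&start_flag);
 * arch_start_cpu(...)          --->  enters smp_init_top(&start_flag),
 *                                    spins in wait_for_start_signal()
 *                                    while the flag is still clear
 * atomic_set(&start_flag, 1);  --->  observes the flag, sets up its
 *                                    dummy thread, and swaps into the
 *                                    scheduler (CODE_UNREACHABLE past
 *                                    that point)
 */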


@@ -28,6 +28,8 @@ LOG_MODULE_REGISTER(soc_mp, CONFIG_SOC_LOG_LEVEL);
 #include <ipm/ipm_cavs_idc.h>
 
 extern void z_sched_ipi(void);
+extern void z_smp_start_cpu(int id);
+extern void z_reinit_idle_thread(int i);
 
 /* ROM wake version parsed by ROM during core wake up. */
 #define IDC_ROM_WAKE_VERSION	0x2
@@ -61,6 +63,8 @@ struct cpustart_rec {
 	uint32_t	alive;
 };
 
+static struct k_spinlock mplock;
+
 char *z_mp_stack_top;
 
 #ifdef CONFIG_KERNEL_COHERENCE
@@ -283,9 +287,7 @@ static ALWAYS_INLINE uint32_t prid(void)
 void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
 		    arch_cpustart_t fn, void *arg)
 {
-	uint32_t vecbase, curr_cpu;
-
-	__asm__ volatile("rsr %0, PRID" : "=r"(curr_cpu));
+	uint32_t vecbase, curr_cpu = prid();
 
 #ifdef CONFIG_SOC_SERIES_INTEL_CAVS_V25
 	/* On cAVS v2.5, MP startup works differently.  The core has
@@ -455,3 +457,81 @@ void soc_idc_init(void)
 	}
 }
+
+/**
+ * @brief Restart halted SMP CPU
+ *
+ * Relaunches a CPU that has entered an idle power state via
+ * soc_halt_cpu().  Returns -EINVAL if the CPU is not in a power-gated
+ * idle state.  Upon successful return, the CPU is online and
+ * available to run any Zephyr thread.
+ *
+ * @param id CPU to start, in the range [1:CONFIG_MP_NUM_CPUS)
+ */
+int soc_relaunch_cpu(int id)
+{
+	volatile struct soc_dsp_shim_regs *shim = (void *)SOC_DSP_SHIM_REG_BASE;
+	int ret = 0;
+	k_spinlock_key_t k = k_spin_lock(&mplock);
+
+	if (id < 1 || id >= CONFIG_MP_NUM_CPUS) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (shim->pwrsts & BIT(id)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	CAVS_INTCTRL[id].l2.clear = CAVS_L2_IDC;
+	z_reinit_idle_thread(id);
+	z_smp_start_cpu(id);
+
+out:
+	k_spin_unlock(&mplock, k);
+	return ret;
+}
+
+/**
+ * @brief Halts and offlines a running CPU
+ *
+ * Enables power gating on the specified CPU, which cannot be the
+ * current CPU or CPU 0.  The CPU must be idle; no application threads
+ * may be runnable on it when this function is called (or at least the
+ * CPU must be guaranteed to reach idle in finite time without
+ * deadlock).  Actual CPU shutdown can only happen in the context of
+ * the idle thread, and synchronization is an application
+ * responsibility.  This function will hang if the other CPU fails to
+ * reach idle.
+ *
+ * @param id CPU to halt, not the current CPU nor CPU 0
+ * @return 0 on success, -EINVAL on error
+ */
+int soc_halt_cpu(int id)
+{
+	volatile struct soc_dsp_shim_regs *shim = (void *)SOC_DSP_SHIM_REG_BASE;
+	int ret = 0;
+	k_spinlock_key_t k = k_spin_lock(&mplock);
+
+	if (id == 0 || id == _current_cpu->id) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Turn off the "prevent power/clock gating" bits, enabling
+	 * low power idle, and mask off IDC interrupts so the core
+	 * will not be woken by scheduler IPIs
+	 */
+	CAVS_INTCTRL[id].l2.set = CAVS_L2_IDC;
+	shim->pwrctl &= ~BIT(id);
+	shim->clkctl &= ~BIT(16 + id);
+
+	/* Wait for the CPU to reach an idle state before returning */
+	while (shim->pwrsts & BIT(id)) {
+	}
+
+out:
+	k_spin_unlock(&mplock, k);
+	return ret;
+}
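
The soc_halt_cpu() precondition (the target core must reach idle in finite
time) is the application's problem, as the commit message warns.  One
deterministic way to satisfy it is to keep application threads off the
victim core from creation using CPU masks.  A sketch assuming
CONFIG_SCHED_CPU_MASK=y; the worker names and priority are illustrative:

#include <zephyr.h>

K_THREAD_STACK_DEFINE(worker_stack, 1024);
static struct k_thread worker;

static void worker_fn(void *p1, void *p2, void *p3)
{
	/* ... application work, never scheduled on CPU 1 ... */
}

void start_worker_off_cpu1(void)
{
	/* Created suspended (K_FOREVER delay): the CPU mask may only
	 * be changed while the thread is not runnable.
	 */
	k_tid_t t = k_thread_create(&worker, worker_stack,
				    K_THREAD_STACK_SIZEOF(worker_stack),
				    worker_fn, NULL, NULL, NULL,
				    7, 0, K_FOREVER);

	(void)k_thread_cpu_mask_clear(t);     /* forbid all CPUs... */
	(void)k_thread_cpu_mask_enable(t, 0); /* ...then allow CPU 0 only */
	k_thread_start(t);

	/* With every application thread masked away from CPU 1, that
	 * core always reaches its idle thread, so soc_halt_cpu(1) is
	 * guaranteed to terminate.
	 */
}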