arch: arm64: Reserved Cores

Enhanced arch_start_cpu so that booting does not halt when a core is unavailable according to the pm_cpu_on return value. Instead, the next core in cpu_node_list is tried. If the number of CPU nodes described in the device tree is greater than CONFIG_MP_MAX_NUM_CPUS, the extra cores are reserved and used whenever an earlier core in cpu_node_list fails to power on. If the number of cores described in the device tree equals CONFIG_MP_MAX_NUM_CPUS, no cores are in reserve and booting behaves as before: it halts.

Signed-off-by: Chad Karaginides <quic_chadk@quicinc.com>
parent 4d467af7f4
commit f5ff62f35a

2 changed files with 45 additions and 25 deletions
@@ -159,6 +159,15 @@ config ARM64_SAFE_EXCEPTION_STACK_SIZE
 	  The stack size of the safe exception stack. The safe exception stack
 	  requires to be enough to do the stack overflow check.
 
+config ARM64_FALLBACK_ON_RESERVED_CORES
+	bool "To enable fallback on reserved cores"
+	help
+	  Give the ability to define more cores in the device tree than required
+	  via CONFIG_MP_MAX_NUM_CPUS. The extra cores in the device tree
+	  become reserved. If there is an issue powering on a core during boot
+	  then that core will be skipped and the next core in the device tree
+	  will be used.
+
 if CPU_CORTEX_A
 
 config ARMV8_A_NS
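As a usage sketch (not part of this change): assuming a hypothetical board whose devicetree describes six cpu nodes, a project configuration along these lines would bring up four logical CPUs and leave the remaining two nodes in reserve:

	# prj.conf fragment, hypothetical values
	CONFIG_MP_MAX_NUM_CPUS=4
	CONFIG_ARM64_FALLBACK_ON_RESERVED_CORES=y

With that combination, the relaxed assertion added to arch_start_cpu below accepts a node list larger than CONFIG_MP_MAX_NUM_CPUS, and the extra nodes are tried only when an earlier node fails to power on.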
@@ -63,7 +63,8 @@ extern void z_arm64_mm_init(bool is_primary_core);
 void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
 		    arch_cpustart_t fn, void *arg)
 {
-	int cpu_count, i, j;
+	int cpu_count;
+	static int i;
 	uint64_t cpu_mpid = 0;
 	uint64_t master_core_mpid;
 
@@ -72,41 +73,51 @@ void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
 	master_core_mpid = MPIDR_TO_CORE(GET_MPIDR());
 
 	cpu_count = ARRAY_SIZE(cpu_node_list);
+
+#ifdef CONFIG_ARM64_FALLBACK_ON_RESERVED_CORES
+	__ASSERT(cpu_count >= CONFIG_MP_MAX_NUM_CPUS,
+		"The count of CPU Core nodes in dts is not greater or equal to CONFIG_MP_MAX_NUM_CPUS\n");
+#else
 	__ASSERT(cpu_count == CONFIG_MP_MAX_NUM_CPUS,
 		"The count of CPU Cores nodes in dts is not equal to CONFIG_MP_MAX_NUM_CPUS\n");
+#endif
 
-	for (i = 0, j = 0; i < cpu_count; i++) {
-		if (cpu_node_list[i] == master_core_mpid) {
-			continue;
-		}
-		if (j == cpu_num - 1) {
-			cpu_mpid = cpu_node_list[i];
-			break;
-		}
-		j++;
-	}
-	if (i == cpu_count) {
-		printk("Can't find CPU Core %d from dts and failed to boot it\n", cpu_num);
-		return;
-	}
-
 	arm64_cpu_boot_params.sp = Z_KERNEL_STACK_BUFFER(stack) + sz;
 	arm64_cpu_boot_params.fn = fn;
 	arm64_cpu_boot_params.arg = arg;
 	arm64_cpu_boot_params.cpu_num = cpu_num;
 
-	barrier_dsync_fence_full();
+	for (; i < cpu_count; i++) {
+		if (cpu_node_list[i] == master_core_mpid) {
+			continue;
+		}
 
-	/* store mpid last as this is our synchronization point */
-	arm64_cpu_boot_params.mpid = cpu_mpid;
+		cpu_mpid = cpu_node_list[i];
 
-	sys_cache_data_invd_range((void *)&arm64_cpu_boot_params,
-				  sizeof(arm64_cpu_boot_params));
+		barrier_dsync_fence_full();
 
-	if (pm_cpu_on(cpu_mpid, (uint64_t)&__start)) {
-		printk("Failed to boot secondary CPU core %d (MPID:%#llx)\n",
-		       cpu_num, cpu_mpid);
-		return;
+		/* store mpid last as this is our synchronization point */
+		arm64_cpu_boot_params.mpid = cpu_mpid;
+
+		sys_cache_data_invd_range((void *)&arm64_cpu_boot_params,
+					  sizeof(arm64_cpu_boot_params));
+
+		if (pm_cpu_on(cpu_mpid, (uint64_t)&__start)) {
+			printk("Failed to boot secondary CPU core %d (MPID:%#llx)\n",
+			       cpu_num, cpu_mpid);
+#ifdef CONFIG_ARM64_FALLBACK_ON_RESERVED_CORES
+			printk("Falling back on reserved cores\n");
+			continue;
+#else
+			k_panic();
+#endif
+		}
+
+		break;
+	}
+	if (i++ == cpu_count) {
+		printk("Can't find CPU Core %d from dts and failed to boot it\n", cpu_num);
+		k_panic();
 	}
 
 	/* Wait secondary cores up, see z_arm64_secondary_start */
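For illustration only, the selection logic above can be exercised outside the kernel. The following minimal host-side C sketch is not the Zephyr code: cpu_node_list, the MPID values, NUM_LOGICAL_CPUS, power_on() and start_cpu() are hypothetical stand-ins for the devicetree core list, CONFIG_MP_MAX_NUM_CPUS, pm_cpu_on() and arch_start_cpu(). It shows how the static index walks the node list across successive calls, so a core that fails to power on is skipped for good and a reserved node serves the next logical CPU:

/*
 * Host-side sketch of the reserved-core fallback, not kernel code.
 * The node list, the failing MPID (0x2) and power_on() are made up.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_LOGICAL_CPUS 4	/* stands in for CONFIG_MP_MAX_NUM_CPUS */

static const uint64_t cpu_node_list[] = { 0x0, 0x1, 0x2, 0x3, 0x100, 0x101 };
static const uint64_t master_core_mpid = 0x0;

static bool power_on(uint64_t mpid)
{
	return mpid != 0x2;	/* pretend MPID 0x2 cannot be powered on */
}

static void start_cpu(int cpu_num)
{
	static int i;		/* persists across calls, as in the patch */
	int cpu_count = sizeof(cpu_node_list) / sizeof(cpu_node_list[0]);

	for (; i < cpu_count; i++) {
		if (cpu_node_list[i] == master_core_mpid) {
			continue;	/* never hand out the boot core */
		}
		if (!power_on(cpu_node_list[i])) {
			printf("CPU %d: MPID %#llx failed, falling back\n", cpu_num,
			       (unsigned long long)cpu_node_list[i]);
			continue;	/* try the next (possibly reserved) node */
		}
		printf("CPU %d: booted on MPID %#llx\n", cpu_num,
		       (unsigned long long)cpu_node_list[i]);
		break;
	}
	if (i++ == cpu_count) {
		printf("CPU %d: no cores left to try\n", cpu_num);
	}
}

int main(void)
{
	for (int cpu = 1; cpu < NUM_LOGICAL_CPUS; cpu++) {
		start_cpu(cpu);
	}
	return 0;
}

Running this, logical CPU 2 falls back from MPID 0x2 to 0x3, and logical CPU 3 lands on the first reserved node (0x100); without CONFIG_ARM64_FALLBACK_ON_RESERVED_CORES the real code would k_panic() instead of continuing to the next node.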