/*
 * Copyright (c) 2019 Synopsys.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Code required for ARC multicore and Zephyr SMP support
 */

#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <ksched.h>
#include <zephyr/init.h>
#include <zephyr/irq.h>
#include <arc_irq_offload.h>

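/* per-CPU entry point and argument that the master core hands to each
 * slave core at startup
 */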
volatile struct {
	arch_cpustart_t fn;
	void *arg;
} arc_cpu_init[CONFIG_MP_MAX_NUM_CPUS];

/*
 * arc_cpu_wake_flag is used to sync up the master core and slave cores.
 * A slave core spins on arc_cpu_wake_flag until the master core sets it
 * to that slave core's id. The slave core then clears it to notify the
 * master core that it has woken up.
 */
volatile uint32_t arc_cpu_wake_flag;

volatile char *arc_cpu_sp;

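/*
 * The slave-side half of this handshake lives in the early reset code.
 * A rough sketch of that flow (my_cpu_id is illustrative, not a name
 * used in this file):
 *
 *	while (arc_cpu_wake_flag != my_cpu_id) {
 *		;
 *	}
 *	sp = arc_cpu_sp;
 *	arc_cpu_wake_flag = 0;
 */
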
/*
 * _curr_cpu records the _cpu_t struct of each CPU
 * for efficient access from assembly.
 */
volatile _cpu_t *_curr_cpu[CONFIG_MP_MAX_NUM_CPUS];

/* Called from Zephyr initialization */
void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
		    arch_cpustart_t fn, void *arg)
{
	_curr_cpu[cpu_num] = &(_kernel.cpus[cpu_num]);
	arc_cpu_init[cpu_num].fn = fn;
	arc_cpu_init[cpu_num].arg = arg;

	/* pass the initial stack pointer to the target core through
	 * arc_cpu_sp; arc_cpu_wake_flag guards arc_cpu_sp so that
	 * only one slave cpu can read it at a time
	 */
	arc_cpu_sp = K_KERNEL_STACK_BUFFER(stack) + sz;

	arc_cpu_wake_flag = cpu_num;

	/* wait for the slave cpu to start */
	while (arc_cpu_wake_flag != 0U) {
		;
	}
}

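/*
 * A hypothetical call from the kernel's SMP bring-up might look like
 * this (the stack and fn names are illustrative, not taken from this
 * file):
 *
 *	arch_cpu_start(1, z_interrupt_stacks[1], CONFIG_ISR_STACK_SIZE,
 *		       smp_start_fn, NULL);
 */
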
#ifdef CONFIG_SMP
static void arc_connect_debug_mask_update(int cpu_num)
{
	uint32_t core_mask = 1 << cpu_num;

	/*
	 * The MDB debugger may modify the debug_select and debug_mask
	 * registers on start, so we can't rely on the debug_select reset
	 * value.
	 */
	if (cpu_num != ARC_MP_PRIMARY_CPU_ID) {
		core_mask |= z_arc_connect_debug_select_read();
	}

	z_arc_connect_debug_select_set(core_mask);
	/* Debugger halts cores under all conditions:
	 * ARC_CONNECT_CMD_DEBUG_MASK_H: Core global halt.
	 * ARC_CONNECT_CMD_DEBUG_MASK_AH: Actionpoint halt.
	 * ARC_CONNECT_CMD_DEBUG_MASK_BH: Software breakpoint halt.
	 * ARC_CONNECT_CMD_DEBUG_MASK_SH: Self halt.
	 */
	z_arc_connect_debug_mask_set(core_mask, (ARC_CONNECT_CMD_DEBUG_MASK_SH
		| ARC_CONNECT_CMD_DEBUG_MASK_BH | ARC_CONNECT_CMD_DEBUG_MASK_AH
		| ARC_CONNECT_CMD_DEBUG_MASK_H));
}
#endif

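/* core-private interrupt controller init; defined elsewhere in the ARC port */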
void arc_core_private_intc_init(void);

/* the C entry point of slave cores */
void arch_secondary_cpu_init(int cpu_num)
{
	arch_cpustart_t fn;

#ifdef CONFIG_SMP
	struct arc_connect_bcr bcr;

	bcr.val = z_arc_v2_aux_reg_read(_ARC_V2_CONNECT_BCR);

	if (bcr.dbg) {
		/* configure inter-core debug unit if available */
		arc_connect_debug_mask_update(cpu_num);
	}

	z_irq_setup();

	arc_core_private_intc_init();

	arc_irq_offload_init_smp();

	z_arc_connect_ici_clear();
	z_irq_priority_set(DT_IRQN(DT_NODELABEL(ici)),
			   DT_IRQ(DT_NODELABEL(ici), priority), 0);
	irq_enable(DT_IRQN(DT_NODELABEL(ici)));
#endif
	/* call the function set by arch_cpu_start */
	fn = arc_cpu_init[cpu_num].fn;

	fn(arc_cpu_init[cpu_num].arg);
}

#ifdef CONFIG_SMP

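/* handler for the inter-core interrupt: acknowledge it, then invoke the
 * scheduler's IPI hook
 */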
static void sched_ipi_handler(const void *unused)
{
	ARG_UNUSED(unused);

	z_arc_connect_ici_clear();
	z_sched_ipi();
}

/* arch implementation of sched_ipi */
void arch_sched_ipi(void)
{
	uint32_t i;

	/* broadcast the sched_ipi request to all cores;
	 * if the target is the current core, the hardware ignores it
	 */
	unsigned int num_cpus = arch_num_cpus();

	for (i = 0U; i < num_cpus; i++) {
		z_arc_connect_ici_generate(i);
	}
}

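/* master core SMP init: probes ARConnect features via the connect BCR,
 * then sets up the inter-core interrupt and the global free running
 * counter; hooked into boot via the SYS_INIT() below
 */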
int arch_smp_init(void)
{
	struct arc_connect_bcr bcr;

	/* necessary master core init */
	_curr_cpu[0] = &(_kernel.cpus[0]);

	bcr.val = z_arc_v2_aux_reg_read(_ARC_V2_CONNECT_BCR);

	if (bcr.dbg) {
		/* configure inter-core debug unit if available */
		arc_connect_debug_mask_update(ARC_MP_PRIMARY_CPU_ID);
	}

	if (bcr.ipi) {
		/* register the ici interrupt; only the master core needs
		 * to register it, and only once
		 */
		z_arc_connect_ici_clear();
		IRQ_CONNECT(DT_IRQN(DT_NODELABEL(ici)),
			    DT_IRQ(DT_NODELABEL(ici), priority),
			    sched_ipi_handler, NULL, 0);

		irq_enable(DT_IRQN(DT_NODELABEL(ici)));
	} else {
		__ASSERT(0,
			"ARC connect has no inter-core interrupt\n");
		return -ENODEV;
	}

	if (bcr.gfrc) {
		/* global free running counter init */
		z_arc_connect_gfrc_enable();

		/* when all cores halt, the gfrc halts too */
		z_arc_connect_gfrc_core_set((1 << arch_num_cpus()) - 1);
		z_arc_connect_gfrc_clear();
	} else {
		__ASSERT(0,
			"ARC connect has no global free running counter\n");
		return -ENODEV;
	}

	return 0;
}
SYS_INIT(arch_smp_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif