diff --git a/CODEOWNERS b/CODEOWNERS
index c0b10f3be82..5b41a17eacc 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -175,6 +175,7 @@
 /drivers/ipm/ipm_mhu* @karl-zh
 /drivers/ipm/Kconfig.nrfx @masz-nordic @ioannisg
 /drivers/ipm/Kconfig.nrfx_ipc_channel @masz-nordic @ioannisg
+/drivers/ipm/ipm_cavs_idc* @dcpleung
 /drivers/ipm/ipm_nrfx_ipc.c @masz-nordic @ioannisg
 /drivers/ipm/ipm_nrfx_ipc.h @masz-nordic @ioannisg
 /drivers/ipm/ipm_stm32_ipcc.c @arnopo
diff --git a/drivers/ipm/CMakeLists.txt b/drivers/ipm/CMakeLists.txt
index b753bb7a616..1007903d347 100644
--- a/drivers/ipm/CMakeLists.txt
+++ b/drivers/ipm/CMakeLists.txt
@@ -8,4 +8,6 @@
 zephyr_library_sources_ifdef(CONFIG_IPM_MHU ipm_mhu.c)
 zephyr_library_sources_ifdef(CONFIG_IPM_STM32_IPCC ipm_stm32_ipcc.c)
 zephyr_library_sources_ifdef(CONFIG_IPM_NRFX ipm_nrfx_ipc.c)
+zephyr_library_sources_if_kconfig(ipm_cavs_idc.c)
+
 zephyr_library_sources_ifdef(CONFIG_USERSPACE ipm_handlers.c)
diff --git a/drivers/ipm/Kconfig b/drivers/ipm/Kconfig
index a4d174ad806..23bc011600b 100644
--- a/drivers/ipm/Kconfig
+++ b/drivers/ipm/Kconfig
@@ -99,6 +99,13 @@ config IPM_STM32_IPCC_PROCID
 	help
 	  use to define the Processor ID for IPCC access
 
+config IPM_CAVS_IDC
+	bool "CAVS DSP Intra-DSP Communication (IDC) driver"
+	depends on SMP && CAVS_ICTL
+	help
+	  Driver for the Intra-DSP Communication (IDC) channel for
+	  cross SoC communications.
+
 module = IPM
 module-str = ipm
 source "subsys/logging/Kconfig.template.log_config"
diff --git a/drivers/ipm/ipm_cavs_idc.c b/drivers/ipm/ipm_cavs_idc.c
new file mode 100644
index 00000000000..d732d78d010
--- /dev/null
+++ b/drivers/ipm/ipm_cavs_idc.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2020 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <kernel.h>
+#include <device.h>
+#include <init.h>
+#include <drivers/ipm.h>
+#include <sys/sys_io.h>
+
+#include <soc.h>
+
+#include "ipm_cavs_idc.h"
+#include "ipm_cavs_idc_priv.h"
+
+#ifdef CONFIG_SCHED_IPI_SUPPORTED
+extern void z_sched_ipi(void);
+#endif
+
+struct cavs_idc_data {
+	ipm_callback_t cb;
+	void *ctx;
+};
+
+static struct device DEVICE_NAME_GET(cavs_idc);
+static struct cavs_idc_data cavs_idc_device_data;
+
+static void cavs_idc_isr(struct device *dev)
+{
+	struct cavs_idc_data *drv_data = dev->driver_data;
+
+	u32_t i, id;
+	void *ext;
+	u32_t idctfc;
+	u32_t curr_cpu_id = arch_curr_cpu()->id;
+#ifdef CONFIG_SCHED_IPI_SUPPORTED
+	bool do_sched_ipi = false;
+#endif
+
+	for (i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+		if (i == curr_cpu_id) {
+			/* skip current core */
+			continue;
+		}
+
+		idctfc = idc_read(REG_IDCTFC(i), curr_cpu_id);
+
+		if ((idctfc & REG_IDCTFC_BUSY) == 0) {
+			/* No message from this core */
+			continue;
+		}
+
+		/* Extract the message */
+		id = idctfc & REG_IDCTFC_MSG_MASK;
+
+		switch (id) {
+#ifdef CONFIG_SCHED_IPI_SUPPORTED
+		case IPM_CAVS_IDC_MSG_SCHED_IPI_ID:
+			do_sched_ipi = true;
+			break;
+#endif
+		default:
+			if (drv_data->cb != NULL) {
+				ext = UINT_TO_POINTER(
+					idc_read(REG_IDCTEFC(i), curr_cpu_id) &
+					REG_IDCTEFC_MSG_MASK);
+				drv_data->cb(drv_data->ctx, id, ext);
+			}
+			break;
+		}
+
+		/* Reset busy bit by writing to it */
+		idctfc |= REG_IDCTFC_BUSY;
+		idc_write(REG_IDCTFC(i), curr_cpu_id, idctfc);
+	}
+#ifdef CONFIG_SCHED_IPI_SUPPORTED
+	if (do_sched_ipi) {
+		z_sched_ipi();
+	}
+#endif
+}
+
+static int cavs_idc_send(struct device *dev, int wait, u32_t id,
+			 const void *data, int size)
+{
+	u32_t curr_cpu_id = arch_curr_cpu()->id;
+	u32_t ext = POINTER_TO_UINT(data);
+	u32_t reg;
+	bool busy;
+	int i;
+
+	if ((wait != 0) || (size != 0)) {
+		return -ENOTSUP;
+	}
+
+	/* Check if any core is still busy */
+	busy = false;
+	for (i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+		if (i == curr_cpu_id) {
+			/* skip current core */
+			continue;
+		}
+
+		reg = idc_read(REG_IDCITC(i), curr_cpu_id);
+		if ((reg & REG_IDCITC_BUSY) != 0) {
+			busy = true;
+			break;
+		}
+	}
+
+	/* Can't send if busy */
+	if (busy) {
+		return -EBUSY;
+	}
+
+	id &= REG_IDCITC_MSG_MASK;
+	ext &= REG_IDCIETC_MSG_MASK;
+	ext |= REG_IDCIETC_DONE; /* always clear DONE bit */
+
+	for (i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+		if (i == curr_cpu_id) {
+			/* skip current core */
+			continue;
+		}
+
+		idc_write(REG_IDCIETC(i), curr_cpu_id, ext);
+		idc_write(REG_IDCITC(i), curr_cpu_id, id | REG_IDCITC_BUSY);
+	}
+
+	return 0;
+}
+
+static int cavs_idc_max_data_size_get(struct device *dev)
+{
+	ARG_UNUSED(dev);
+
+	/* IDC can send an ID (of 31 bits, the header) and
+	 * another data of 30 bits (the extension). It cannot
+	 * send a whole message over. Best we can do is send
+	 * a 4-byte aligned pointer.
+	 *
+	 * So return 0 here for max data size.
+	 */
+
+	return 0;
+}
+
+static u32_t cavs_idc_max_id_val_get(struct device *dev)
+{
+	ARG_UNUSED(dev);
+
+	return IPM_CAVS_IDC_ID_MASK;
+}
+
+static void cavs_idc_register_callback(struct device *dev, ipm_callback_t cb,
+				       void *context)
+{
+	struct cavs_idc_data *drv_data = dev->driver_data;
+
+	drv_data->cb = cb;
+	drv_data->ctx = context;
+}
+
+static int cavs_idc_set_enabled(struct device *dev, int enable)
+{
+	int i, j;
+	u32_t mask;
+
+#ifdef CONFIG_SCHED_IPI_SUPPORTED
+	/* With scheduler IPI, IDC must always be enabled. */
+	if (enable == 0) {
+		return -ENOTSUP;
+	}
+#endif
+
+	for (i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+		mask = 0;
+
+		if (enable) {
+			for (j = 0; j < CONFIG_MP_NUM_CPUS; j++) {
+				if (i == j) {
+					continue;
+				}
+
+				mask |= REG_IDCCTL_IDCTBIE(j);
+			}
+		}
+
+		idc_write(REG_IDCCTL, i, mask);
+
+		/* FIXME: when we have API to enable IRQ on specific core. */
+		sys_set_bit(DT_CAVS_ICTL_BASE_ADDR + 0x04 +
+			    CAVS_ICTL_INT_CPU_OFFSET(i),
+			    CAVS_IRQ_NUMBER(DT_INST_0_INTEL_CAVS_IDC_IRQ_0));
+	}
+
+	return 0;
+}
+
+static int cavs_idc_init(struct device *dev)
+{
+	IRQ_CONNECT(DT_INST_0_INTEL_CAVS_IDC_IRQ_0,
+		    DT_INST_0_INTEL_CAVS_IDC_IRQ_0_PRIORITY,
+		    cavs_idc_isr, DEVICE_GET(cavs_idc), 0);
+
+	irq_enable(DT_INST_0_INTEL_CAVS_IDC_IRQ_0);
+
+	return 0;
+}
+
+static const struct ipm_driver_api cavs_idc_driver_api = {
+	.send = cavs_idc_send,
+	.register_callback = cavs_idc_register_callback,
+	.max_data_size_get = cavs_idc_max_data_size_get,
+	.max_id_val_get = cavs_idc_max_id_val_get,
+	.set_enabled = cavs_idc_set_enabled,
+};
+
+DEVICE_AND_API_INIT(IPM_CAVS_IDC_DEV_NAME,
+		    DT_INST_0_INTEL_CAVS_IDC_LABEL,
+		    &cavs_idc_init, &cavs_idc_device_data, NULL,
+		    PRE_KERNEL_2, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT,
+		    &cavs_idc_driver_api);
+
+#ifdef CONFIG_SCHED_IPI_SUPPORTED
+static int cavs_idc_smp_init(struct device *dev)
+{
+	/* Enable IDC for scheduler IPI */
+	cavs_idc_set_enabled(dev, 1);
+
+	return 0;
+}
+
+SYS_INIT(cavs_idc_smp_init, SMP, 0);
+#endif
diff --git a/drivers/ipm/ipm_cavs_idc.h b/drivers/ipm/ipm_cavs_idc.h
new file mode 100644
index 00000000000..44fda68b475
--- /dev/null
+++ b/drivers/ipm/ipm_cavs_idc.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2020 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef ZEPHYR_DRIVERS_IPM_IPM_CAVS_IDC_H_
+#define ZEPHYR_DRIVERS_IPM_IPM_CAVS_IDC_H_
+
+/* For use with the IPM driver */
+#define IPM_CAVS_IDC_DEV_NAME cavs_idc
+
+#define IPM_CAVS_IDC_ID_MASK \
+	(CAVS_IDC_TYPE(CAVS_IDC_TYPE_MASK) | \
+	 CAVS_IDC_HEADER(CAVS_IDC_HEADER_MASK))
+
+/* IDC message type. */
+#define CAVS_IDC_TYPE_SHIFT 24U
+#define CAVS_IDC_TYPE_MASK 0x7FU
+#define CAVS_IDC_TYPE(x) \
+	(((x) & CAVS_IDC_TYPE_MASK) << CAVS_IDC_TYPE_SHIFT)
+
+/* IDC message header. */
+#define CAVS_IDC_HEADER_MASK 0xFFFFFFU
+#define CAVS_IDC_HEADER(x) ((x) & CAVS_IDC_HEADER_MASK)
+
+/* IDC message extension. */
+#define CAVS_IDC_EXTENSION_MASK 0x3FFFFFFFU
+#define CAVS_IDC_EXTENSION(x) ((x) & CAVS_IDC_EXTENSION_MASK)
+
+/* Scheduler IPI message (type 0x7F, header 'IPI' in ascii) */
+#define IPM_CAVS_IDC_MSG_SCHED_IPI_DATA 0
+#define IPM_CAVS_IDC_MSG_SCHED_IPI_ID \
+	(CAVS_IDC_TYPE(0x7FU) | CAVS_IDC_HEADER(0x495049U))
+
+#endif /* ZEPHYR_DRIVERS_IPM_IPM_CAVS_IDC_H_ */
diff --git a/drivers/ipm/ipm_cavs_idc_priv.h b/drivers/ipm/ipm_cavs_idc_priv.h
new file mode 100644
index 00000000000..cba161c9a91
--- /dev/null
+++ b/drivers/ipm/ipm_cavs_idc_priv.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef ZEPHYR_DRIVERS_IPM_IPM_CAVS_IDC_PRIV_H_
+#define ZEPHYR_DRIVERS_IPM_IPM_CAVS_IDC_PRIV_H_
+
+#define IDC_REG_SIZE DT_INST_0_INTEL_CAVS_IDC_SIZE
+#define IDC_REG_BASE(x) \
+	(DT_INST_0_INTEL_CAVS_IDC_BASE_ADDRESS + x * IDC_REG_SIZE)
+
+#define IDC_CPU_OFFSET 0x10
+
+#define REG_IDCTFC(x) (0x0 + x * IDC_CPU_OFFSET)
+#define REG_IDCTEFC(x) (0x4 + x * IDC_CPU_OFFSET)
+#define REG_IDCITC(x) (0x8 + x * IDC_CPU_OFFSET)
+#define REG_IDCIETC(x) (0xc + x * IDC_CPU_OFFSET)
+#define REG_IDCCTL 0x50
+
+#define REG_IDCTFC_BUSY (1 << 31)
+#define REG_IDCTFC_MSG_MASK 0x7FFFFFFF
+
+#define REG_IDCTEFC_MSG_MASK 0x3FFFFFFF
+
+#define REG_IDCITC_BUSY (1 << 31)
+#define REG_IDCITC_MSG_MASK 0x7FFFFFFF
+
+#define REG_IDCIETC_DONE (1 << 30)
+#define REG_IDCIETC_MSG_MASK 0x3FFFFFFF
+
+#define REG_IDCCTL_IDCIDIE(x) (0x100 << (x))
+#define REG_IDCCTL_IDCTBIE(x) (0x1 << (x))
+
+static inline u32_t idc_read(u32_t reg, u32_t core_id)
+{
+	return sys_read32(IDC_REG_BASE(core_id) + reg);
+}
+
+static inline void idc_write(u32_t reg, u32_t core_id, u32_t val)
+{
+	sys_write32(val, IDC_REG_BASE(core_id) + reg);
+}
+
+#endif /* ZEPHYR_DRIVERS_IPM_IPM_CAVS_IDC_PRIV_H_ */
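
For reviewers, a minimal hypothetical sketch of how kernel or application code could drive this device through the generic Zephyr IPM API of this release follows. It is not part of the patch: the message ID, the pointer passed as the data argument, and the function names idc_msg_cb()/idc_usage_example() are placeholders; the wait == 0 / size == 0 constraints and the pointer-as-extension convention follow cavs_idc_send() and cavs_idc_max_data_size_get() above, and the callback signature is assumed to match this era's ipm_callback_t.

#include <zephyr.h>
#include <device.h>
#include <drivers/ipm.h>

/* Hypothetical consumer of the CAVS IDC IPM device. */
static void idc_msg_cb(void *context, u32_t id, volatile void *data)
{
	/* "id" is the 31-bit IDC header word; "data" carries the
	 * 30-bit extension, delivered as a pointer-sized value.
	 */
}

static void idc_usage_example(void)
{
	/* Label string comes from the generated devicetree macros. */
	struct device *idc = device_get_binding(DT_INST_0_INTEL_CAVS_IDC_LABEL);

	if (idc == NULL) {
		return;
	}

	ipm_register_callback(idc, idc_msg_cb, NULL);
	ipm_set_enabled(idc, 1);

	/* This driver requires wait == 0 and size == 0; the data pointer
	 * itself (not what it points to) is sent as the 30-bit extension.
	 */
	ipm_send(idc, 0, 0x12345 /* placeholder ID */,
		 (void *)0x10000 /* placeholder 4-byte aligned value */, 0);
}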