2021-01-18 12:19:49 +01:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2021, Nordic Semiconductor ASA
|
|
|
|
*
|
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "rpmsg_backend.h"
|
|
|
|
|
includes: prefer <zephyr/kernel.h> over <zephyr/zephyr.h>
As of today <zephyr/zephyr.h> is 100% equivalent to <zephyr/kernel.h>.
This patch proposes to then include <zephyr/kernel.h> instead of
<zephyr/zephyr.h> since it is more clear that you are including the
Kernel APIs and (probably) nothing else. <zephyr/zephyr.h> sounds like a
catch-all header that may be confusing. Most applications need to
include a bunch of other things to compile, e.g. driver headers or
subsystem headers like BT, logging, etc.
The idea of a catch-all header in Zephyr is probably not feasible
anyway. Reason is that Zephyr is not a library, like it could be for
example `libpython`. Zephyr provides many utilities nowadays: a kernel,
drivers, subsystems, etc and things will likely grow. A catch-all header
would be massive, difficult to keep up-to-date. It is also likely that
an application will only build a small subset. Note that subsystem-level
headers may use a catch-all approach to make things easier, though.
NOTE: This patch is **NOT** removing the header, just removing its usage
in-tree. I'd advocate for its deprecation (add a #warning on it), but I
understand many people will have concerns.
Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
2022-08-25 09:58:46 +02:00
|
|
|
#include <zephyr/kernel.h>
|
2022-05-06 11:12:04 +02:00
|
|
|
#include <zephyr/drivers/ipm.h>
|
|
|
|
#include <zephyr/device.h>
|
2023-08-31 11:39:19 +02:00
|
|
|
#include <zephyr/init.h>
|
2022-05-06 11:12:04 +02:00
|
|
|
#include <zephyr/logging/log.h>
|
2021-01-18 12:19:49 +01:00
|
|
|
|
|
|
|
#include <openamp/open_amp.h>
|
|
|
|
#include <metal/device.h>
|
|
|
|
|
|
|
|
#define LOG_MODULE_NAME rpmsg_backend
LOG_MODULE_REGISTER(LOG_MODULE_NAME, CONFIG_RPMSG_SERVICE_LOG_LEVEL);

/* Configuration defines */

/* The shared memory region used for the vrings/buffers must be provided
 * via the devicetree chosen node "zephyr,ipc-shm".
 */
#if !DT_HAS_CHOSEN(zephyr_ipc_shm)
#error "Module requires definition of shared memory for rpmsg"
#endif

#define MASTER IS_ENABLED(CONFIG_RPMSG_SERVICE_MODE_MASTER)

/* Host (master) drives virtqueue 0, remote drives virtqueue 1 */
#if MASTER
#define VIRTQUEUE_ID 0
#define RPMSG_ROLE RPMSG_HOST
#else
#define VIRTQUEUE_ID 1
#define RPMSG_ROLE RPMSG_REMOTE
#endif

/* Vring layout within the shared memory region */

#define VRING_COUNT 2
#define VRING_RX_ADDRESS (VDEV_START_ADDR + SHM_SIZE - VDEV_STATUS_SIZE)
#define VRING_TX_ADDRESS (VDEV_START_ADDR + SHM_SIZE)
#define VRING_ALIGNMENT 4
#define VRING_SIZE 16

/* Dedicated workqueue used to run virtqueue processing outside of the
 * IPM interrupt context.
 */
#define IPM_WORK_QUEUE_STACK_SIZE CONFIG_RPMSG_SERVICE_WORK_QUEUE_STACK_SIZE
#define IPM_WORK_QUEUE_PRIORITY K_HIGHEST_APPLICATION_THREAD_PRIO

K_THREAD_STACK_DEFINE(ipm_stack_area, IPM_WORK_QUEUE_STACK_SIZE);

struct k_work_q ipm_work_q;

/* End of configuration defines */

/* IPM device handles, resolved at build time from devicetree chosen
 * nodes: separate TX/RX channels or a single bidirectional channel.
 */
#if defined(CONFIG_RPMSG_SERVICE_DUAL_IPM_SUPPORT)
static const struct device *const ipm_tx_handle =
	DEVICE_DT_GET(DT_CHOSEN(zephyr_ipc_tx));
static const struct device *const ipm_rx_handle =
	DEVICE_DT_GET(DT_CHOSEN(zephyr_ipc_rx));
#elif defined(CONFIG_RPMSG_SERVICE_SINGLE_IPM_SUPPORT)
static const struct device *const ipm_handle =
	DEVICE_DT_GET(DT_CHOSEN(zephyr_ipc));
#endif

/* 1:1 physical mapping of the shared memory region for libmetal */
static metal_phys_addr_t shm_physmap[] = { SHM_START_ADDR };
|
|
|
|
static struct metal_device shm_device = {
|
|
|
|
.name = SHM_DEVICE_NAME,
|
|
|
|
.bus = NULL,
|
|
|
|
.num_regions = 1,
|
|
|
|
{
|
|
|
|
{
|
|
|
|
.virt = (void *) SHM_START_ADDR,
|
|
|
|
.physmap = shm_physmap,
|
|
|
|
.size = SHM_SIZE,
|
|
|
|
.page_shift = 0xffffffff,
|
|
|
|
.page_mask = 0xffffffff,
|
|
|
|
.mem_flags = 0,
|
|
|
|
.ops = { NULL },
|
|
|
|
},
|
|
|
|
},
|
|
|
|
.node = { NULL },
|
|
|
|
.irq_num = 0,
|
|
|
|
.irq_info = NULL
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Vring bookkeeping for the two queues (TX/RX). Only the alignment is
 * known statically; addresses, sizes and virtqueue pointers are filled
 * in by rpmsg_backend_init().
 */
static struct virtio_vring_info rvrings[2] = {
	[0] = {
		.info.align = VRING_ALIGNMENT,
	},
	[1] = {
		.info.align = VRING_ALIGNMENT,
	},
};
/* Virtqueues allocated in rpmsg_backend_init() */
static struct virtqueue *vqueue[2];

/* Work item submitted from the IPM ISR to process notifications */
static struct k_work ipm_work;
|
|
|
|
|
2023-07-03 16:47:00 +02:00
|
|
|
/* virtio_dispatch .get_status: the master always reports DRIVER_OK;
 * the remote reads the status byte the master wrote into shared memory
 * at VDEV_STATUS_ADDR (see ipc_virtio_set_status()).
 */
static unsigned char ipc_virtio_get_status(struct virtio_device *vdev)
{
#if MASTER
	return VIRTIO_CONFIG_STATUS_DRIVER_OK;
#else
	return sys_read8(VDEV_STATUS_ADDR);
#endif
}
|
|
|
|
|
2023-07-03 16:47:00 +02:00
|
|
|
/* virtio_dispatch .set_status: publish the status byte into shared
 * memory so the remote side can poll it via ipc_virtio_get_status().
 */
static void ipc_virtio_set_status(struct virtio_device *vdev, unsigned char status)
{
	sys_write8(status, VDEV_STATUS_ADDR);
}
|
|
|
|
|
2023-07-03 16:47:00 +02:00
|
|
|
/* virtio_dispatch .get_features: only the rpmsg name-service
 * announcement feature is advertised.
 */
static uint32_t ipc_virtio_get_features(struct virtio_device *vdev)
{
	return BIT(VIRTIO_RPMSG_F_NS);
}
|
|
|
|
|
2023-07-03 16:47:00 +02:00
|
|
|
/* virtio_dispatch .set_features: feature set is fixed, nothing to do */
static void ipc_virtio_set_features(struct virtio_device *vdev, uint32_t features)
{
}
|
|
|
|
|
2023-07-03 16:47:00 +02:00
|
|
|
/* virtio_dispatch .notify: kick the other core over IPM after placing
 * data in a virtqueue. The vq argument is unused — the doorbell itself
 * tells the peer to scan its queue.
 */
static void ipc_virtio_notify(struct virtqueue *vq)
{
	int status;

#if defined(CONFIG_RPMSG_SERVICE_DUAL_IPM_SUPPORT)
	/* Dedicated TX channel: doorbell only, no payload */
	status = ipm_send(ipm_tx_handle, 0, 0, NULL, 0);
#elif defined(CONFIG_RPMSG_SERVICE_SINGLE_IPM_SUPPORT)

#if defined(CONFIG_SOC_MPS2_AN521) || \
	defined(CONFIG_SOC_V2M_MUSCA_B1)
	/* SSE-200 platforms: target the other CPU's channel (0 or 1) */
	uint32_t current_core = sse_200_platform_get_cpu_id();

	status = ipm_send(ipm_handle, 0, current_core ? 0 : 1, 0, 1);
#elif defined(CONFIG_IPM_STM32_HSEM)
	/* No data transfer, only doorbell. */
	status = ipm_send(ipm_handle, 0, 0, NULL, 0);
#else
	/* The IPM interface is unclear on whether or not ipm_send
	 * can be called with NULL as data, thus, drivers might cause
	 * problems if you do. To avoid problems, we always send some
	 * dummy data, unless the IPM driver cannot transfer data.
	 * Ref: #68741
	 */
	uint32_t dummy_data = 0x55005500;

	status = ipm_send(ipm_handle, 0, 0, &dummy_data, sizeof(dummy_data));
#endif /* #if defined(CONFIG_SOC_MPS2_AN521) */

#endif

	if (status != 0) {
		LOG_ERR("ipm_send failed to notify: %d", status);
	}
}
|
|
|
|
|
|
|
|
/* OpenAMP virtio operations table wired into vdev->func by
 * rpmsg_backend_init().
 */
const struct virtio_dispatch dispatch = {
	.get_status = ipc_virtio_get_status,
	.set_status = ipc_virtio_set_status,
	.get_features = ipc_virtio_get_features,
	.set_features = ipc_virtio_set_features,
	.notify = ipc_virtio_notify,
};
|
|
|
|
|
|
|
|
/* Work handler (runs on ipm_work_q): process the notification for this
 * side's virtqueue outside of interrupt context.
 */
static void ipm_callback_process(struct k_work *work)
{
	virtqueue_notification(vqueue[VIRTQUEUE_ID]);
}
|
|
|
|
|
|
|
|
static void ipm_callback(const struct device *dev,
|
|
|
|
void *context, uint32_t id,
|
|
|
|
volatile void *data)
|
|
|
|
{
|
|
|
|
(void)dev;
|
|
|
|
|
|
|
|
LOG_DBG("Got callback of id %u", id);
|
|
|
|
/* TODO: Separate workqueue is needed only
|
|
|
|
* for serialization master (app core)
|
|
|
|
*
|
|
|
|
* Use sysworkq to optimize memory footprint
|
|
|
|
* for serialization slave (net core)
|
|
|
|
*/
|
|
|
|
k_work_submit_to_queue(&ipm_work_q, &ipm_work);
|
|
|
|
}
|
|
|
|
|
|
|
|
int rpmsg_backend_init(struct metal_io_region **io, struct virtio_device *vdev)
|
|
|
|
{
|
|
|
|
int32_t err;
|
|
|
|
struct metal_init_params metal_params = METAL_INIT_DEFAULTS;
|
|
|
|
struct metal_device *device;
|
|
|
|
|
|
|
|
/* Start IPM workqueue */
|
2021-04-20 14:50:06 +02:00
|
|
|
k_work_queue_start(&ipm_work_q, ipm_stack_area,
|
2021-01-18 12:19:49 +01:00
|
|
|
K_THREAD_STACK_SIZEOF(ipm_stack_area),
|
2021-04-20 14:50:06 +02:00
|
|
|
IPM_WORK_QUEUE_PRIORITY, NULL);
|
2021-01-18 12:19:49 +01:00
|
|
|
k_thread_name_set(&ipm_work_q.thread, "ipm_work_q");
|
|
|
|
|
|
|
|
/* Setup IPM workqueue item */
|
|
|
|
k_work_init(&ipm_work, ipm_callback_process);
|
|
|
|
|
|
|
|
/* Libmetal setup */
|
|
|
|
err = metal_init(&metal_params);
|
|
|
|
if (err) {
|
|
|
|
LOG_ERR("metal_init: failed - error code %d", err);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = metal_register_generic_device(&shm_device);
|
|
|
|
if (err) {
|
|
|
|
LOG_ERR("Couldn't register shared memory device: %d", err);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = metal_device_open("generic", SHM_DEVICE_NAME, &device);
|
|
|
|
if (err) {
|
|
|
|
LOG_ERR("metal_device_open failed: %d", err);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
*io = metal_device_io_region(device, 0);
|
|
|
|
if (!*io) {
|
|
|
|
LOG_ERR("metal_device_io_region failed to get region");
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* IPM setup */
|
|
|
|
#if defined(CONFIG_RPMSG_SERVICE_DUAL_IPM_SUPPORT)
|
2022-08-03 22:42:38 +02:00
|
|
|
if (!device_is_ready(ipm_tx_handle)) {
|
|
|
|
LOG_ERR("IPM TX device is not ready");
|
2021-01-18 12:19:49 +01:00
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
2022-08-03 22:42:38 +02:00
|
|
|
if (!device_is_ready(ipm_rx_handle)) {
|
|
|
|
LOG_ERR("IPM RX device is not ready");
|
2021-01-18 12:19:49 +01:00
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
|
|
|
ipm_register_callback(ipm_rx_handle, ipm_callback, NULL);
|
2021-08-25 15:20:44 +02:00
|
|
|
|
|
|
|
err = ipm_set_enabled(ipm_rx_handle, 1);
|
|
|
|
if (err != 0) {
|
|
|
|
LOG_ERR("Could not enable IPM interrupts and callbacks for RX");
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2021-01-18 12:19:49 +01:00
|
|
|
#elif defined(CONFIG_RPMSG_SERVICE_SINGLE_IPM_SUPPORT)
|
2022-08-03 22:42:38 +02:00
|
|
|
if (!device_is_ready(ipm_handle)) {
|
|
|
|
LOG_ERR("IPM device is not ready");
|
2021-01-18 12:19:49 +01:00
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
|
|
|
ipm_register_callback(ipm_handle, ipm_callback, NULL);
|
|
|
|
|
|
|
|
err = ipm_set_enabled(ipm_handle, 1);
|
|
|
|
if (err != 0) {
|
|
|
|
LOG_ERR("Could not enable IPM interrupts and callbacks");
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Virtqueue setup */
|
2023-08-09 23:54:25 +02:00
|
|
|
vqueue[0] = virtqueue_allocate(VRING_SIZE);
|
|
|
|
if (!vqueue[0]) {
|
|
|
|
LOG_ERR("virtqueue_allocate failed to alloc vqueue[0]");
|
2021-01-18 12:19:49 +01:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2023-08-09 23:54:25 +02:00
|
|
|
vqueue[1] = virtqueue_allocate(VRING_SIZE);
|
|
|
|
if (!vqueue[1]) {
|
|
|
|
LOG_ERR("virtqueue_allocate failed to alloc vqueue[1]");
|
2021-01-18 12:19:49 +01:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
rvrings[0].io = *io;
|
|
|
|
rvrings[0].info.vaddr = (void *)VRING_TX_ADDRESS;
|
|
|
|
rvrings[0].info.num_descs = VRING_SIZE;
|
|
|
|
rvrings[0].info.align = VRING_ALIGNMENT;
|
2023-08-09 23:54:25 +02:00
|
|
|
rvrings[0].vq = vqueue[0];
|
2021-01-18 12:19:49 +01:00
|
|
|
|
|
|
|
rvrings[1].io = *io;
|
|
|
|
rvrings[1].info.vaddr = (void *)VRING_RX_ADDRESS;
|
|
|
|
rvrings[1].info.num_descs = VRING_SIZE;
|
|
|
|
rvrings[1].info.align = VRING_ALIGNMENT;
|
2023-08-09 23:54:25 +02:00
|
|
|
rvrings[1].vq = vqueue[1];
|
2021-01-18 12:19:49 +01:00
|
|
|
|
|
|
|
vdev->role = RPMSG_ROLE;
|
|
|
|
vdev->vrings_num = VRING_COUNT;
|
|
|
|
vdev->func = &dispatch;
|
|
|
|
vdev->vrings_info = &rvrings[0];
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#if MASTER
/* Make sure we clear out the status flag very early (before we bring up
 * the secondary core) so the secondary core sees the proper status
 */
|
init: remove the need for a dummy device pointer in SYS_INIT functions
The init infrastructure, found in `init.h`, is currently used by:
- `SYS_INIT`: to call functions before `main`
- `DEVICE_*`: to initialize devices
They are all sorted according to an initialization level + a priority.
`SYS_INIT` calls are really orthogonal to devices, however, the required
function signature requires a `const struct device *dev` as a first
argument. The only reason for that is because the same init machinery is
used by devices, so we have something like:
```c
struct init_entry {
int (*init)(const struct device *dev);
/* only set by DEVICE_*, otherwise NULL */
const struct device *dev;
}
```
As a result, we end up with such weird/ugly pattern:
```c
static int my_init(const struct device *dev)
{
/* always NULL! add ARG_UNUSED to avoid compiler warning */
ARG_UNUSED(dev);
...
}
```
This is really a result of poor internals isolation. This patch proposes
to make init entries more flexible so that they can accept system
initialization calls like this:
```c
static int my_init(void)
{
...
}
```
This is achieved using a union:
```c
union init_function {
/* for SYS_INIT, used when init_entry.dev == NULL */
int (*sys)(void);
/* for DEVICE*, used when init_entry.dev != NULL */
int (*dev)(const struct device *dev);
};
struct init_entry {
/* stores init function (either for SYS_INIT or DEVICE*)
union init_function init_fn;
/* stores device pointer for DEVICE*, NULL for SYS_INIT. Allows
* to know which union entry to call.
*/
const struct device *dev;
}
```
This solution **does not increase ROM usage**, and allows to offer clean
public APIs for both SYS_INIT and DEVICE*. Note that however, init
machinery keeps a coupling with devices.
**NOTE**: This is a breaking change! All `SYS_INIT` functions will need
to be converted to the new signature. See the script offered in the
following commit.
Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
init: convert SYS_INIT functions to the new signature
Conversion scripted using scripts/utils/migrate_sys_init.py.
Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
manifest: update projects for SYS_INIT changes
Update modules with updated SYS_INIT calls:
- hal_ti
- lvgl
- sof
- TraceRecorderSource
Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
tests: devicetree: devices: adjust test
Adjust test according to the recently introduced SYS_INIT
infrastructure.
Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
tests: kernel: threads: adjust SYS_INIT call
Adjust to the new signature: int (*init_fn)(void);
Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
2022-10-19 09:33:44 +02:00
|
|
|
/* Clear the vdev status byte in shared memory at PRE_KERNEL_1, i.e.
 * before the secondary core can read a stale value.
 */
int init_status_flag(void)
{
	/* vdev argument is unused by ipc_virtio_set_status() */
	ipc_virtio_set_status(NULL, 0);

	return 0;
}

SYS_INIT(init_status_flag, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* MASTER */
|