diff --git a/dts/bindings/ipc/zephyr,ipc-openamp-static-vrings.yaml b/dts/bindings/ipc/zephyr,ipc-openamp-static-vrings.yaml
new file mode 100644
index 00000000000..4a4d0524f05
--- /dev/null
+++ b/dts/bindings/ipc/zephyr,ipc-openamp-static-vrings.yaml
@@ -0,0 +1,30 @@
+# Copyright (c) 2021 Carlo Caione
+# SPDX-License-Identifier: Apache-2.0
+
+description: OpenAMP (RPMsg with static VRINGs) backend
+
+compatible: "zephyr,ipc-openamp-static-vrings"
+
+include: base.yaml
+
+properties:
+  role:
+    description: OpenAMP roles
+    required: true
+    type: string
+    enum:
+      - host
+      - remote
+
+  memory-region:
+    description: phandle to the shared memory region
+    required: true
+    type: phandle
+
+  mboxes:
+    description: phandle to the MBOX controller (TX and RX are required)
+    required: true
+
+  mbox-names:
+    description: MBOX channel names (must be called "tx" and "rx")
+    required: true
diff --git a/subsys/ipc/ipc_service/backends/CMakeLists.txt b/subsys/ipc/ipc_service/backends/CMakeLists.txt
index ef79506da96..d0f5d0212c7 100644
--- a/subsys/ipc/ipc_service/backends/CMakeLists.txt
+++ b/subsys/ipc/ipc_service/backends/CMakeLists.txt
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: Apache-2.0
 
 zephyr_sources_ifdef(CONFIG_IPC_SERVICE_BACKEND_RPMSG_MI ipc_rpmsg_static_vrings_mi.c)
+zephyr_sources_ifdef(CONFIG_IPC_SERVICE_BACKEND_RPMSG ipc_rpmsg_static_vrings.c)
diff --git a/subsys/ipc/ipc_service/backends/Kconfig b/subsys/ipc/ipc_service/backends/Kconfig
index 01c7b50deeb..404874ad99a 100644
--- a/subsys/ipc/ipc_service/backends/Kconfig
+++ b/subsys/ipc/ipc_service/backends/Kconfig
@@ -11,6 +11,13 @@ config IPC_SERVICE_BACKEND_RPMSG_MI
 	select OPENAMP
 	select IPM
 
+config IPC_SERVICE_BACKEND_RPMSG
+	bool "OpenAMP RPMSG backend with static VRINGs"
+	depends on MBOX
+	select IPC_SERVICE_RPMSG
+	select IPC_SERVICE_STATIC_VRINGS
+	select OPENAMP
+
 config IPC_SERVICE_BACKEND_ZTEST
 	depends on ZTEST
 	bool "IPC service backend test"
@@ -39,4 +46,12 @@ config IPC_SERVICE_STATIC_VRINGS
 	help
 	  "Static VRINGs library"
 
+config IPC_SERVICE_STATIC_VRINGS_ALIGNMENT
+	int "VRINGs alignment"
+	depends on IPC_SERVICE_STATIC_VRINGS
+	default 4
+	help
+	  Static VRINGs alignment
+
 rsource "Kconfig.rpmsg_mi"
+rsource "Kconfig.rpmsg"
diff --git a/subsys/ipc/ipc_service/backends/Kconfig.rpmsg b/subsys/ipc/ipc_service/backends/Kconfig.rpmsg
new file mode 100644
index 00000000000..b5104ca45a7
--- /dev/null
+++ b/subsys/ipc/ipc_service/backends/Kconfig.rpmsg
@@ -0,0 +1,15 @@
+# Copyright (c) 2021 Carlo Caione
+# SPDX-License-Identifier: Apache-2.0
+
+if IPC_SERVICE_BACKEND_RPMSG
+
+config IPC_SERVICE_BACKEND_RPMSG_WQ_STACK_SIZE
+	int "Size of RX work queue stack"
+	default 1024
+	help
+	  Size of stack used by work queue RX thread. This work queue is
+	  created in the multi-instance / multi-core RPMsg backend module to
+	  prevent notifying service users about received data from the system
+	  work queue. Size is the same for all instances.
+
+endif # IPC_SERVICE_BACKEND_RPMSG
diff --git a/subsys/ipc/ipc_service/backends/ipc_rpmsg_static_vrings.c b/subsys/ipc/ipc_service/backends/ipc_rpmsg_static_vrings.c
new file mode 100644
index 00000000000..fc7dc926434
--- /dev/null
+++ b/subsys/ipc/ipc_service/backends/ipc_rpmsg_static_vrings.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2021 Carlo Caione
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+
+#include "ipc_rpmsg_static_vrings.h"
+
+#define DT_DRV_COMPAT	zephyr_ipc_openamp_static_vrings
+
+#define WQ_PRIORITY	(0)
+#define WQ_STACK_SIZE	CONFIG_IPC_SERVICE_BACKEND_RPMSG_WQ_STACK_SIZE
+
+#define STATE_READY	(0)
+#define STATE_BUSY	(1)
+#define STATE_INITED	(2)
+
+K_KERNEL_STACK_DEFINE(mbox_stack, WQ_STACK_SIZE);
+static struct k_work_q mbox_wq;
+
+struct backend_data_t {
+	/* RPMsg */
+	struct ipc_rpmsg_instance rpmsg_inst;
+
+	/* Static VRINGs */
+	struct ipc_static_vrings vr;
+
+	/* General */
+	struct k_work mbox_work;
+	unsigned int role;
+	atomic_t state;
+};
+
+struct backend_config_t {
+	unsigned int role;
+	uintptr_t shm_addr;
+	size_t shm_size;
+	struct mbox_channel mbox_tx;
+	struct mbox_channel mbox_rx;
+};
+
+static void rpmsg_service_unbind(struct rpmsg_endpoint *ep)
+{
+	rpmsg_destroy_ept(ep);
+}
+
+static struct ipc_rpmsg_ept *get_ept_slot_with_name(struct ipc_rpmsg_instance *rpmsg_inst,
+						    const char *name)
+{
+	struct ipc_rpmsg_ept *rpmsg_ept;
+
+	for (size_t i = 0; i < NUM_ENDPOINTS; i++) {
+		rpmsg_ept = &rpmsg_inst->endpoint[i];
+
+		if (strcmp(name, rpmsg_ept->name) == 0) {
+			return &rpmsg_inst->endpoint[i];
+		}
+	}
+
+	return NULL;
+}
+
+static struct ipc_rpmsg_ept *get_available_ept_slot(struct ipc_rpmsg_instance *rpmsg_inst)
+{
+	return get_ept_slot_with_name(rpmsg_inst, "");
+}
+
+/*
+ * Returns:
+ *  - true:  when the endpoint was already cached / registered
+ *  - false: when the endpoint was never registered before
+ *
+ * Returns in **rpmsg_ept:
+ *  - The endpoint with the name *name if it exists
+ *  - The first endpoint slot available when the endpoint with name *name does
+ *    not exist
+ *  - NULL in case of error
+ */
+static bool get_ept(struct ipc_rpmsg_instance *rpmsg_inst,
+		    struct ipc_rpmsg_ept **rpmsg_ept, const char *name)
+{
+	struct ipc_rpmsg_ept *ept;
+
+	ept = get_ept_slot_with_name(rpmsg_inst, name);
+	if (ept != NULL) {
+		(*rpmsg_ept) = ept;
+		return true;
+	}
+
+	ept = get_available_ept_slot(rpmsg_inst);
+	if (ept != NULL) {
+		(*rpmsg_ept) = ept;
+		return false;
+	}
+
+	(*rpmsg_ept) = NULL;
+
+	return false;
+}
+
+static void advertise_ept(struct ipc_rpmsg_instance *rpmsg_inst, struct ipc_rpmsg_ept *rpmsg_ept,
+			  const char *name, uint32_t dest)
+{
+	struct rpmsg_device *rdev;
+	int err;
+
+	rdev = rpmsg_virtio_get_rpmsg_device(&rpmsg_inst->rvdev);
+
+	err = rpmsg_create_ept(&rpmsg_ept->ep, rdev, name, RPMSG_ADDR_ANY,
+			       dest, rpmsg_inst->cb, rpmsg_service_unbind);
+	if (err != 0) {
+		return;
+	}
+
+	rpmsg_ept->bound = true;
+	if (rpmsg_inst->bound_cb) {
+		rpmsg_inst->bound_cb(rpmsg_ept);
+	}
+}
+
+static void ns_bind_cb(struct rpmsg_device *rdev, const char *name, uint32_t dest)
+{
+	struct ipc_rpmsg_instance *rpmsg_inst;
+	struct rpmsg_virtio_device *p_rvdev;
+	struct ipc_rpmsg_ept *rpmsg_ept;
+	bool ept_cached;
+
+	p_rvdev = CONTAINER_OF(rdev, struct rpmsg_virtio_device, rdev);
+	rpmsg_inst = CONTAINER_OF(p_rvdev->shpool, struct ipc_rpmsg_instance, shm_pool);
+
+	if (name == NULL || name[0] == '\0') {
+		return;
+	}
+
+	k_mutex_lock(&rpmsg_inst->mtx, K_FOREVER);
+	ept_cached = get_ept(rpmsg_inst, &rpmsg_ept, name);
+
+	if (rpmsg_ept == NULL) {
+		k_mutex_unlock(&rpmsg_inst->mtx);
+		return;
+	}
+
+	if (ept_cached) {
+		/*
+		 * The endpoint was already registered by the HOST core. The
+		 * endpoint can now be advertised to the REMOTE core.
+		 */
+		k_mutex_unlock(&rpmsg_inst->mtx);
+		advertise_ept(rpmsg_inst, rpmsg_ept, name, dest);
+	} else {
+		/*
+		 * The endpoint is not registered yet, this happens when the
+		 * REMOTE core registers the endpoint before the HOST has
+		 * had the chance to register it. Cache it saving name and
+		 * destination address to be used by the next register_ept()
+		 * call by the HOST core.
+		 */
+		strncpy(rpmsg_ept->name, name, sizeof(rpmsg_ept->name));
+		rpmsg_ept->dest = dest;
+		k_mutex_unlock(&rpmsg_inst->mtx);
+	}
+}
+
+static void bound_cb(struct ipc_rpmsg_ept *ept)
+{
+	rpmsg_send(&ept->ep, (uint8_t *)"", 0);
+
+	if (ept->cb->bound) {
+		ept->cb->bound(ept->priv);
+	}
+}
+
+static int ept_cb(struct rpmsg_endpoint *ep, void *data, size_t len, uint32_t src, void *priv)
+{
+	struct ipc_rpmsg_ept *ept;
+
+	ept = (struct ipc_rpmsg_ept *) priv;
+
+	/*
+	 * The remote processor has sent a NS announcement. We use an empty
+	 * message to advise the remote side that a local endpoint has been
+	 * created and that the processor is ready to communicate with this
+	 * endpoint.
+	 *
+	 * ipc_rpmsg_register_ept
+	 *   rpmsg_send_ns_message --------------> ns_bind_cb
+	 *                                           bound_cb
+	 *     ept_cb <-------------------------------rpmsg_send [empty message]
+	 *       bound_cb
+	 */
+	if (len == 0) {
+		if (!ept->bound) {
+			ept->bound = true;
+			bound_cb(ept);
+		}
+		return RPMSG_SUCCESS;
+	}
+
+	if (ept->cb->received) {
+		ept->cb->received(data, len, ept->priv);
+	}
+
+	return RPMSG_SUCCESS;
+}
+
+static int vr_shm_configure(struct ipc_static_vrings *vr, const struct backend_config_t *conf)
+{
+	unsigned int num_desc;
+
+	num_desc = optimal_num_desc(conf->shm_size);
+	if (num_desc == 0) {
+		return -ENOMEM;
+	}
+
+	vr->shm_addr = conf->shm_addr + VDEV_STATUS_SIZE;
+	vr->shm_size = shm_size(num_desc) - VDEV_STATUS_SIZE;
+
+	vr->rx_addr = vr->shm_addr + VRING_COUNT * vq_ring_size(num_desc);
+	vr->tx_addr = vr->rx_addr + vring_size(num_desc, VRING_ALIGNMENT);
+
+	vr->status_reg_addr = conf->shm_addr;
+
+	vr->vring_size = num_desc;
+
+	return 0;
+}
+
+static void virtio_notify_cb(struct virtqueue *vq, void *priv)
+{
+	struct backend_config_t *conf = priv;
+
+	if (conf->mbox_tx.dev) {
+		mbox_send(&conf->mbox_tx, NULL);
+	}
+}
+
+static void mbox_callback_process(struct k_work *item)
+{
+	struct backend_data_t *data;
+	unsigned int vq_id;
+
+	data = CONTAINER_OF(item, struct backend_data_t, mbox_work);
+	vq_id = (data->role == ROLE_HOST) ? VIRTQUEUE_ID_HOST : VIRTQUEUE_ID_REMOTE;
+
+	virtqueue_notification(data->vr.vq[vq_id]);
+}
+
+static void mbox_callback(const struct device *instance, uint32_t channel,
+			  void *user_data, struct mbox_msg *msg_data)
+{
+	struct backend_data_t *data = user_data;
+
+	k_work_submit_to_queue(&mbox_wq, &data->mbox_work);
+}
+
+static int mbox_init(const struct device *instance)
+{
+	const struct backend_config_t *conf = instance->config;
+	struct backend_data_t *data = instance->data;
+	int err;
+
+	k_work_queue_start(&mbox_wq, mbox_stack, K_KERNEL_STACK_SIZEOF(mbox_stack),
+			   WQ_PRIORITY, NULL);
+
+	k_work_init(&data->mbox_work, mbox_callback_process);
+
+	err = mbox_register_callback(&conf->mbox_rx, mbox_callback, data);
+	if (err != 0) {
+		return err;
+	}
+
+	return mbox_set_enabled(&conf->mbox_rx, 1);
+}
+
+static struct ipc_rpmsg_ept *register_ept_on_host(struct ipc_rpmsg_instance *rpmsg_inst,
+						  const struct ipc_ept_cfg *cfg)
+{
+	struct ipc_rpmsg_ept *rpmsg_ept;
+	bool ept_cached;
+
+	k_mutex_lock(&rpmsg_inst->mtx, K_FOREVER);
+
+	ept_cached = get_ept(rpmsg_inst, &rpmsg_ept, cfg->name);
+	if (rpmsg_ept == NULL) {
+		k_mutex_unlock(&rpmsg_inst->mtx);
+		return NULL;
+	}
+
+	rpmsg_ept->cb = &cfg->cb;
+	rpmsg_ept->priv = cfg->priv;
+	rpmsg_ept->bound = false;
+	rpmsg_ept->ep.priv = rpmsg_ept;
+
+	if (ept_cached) {
+		/*
+		 * The endpoint was cached in the NS bind callback. We can finally
+		 * advertise it.
+		 */
+		k_mutex_unlock(&rpmsg_inst->mtx);
+		advertise_ept(rpmsg_inst, rpmsg_ept, cfg->name, rpmsg_ept->dest);
+	} else {
+		/*
+		 * There is no endpoint in the cache because the REMOTE has
+		 * not registered the endpoint yet. Cache it.
+		 */
+		strncpy(rpmsg_ept->name, cfg->name, sizeof(rpmsg_ept->name));
+		k_mutex_unlock(&rpmsg_inst->mtx);
+	}
+
+	return rpmsg_ept;
+}
+
+static struct ipc_rpmsg_ept *register_ept_on_remote(struct ipc_rpmsg_instance *rpmsg_inst,
+						    const struct ipc_ept_cfg *cfg)
+{
+	struct ipc_rpmsg_ept *rpmsg_ept;
+	int err;
+
+	rpmsg_ept = get_available_ept_slot(rpmsg_inst);
+	if (rpmsg_ept == NULL) {
+		return NULL;
+	}
+
+	rpmsg_ept->cb = &cfg->cb;
+	rpmsg_ept->priv = cfg->priv;
+	rpmsg_ept->bound = false;
+	rpmsg_ept->ep.priv = rpmsg_ept;
+
+	strncpy(rpmsg_ept->name, cfg->name, sizeof(rpmsg_ept->name));
+
+	err = ipc_rpmsg_register_ept(rpmsg_inst, RPMSG_REMOTE, rpmsg_ept);
+	if (err != 0) {
+		return NULL;
+	}
+
+	return rpmsg_ept;
+}
+
+static int register_ept(const struct device *instance, void **token,
+			const struct ipc_ept_cfg *cfg)
+{
+	struct backend_data_t *data = instance->data;
+	struct ipc_rpmsg_instance *rpmsg_inst;
+	struct ipc_rpmsg_ept *rpmsg_ept;
+
+	/* Instance is still being initialized */
+	if (data->state == STATE_BUSY) {
+		return -EBUSY;
+	}
+
+	/* Instance is not initialized */
+	if (data->state == STATE_READY) {
+		return -EINVAL;
+	}
+
+	/* Empty name is not valid */
+	if (cfg->name == NULL || cfg->name[0] == '\0') {
+		return -EINVAL;
+	}
+
+	rpmsg_inst = &data->rpmsg_inst;
+
+	rpmsg_ept = (data->role == ROLE_HOST) ?
+			register_ept_on_host(rpmsg_inst, cfg) :
+			register_ept_on_remote(rpmsg_inst, cfg);
+	if (rpmsg_ept == NULL) {
+		return -EINVAL;
+	}
+
+	(*token) = rpmsg_ept;
+
+	return 0;
+}
+
+static int send(const struct device *instance, void *token,
+		const void *msg, size_t len)
+{
+	struct backend_data_t *data = instance->data;
+	struct ipc_rpmsg_ept *rpmsg_ept;
+
+	/* Instance is still being initialized */
+	if (data->state == STATE_BUSY) {
+		return -EBUSY;
+	}
+
+	/* Instance is not initialized */
+	if (data->state == STATE_READY) {
+		return -EINVAL;
+	}
+
+	/* Empty message is not allowed */
+	if (len == 0) {
+		return -EBADMSG;
+	}
+
+	rpmsg_ept = (struct ipc_rpmsg_ept *) token;
+
+	return rpmsg_send(&rpmsg_ept->ep, msg, len);
+}
+
+static int open(const struct device *instance)
+{
+	const struct backend_config_t *conf = instance->config;
+	struct backend_data_t *data = instance->data;
+	struct ipc_rpmsg_instance *rpmsg_inst;
+	int err;
+
+	if (!atomic_cas(&data->state, STATE_READY, STATE_BUSY)) {
+		return -EALREADY;
+	}
+
+	err = vr_shm_configure(&data->vr, conf);
+	if (err != 0) {
+		goto error;
+	}
+
+	data->vr.notify_cb = virtio_notify_cb;
+	data->vr.priv = (void *) conf;
+
+	err = ipc_static_vrings_init(&data->vr, conf->role);
+	if (err != 0) {
+		goto error;
+	}
+
+	err = mbox_init(instance);
+	if (err != 0) {
+		goto error;
+	}
+
+	rpmsg_inst = &data->rpmsg_inst;
+
+	rpmsg_inst->bound_cb = bound_cb;
+	rpmsg_inst->cb = ept_cb;
+
+	err = ipc_rpmsg_init(rpmsg_inst, data->role, data->vr.shm_io, &data->vr.vdev,
+			     (void *) data->vr.shm_device.regions->virt,
+			     data->vr.shm_device.regions->size, ns_bind_cb);
+	if (err != 0) {
+		goto error;
+	}
+
+	atomic_set(&data->state, STATE_INITED);
+	return 0;
+
+error:
+	/* Back to the ready state */
+	atomic_set(&data->state, STATE_READY);
+	return err;
+
+}
+
+const static struct ipc_service_backend backend_ops = {
+	.open_instance = open,
+	.register_endpoint = register_ept,
+	.send = send,
+};
+
+static int backend_init(const struct device *instance)
+{
+	const struct backend_config_t *conf = instance->config;
+	struct backend_data_t *data = instance->data;
+
+	data->role = conf->role;
+
+	k_mutex_init(&data->rpmsg_inst.mtx);
+	atomic_set(&data->state, STATE_READY);
+
+	return 0;
+}
+
+#define DEFINE_BACKEND_DEVICE(i)						\
+	static struct backend_config_t backend_config_##i = {			\
+		.role = DT_ENUM_IDX_OR(DT_DRV_INST(i), role, ROLE_HOST),	\
+		.shm_size = DT_REG_SIZE(DT_INST_PHANDLE(i, memory_region)),	\
+		.shm_addr = DT_REG_ADDR(DT_INST_PHANDLE(i, memory_region)),	\
+		.mbox_tx = MBOX_DT_CHANNEL_GET(DT_DRV_INST(i), tx),		\
+		.mbox_rx = MBOX_DT_CHANNEL_GET(DT_DRV_INST(i), rx),		\
+	};									\
+										\
+	static struct backend_data_t backend_data_##i;				\
+										\
+	DEVICE_DT_INST_DEFINE(i,						\
+			      &backend_init,					\
+			      NULL,						\
+			      &backend_data_##i,				\
+			      &backend_config_##i,				\
+			      POST_KERNEL,					\
+			      CONFIG_IPC_SERVICE_REG_BACKEND_PRIORITY,		\
+			      &backend_ops);
+
+DT_INST_FOREACH_STATUS_OKAY(DEFINE_BACKEND_DEVICE)
diff --git a/subsys/ipc/ipc_service/backends/ipc_rpmsg_static_vrings.h b/subsys/ipc/ipc_service/backends/ipc_rpmsg_static_vrings.h
new file mode 100644
index 00000000000..f355f2a3ceb
--- /dev/null
+++ b/subsys/ipc/ipc_service/backends/ipc_rpmsg_static_vrings.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2021 Carlo Caione
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include
+#include
+
+#include
+
+/*
+ * Endpoint registration flow:
+ *
+ *   >>> Case #1: Endpoint registered on HOST first <<<
+ *
+ *   [B] backend
+ *   [O] OpenAMP
+ *
+ *   REMOTE                                     HOST
+ *   -----------------------------------------------------------------
+ *                                              [B] register_ept **
+ *   [B] register_ept **
+ *   [B] ipc_rpmsg_register_ept
+ *   [B] rpmsg_create_ept
+ *   [O] rpmsg_send_ns_message
+ *   [O] virtqueue_kick
+ *   [O] virtio_notify_cb
+ *   [B] mbox_send
+ *                                              [B] mbox_callback
+ *                                              [B] mbox_callback_process
+ *                                              [B] virtqueue_notification
+ *                                              [O] rpmsg_virtio_rx_callback
+ *                                              [B] ns_bind_cb
+ *                                              [B] rpmsg_create_ept
+ *                                              [B] bound_cb
+ *                                              [B] rpmsg_send
+ *                                              [B] virtio_notify_cb
+ *                                              [B] mbox_send
+ *   [B] mbox_callback
+ *   [B] mbox_callback_process
+ *   [B] virtqueue_notification
+ *   [O] rpmsg_virtio_rx_callback
+ *   [O] ept_cb
+ *   [B] bound_cb
+ *
+ *   >>> Case #2: Endpoint registered on REMOTE first <<<
+ *
+ *   [B] backend
+ *   [O] OpenAMP
+ *
+ *   REMOTE                                     HOST
+ *   -----------------------------------------------------------------
+ *   [B] register_ept **
+ *   [B] ipc_rpmsg_register_ept
+ *   [B] rpmsg_create_ept
+ *   [O] rpmsg_send_ns_message
+ *   [O] virtqueue_kick
+ *   [O] virtio_notify_cb
+ *   [O] mbox_send
+ *                                              [B] mbox_callback
+ *                                              [B] mbox_callback_process
+ *                                              [B] virtqueue_notification
+ *                                              [O] rpmsg_virtio_rx_callback
+ *                                              [B] ns_bind_cb
+ *
+ *                                              [B] register_ept **
+ *                                              [B] rpmsg_create_ept
+ *                                              [B] bound_cb
+ *                                              [B] rpmsg_send
+ *                                              [B] virtio_notify_cb
+ *                                              [B] mbox_send
+ *   [B] mbox_callback
+ *   [B] mbox_callback_process
+ *   [B] virtqueue_notification
+ *   [O] rpmsg_virtio_rx_callback
+ *   [O] ept_cb
+ *   [B] bound_cb
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Endpoint registration flow (with focus on backend):
+ *
+ *   >>> Case #1: Endpoint registered on HOST first <<<
+ *
+ *   REMOTE                                     HOST
+ *   -----------------------------------------------------------------
+ *                                              register_ept()
+ *                                                register_ept_on_host()
+ *                                                  get_ept() returns NULL
+ *                                                  name is cached in rpmsg_ept->name
+ *   register_ept()
+ *     register_ept_on_remote()
+ *       ipc_rpmsg_register_ept()
+ *                                              ns_bind_cb()
+ *                                                get_ept() returns endpoint with cached name
+ *                                                advertise_ept()
+ *                                                  rpmsg_create_ept()
+ *                                                  bound_cb()
+ *                                                    rpmsg_send()
+ *   mbox_callback()
+ *     mbox_callback_process()
+ *       virtqueue_notification()
+ *         ept_cb()
+ *           bound_cb()
+ *
+ *   >>> Case #2: Endpoint registered on REMOTE first <<<
+ *
+ *   REMOTE                                     HOST
+ *   -----------------------------------------------------------------
+ *   register_ept()
+ *     register_ept_on_remote()
+ *       ipc_rpmsg_register_ept()
+ *                                              ns_bind_cb()
+ *                                                get_ept() returns NULL
+ *                                                name is cached in rpmsg_ept->name
+ *   ...
+ *                                              register_ept()
+ *                                                register_ept_on_host()
+ *                                                  get_ept() returns endpoint with cached name
+ *                                                  advertise_ept()
+ *                                                    rpmsg_create_ept()
+ *                                                    bound_cb()
+ *                                                      rpmsg_send()
+ *   mbox_callback()
+ *     mbox_callback_process()
+ *       virtqueue_notification()
+ *         ept_cb()
+ *           bound_cb()
+ *
+ */
+
+#define VDEV_STATUS_SIZE	(4)	/* Size of status region */
+
+#define VIRTQUEUE_ID_HOST	(0)
+#define VIRTQUEUE_ID_REMOTE	(1)
+
+#define ROLE_HOST	VIRTIO_DEV_MASTER
+#define ROLE_REMOTE	VIRTIO_DEV_SLAVE
+
+static inline size_t vq_ring_size(unsigned int num)
+{
+	return (RPMSG_BUFFER_SIZE * num);
+}
+
+static inline size_t shm_size(unsigned int num)
+{
+	return (VDEV_STATUS_SIZE + (VRING_COUNT * vq_ring_size(num)) +
+		(VRING_COUNT * vring_size(num, VRING_ALIGNMENT)));
+}
+
+static inline unsigned int optimal_num_desc(size_t shm_size)
+{
+	size_t available, single_alloc;
+	unsigned int num_desc;
+
+	available = shm_size - VDEV_STATUS_SIZE;
+	single_alloc = VRING_COUNT * (vq_ring_size(1) + vring_size(1, VRING_ALIGNMENT));
+
+	num_desc = (unsigned int) (available / single_alloc);
+
+	return (1 << (find_msb_set(num_desc) - 1));
+}
diff --git a/subsys/ipc/ipc_service/backends/ipc_rpmsg_static_vrings_mi.h b/subsys/ipc/ipc_service/backends/ipc_rpmsg_static_vrings_mi.h
index 10db446b1d9..9cff1bc86b3 100644
--- a/subsys/ipc/ipc_service/backends/ipc_rpmsg_static_vrings_mi.h
+++ b/subsys/ipc/ipc_service/backends/ipc_rpmsg_static_vrings_mi.h
@@ -7,7 +7,7 @@
 #define SHM_START_ADDR		CONFIG_IPC_SERVICE_BACKEND_RPMSG_MI_SHM_BASE_ADDRESS
 #define SHM_SIZE		CONFIG_IPC_SERVICE_BACKEND_RPMSG_MI_SHM_SIZE
 
-#define VRING_ALIGNMENT		(4)	/* Alignment of vring buffer */
+#define VRING_ALIGNMENT		CONFIG_IPC_SERVICE_STATIC_VRINGS_ALIGNMENT
 
 #define VDEV_STATUS_SIZE	(0x4)	/* Size of status region */
 #define VRING_COUNT		(2)	/* Number of used vring buffers. */
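
Reviewer note: for context, the sketch below shows how a board overlay could instantiate the new binding. It is illustrative only and not part of this patch: the ipc0/sram_ipc labels, the reserved-memory address and size, and the MBOX channel indices are made-up assumptions for a hypothetical board with a single mbox controller; only the compatible string and the property names come from the binding added above.

	/ {
		reserved-memory {
			#address-cells = <1>;
			#size-cells = <1>;

			/* Hypothetical shared memory region used for the static VRINGs */
			sram_ipc: memory@20070000 {
				reg = <0x20070000 0x8000>;
			};
		};

		ipc {
			ipc0: ipc0 {
				compatible = "zephyr,ipc-openamp-static-vrings";
				memory-region = <&sram_ipc>;
				mboxes = <&mbox 0>, <&mbox 1>;
				mbox-names = "tx", "rx";
				role = "host";
				status = "okay";
			};
		};
	};

The overlay for the other core would point at the same memory region with role = "remote" and with the TX/RX mailbox channels assigned as appropriate for the platform.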
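Similarly, here is a minimal application-side sketch of how an instance of this backend is expected to be driven. Assumptions: the generic ipc_service wrappers (ipc_service_open_instance(), ipc_service_register_endpoint(), ipc_service_send()) that this backend plugs into, the hypothetical ipc0 node label from the overlay above, and placeholder endpoint name and callbacks; it is not part of this patch.

	#include <zephyr.h>
	#include <device.h>
	#include <ipc/ipc_service.h>

	K_SEM_DEFINE(ep_bound_sem, 0, 1);

	/* Invoked by the backend (through bound_cb()) once both sides created the endpoint */
	static void ep_bound(void *priv)
	{
		k_sem_give(&ep_bound_sem);
	}

	/* Invoked by the backend (through ept_cb()) for every non-empty message */
	static void ep_recv(const void *data, size_t len, void *priv)
	{
		/* Consume the payload received from the other core */
	}

	static const struct ipc_ept_cfg ep_cfg = {
		.name = "ep0",		/* must be non-empty, see register_ept() */
		.cb = {
			.bound = ep_bound,
			.received = ep_recv,
		},
	};

	static struct ipc_ept ep;

	int ipc0_example(void)
	{
		const struct device *inst = DEVICE_DT_GET(DT_NODELABEL(ipc0));
		int ret;

		/* Backend open(): shared memory, static VRINGs and MBOX setup */
		ret = ipc_service_open_instance(inst);
		if (ret < 0 && ret != -EALREADY) {
			return ret;
		}

		ret = ipc_service_register_endpoint(inst, &ep, &ep_cfg);
		if (ret < 0) {
			return ret;
		}

		/* Wait for the empty-message handshake to complete before sending */
		k_sem_take(&ep_bound_sem, K_FOREVER);

		return ipc_service_send(&ep, "hello", 6);
	}

The explicit wait reflects the flow documented in the header: registering an endpoint only starts the name-service handshake, and data sent before the bound callback fires may fail because the endpoint's destination address is not known yet.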