ipc_service: Add new backend based on custom messaging buffer

This commit adds an IPC backend that relies on a simple
inter-core messaging buffer, which is also added by this commit.

This backend is configurable with a DT overlay. Each
IPC instance can be defined with the "zephyr,ipc-icmsg" compatible.

Signed-off-by: Emil Obalski <emil.obalski@nordicsemi.no>
Emil Obalski, 2022-03-14 15:59:04 +01:00, committed by Carles Cufí
commit 40b28ab51e
9 changed files with 562 additions and 0 deletions


@@ -0,0 +1,30 @@
#
# Copyright (c) 2022 Nordic Semiconductor ASA
#
# SPDX-License-Identifier: Apache-2.0
#
description: Inter core messaging backend
compatible: "zephyr,ipc-icmsg"
include: base.yaml
properties:
  tx-region:
    description: phandle to the shared memory region used for data transmission
    required: true
    type: phandle

  rx-region:
    description: phandle to the shared memory region used for data reception
    required: true
    type: phandle

  mboxes:
    description: phandle to the MBOX controller (TX and RX are required)
    required: true

  mbox-names:
    description: MBOX channel names (must be called "tx" and "rx")
    required: true
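
For reference, a minimal sketch (not part of this commit) of how an application could point at an instance described by this binding; the "ipc0" node label and the memory-region/MBOX phandles below are illustrative assumptions that must match the board overlay:

/*
 * Assumed overlay node:
 *
 * ipc0: ipc {
 *         compatible = "zephyr,ipc-icmsg";
 *         tx-region = <&sram_tx>;
 *         rx-region = <&sram_rx>;
 *         mboxes = <&mbox 0>, <&mbox 1>;
 *         mbox-names = "tx", "rx";
 *         status = "okay";
 * };
 */
#include <device.h>
#include <devicetree.h>

static const struct device *ipc0_instance = DEVICE_DT_GET(DT_NODELABEL(ipc0));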


@@ -0,0 +1,94 @@
/*
* Copyright (c) 2022 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_IPC_SERVICE_IPC_ICMSG_BUF_H_
#define ZEPHYR_INCLUDE_IPC_SERVICE_IPC_ICMSG_BUF_H_
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief IPC Service ICMsg buffer API
* @defgroup ipc_service_icmsg_buffer_api IPC service ICMsg buffer API
* @{
*/
/**
* @brief Inter core messaging buffer
*
* The inter core messaging buffer implements a lightweight unidirectional
* messaging buffer with read/write semantics on top of a memory region shared
* by the reader and writer. It embeds cache and memory barrier management to
* ensure correct data access.
*
* This structure supports a single writer and a single reader. Data stored in
* the buffer is encapsulated into a message.
*
*/
struct icmsg_buf {
uint32_t len; /* Length of data[] in bytes. */
uint32_t wr_idx; /* Index of the first free byte in data[] */
uint32_t rd_idx; /* Index of the first valid byte in data[] */
uint8_t data[]; /* Buffer data. */
};
/**
* @brief Initialize inter core messaging buffer.
*
* This function initializes inter core messaging buffer on top of dedicated
* memory region.
*
* @param buf Pointer to a memory region on which buffer is
* created.
* @param blen Length of the buffer. Must be large enough to
* contain the internal structure, the 2-byte message
* length header and at least one byte of data.
* @retval struct icmsg_buf* Pointer to the created buffer. The pointer
* points to the same address as buf.
*/
struct icmsg_buf *icmsg_buf_init(void *buf, size_t blen);
/**
* @brief Write specified amount of data to the inter core messaging buffer.
*
* @param ib An icmsg buffer to write to.
* @param buf Pointer to the data to be written to icmsg buffer.
* @param len Number of bytes to be written to the icmsg buffer.
* @retval int Number of bytes written, negative error code on fail.
* -EINVAL, if len == 0.
* -ENOMEM, if len is bigger than the icmsg buffer can fit.
*/
int icmsg_buf_write(struct icmsg_buf *ib, const char *buf, uint16_t len);
/**
* @brief Read specified amount of data from the inter core messaging buffer.
*
* A single read reads the message sent by a single write.
* The provided buf must be big enough to store the whole message.
*
* @param ib An icmsg buffer from which the data is read.
* @param buf Data pointer to which the read data will be written.
* If NULL, the length of the stored message is returned.
* @param len Number of bytes to be read from the icmsg buffer.
* @retval int Bytes read, negative error code on fail.
* Bytes to be read, if buf == NULL.
* -ENOMEM, if the message cannot fit in the provided buf.
* -EAGAIN, if the whole message is not ready yet.
*/
int icmsg_buf_read(struct icmsg_buf *ib, char *buf, uint16_t len);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_IPC_SERVICE_IPC_ICMSG_BUF_H_ */
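
A minimal sketch (not part of this commit) of the icmsg_buf API used in a single image, with a static array standing in for a real shared memory region; the 64-byte size and the buffer names are illustrative assumptions:

#include <zephyr.h>
#include <ipc/ipc_icmsg_buf.h>

static uint8_t shm[64] __aligned(4); /* stands in for a shared memory region */

void icmsg_buf_example(void)
{
	struct icmsg_buf *ib = icmsg_buf_init(shm, sizeof(shm));
	const char msg[] = "ping";
	char out[16];

	/* Writer side: returns the number of bytes written or a negative errno. */
	int written = icmsg_buf_write(ib, msg, sizeof(msg));

	/* Reader side: a NULL buffer returns the length of the pending message. */
	int pending = icmsg_buf_read(ib, NULL, 0);

	if (written > 0 && pending > 0 && pending <= (int)sizeof(out)) {
		(void)icmsg_buf_read(ib, out, sizeof(out));
	}
}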


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
zephyr_sources_ifdef(CONFIG_IPC_SERVICE_BACKEND_ICMSG ipc_icmsg.c)
zephyr_sources_ifdef(CONFIG_IPC_SERVICE_BACKEND_RPMSG ipc_rpmsg_static_vrings.c)


@@ -11,6 +11,14 @@ config IPC_SERVICE_BACKEND_RPMSG
select IPC_SERVICE_STATIC_VRINGS
select OPENAMP
config IPC_SERVICE_BACKEND_ICMSG
bool "ICMSG backend with ICMsg buffer"
depends on MBOX
select IPC_SERVICE_ICMSG_BUF
help
Choosing this backend results in a single endpoint implementation based
on a ring buffer.
config IPC_SERVICE_BACKEND_ZTEST
depends on ZTEST
bool "IPC service backend test"
@@ -46,4 +54,10 @@ config IPC_SERVICE_STATIC_VRINGS_ALIGNMENT
help
Static VRINGs alignment
config IPC_SERVICE_ICMSG_BUF
bool "Inter core messaging buffer support library"
help
Inter core messaging buffer library
rsource "Kconfig.icmsg"
rsource "Kconfig.rpmsg" rsource "Kconfig.rpmsg"


@@ -0,0 +1,16 @@
# Copyright (c) 2022 Nordic Semiconductor ASA
# SPDX-License-Identifier: Apache-2.0
if IPC_SERVICE_BACKEND_ICMSG
config IPC_SERVICE_BACKEND_ICMSG_CB_BUF_SIZE
int "Size of static callback buffer size"
range 1 65535
default 255
help
Size of the callback buffer used for processing received data in the work
queue thread. If you are sure that your application never sends data
bigger than some size, you can safely change this option to reduce RAM
consumption in your application.
endif # IPC_SERVICE_BACKEND_ICMSG


@@ -0,0 +1,222 @@
/*
* Copyright (c) 2022 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr.h>
#include <device.h>
#include <string.h>
#include <drivers/mbox.h>
#include <ipc/ipc_service_backend.h>
#include "ipc_icmsg.h"
#define DT_DRV_COMPAT zephyr_ipc_icmsg
#define CB_BUF_SIZE CONFIG_IPC_SERVICE_BACKEND_ICMSG_CB_BUF_SIZE
static const uint8_t magic[] = {0x45, 0x6d, 0x31, 0x6c, 0x31, 0x4b,
0x30, 0x72, 0x6e, 0x33, 0x6c, 0x69, 0x34};
BUILD_ASSERT(sizeof(magic) <= CB_BUF_SIZE);
BUILD_ASSERT(CB_BUF_SIZE <= UINT16_MAX);
struct backend_data_t {
/* Tx/Rx buffers. */
struct icmsg_buf *tx_ib;
struct icmsg_buf *rx_ib;
/* Backend ops for an endpoint. */
const struct ipc_service_cb *ops;
/* General */
struct k_work mbox_work;
atomic_t state;
};
struct backend_config_t {
uintptr_t tx_shm_addr;
uintptr_t rx_shm_addr;
size_t tx_shm_size;
size_t rx_shm_size;
struct mbox_channel mbox_tx;
struct mbox_channel mbox_rx;
};
static void mbox_callback_process(struct k_work *item)
{
struct backend_data_t *dev_data = CONTAINER_OF(item, struct backend_data_t, mbox_work);
uint8_t cb_buffer[CB_BUF_SIZE] __aligned(4);
atomic_t state = atomic_get(&dev_data->state);
int len = icmsg_buf_read(dev_data->rx_ib, cb_buffer, CB_BUF_SIZE);
__ASSERT_NO_MSG(len <= CB_BUF_SIZE);
if (len == -EAGAIN) {
__ASSERT_NO_MSG(false);
(void)k_work_submit(&dev_data->mbox_work);
return;
} else if (len <= 0) {
return;
}
if (state == ICMSG_STATE_READY) {
if (dev_data->ops && dev_data->ops->received) {
dev_data->ops->received(cb_buffer, len, NULL);
}
/* Reading with NULL buffer to know if there are data in the
* buffer to be read.
*/
len = icmsg_buf_read(dev_data->rx_ib, NULL, 0);
if (len > 0) {
(void)k_work_submit(&dev_data->mbox_work);
}
} else {
__ASSERT_NO_MSG(state == ICMSG_STATE_BUSY);
if (len != sizeof(magic) || memcmp(magic, cb_buffer, len)) {
__ASSERT_NO_MSG(false);
return;
}
if (dev_data->ops && dev_data->ops->bound) {
dev_data->ops->bound(NULL);
}
atomic_set(&dev_data->state, ICMSG_STATE_READY);
}
}
static void mbox_callback(const struct device *instance, uint32_t channel,
void *user_data, struct mbox_msg *msg_data)
{
struct backend_data_t *dev_data = user_data;
(void)k_work_submit(&dev_data->mbox_work);
}
static int mbox_init(const struct device *instance)
{
const struct backend_config_t *conf = instance->config;
struct backend_data_t *dev_data = instance->data;
int err;
k_work_init(&dev_data->mbox_work, mbox_callback_process);
err = mbox_register_callback(&conf->mbox_rx, mbox_callback, dev_data);
if (err != 0) {
return err;
}
return mbox_set_enabled(&conf->mbox_rx, 1);
}
static int register_ept(const struct device *instance, void **token,
const struct ipc_ept_cfg *cfg)
{
const struct backend_config_t *conf = instance->config;
struct backend_data_t *dev_data = instance->data;
struct ipc_ept *ep = (struct ipc_ept *)token;
int ret;
if (!atomic_cas(&dev_data->state, ICMSG_STATE_OFF, ICMSG_STATE_BUSY)) {
/* Already registered. This backend supports single ep. */
return -EALREADY;
}
ep->instance = instance;
dev_data->ops = &cfg->cb;
ret = mbox_init(instance);
if (ret) {
return ret;
}
ret = icmsg_buf_write(dev_data->tx_ib, magic, sizeof(magic));
if (ret < sizeof(magic)) {
__ASSERT_NO_MSG(ret == sizeof(magic));
return ret;
}
ret = mbox_send(&conf->mbox_tx, NULL);
if (ret) {
return ret;
}
ret = icmsg_buf_read(dev_data->rx_ib, NULL, 0);
if (ret > 0) {
(void)k_work_submit(&dev_data->mbox_work);
}
return 0;
}
static int send(const struct device *instance, void *token,
const void *msg, size_t len)
{
const struct backend_config_t *conf = instance->config;
struct backend_data_t *dev_data = instance->data;
int ret;
if (atomic_get(&dev_data->state) != ICMSG_STATE_READY) {
return -EBUSY;
}
/* Empty message is not allowed */
if (len == 0) {
return -ENODATA;
}
ret = icmsg_buf_write(dev_data->tx_ib, msg, len);
if (ret < 0) {
return ret;
} else if (ret < len) {
return -EBADMSG;
}
__ASSERT_NO_MSG(conf->mbox_tx.dev != NULL);
return mbox_send(&conf->mbox_tx, NULL);
}
const static struct ipc_service_backend backend_ops = {
.register_endpoint = register_ept,
.send = send,
};
static int backend_init(const struct device *instance)
{
const struct backend_config_t *conf = instance->config;
struct backend_data_t *dev_data = instance->data;
__ASSERT_NO_MSG(conf->tx_shm_size > sizeof(struct icmsg_buf));
dev_data->tx_ib = icmsg_buf_init((void *)conf->tx_shm_addr, conf->tx_shm_size);
dev_data->rx_ib = (void *)conf->rx_shm_addr;
return 0;
}
#define DEFINE_BACKEND_DEVICE(i) \
static const struct backend_config_t backend_config_##i = { \
.tx_shm_size = DT_REG_SIZE(DT_INST_PHANDLE(i, tx_region)), \
.tx_shm_addr = DT_REG_ADDR(DT_INST_PHANDLE(i, tx_region)), \
.rx_shm_size = DT_REG_SIZE(DT_INST_PHANDLE(i, rx_region)), \
.rx_shm_addr = DT_REG_ADDR(DT_INST_PHANDLE(i, rx_region)), \
.mbox_tx = MBOX_DT_CHANNEL_GET(DT_DRV_INST(i), tx), \
.mbox_rx = MBOX_DT_CHANNEL_GET(DT_DRV_INST(i), rx), \
}; \
\
static struct backend_data_t backend_data_##i; \
\
DEVICE_DT_INST_DEFINE(i, \
&backend_init, \
NULL, \
&backend_data_##i, \
&backend_config_##i, \
POST_KERNEL, \
CONFIG_IPC_SERVICE_REG_BACKEND_PRIORITY, \
&backend_ops);
DT_INST_FOREACH_STATUS_OKAY(DEFINE_BACKEND_DEVICE)
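
A minimal sketch (not part of this commit) of driving this backend through the generic ipc_service API; the "ipc0" node label, the endpoint name and the payload are illustrative assumptions:

#include <zephyr.h>
#include <device.h>
#include <devicetree.h>
#include <ipc/ipc_service.h>

static struct ipc_ept ep;

static void ep_bound(void *priv)
{
	/* The magic-number handshake with the remote core has completed;
	 * the backend is now in ICMSG_STATE_READY and sending is allowed.
	 */
	(void)ipc_service_send(&ep, "hello", 5);
}

static void ep_recv(const void *data, size_t len, void *priv)
{
	/* Called from the backend work queue with data copied out of the
	 * RX icmsg_buf (at most CB_BUF_SIZE bytes).
	 */
}

static const struct ipc_ept_cfg ep_cfg = {
	.name = "ep0",
	.cb = {
		.bound = ep_bound,
		.received = ep_recv,
	},
};

void ipc_icmsg_backend_example(void)
{
	const struct device *ipc0 = DEVICE_DT_GET(DT_NODELABEL(ipc0));

	/* The backend supports a single endpoint per instance; a second
	 * registration would return -EALREADY.
	 */
	(void)ipc_service_register_endpoint(ipc0, &ep, &ep_cfg);
}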


@@ -0,0 +1,13 @@
/*
* Copyright (c) 2022 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <ipc/ipc_icmsg_buf.h>
enum icmsg_state {
ICMSG_STATE_OFF,
ICMSG_STATE_BUSY,
ICMSG_STATE_READY
};


@@ -1,4 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
zephyr_sources_ifdef(CONFIG_IPC_SERVICE_ICMSG_BUF ipc_icmsg_buf.c)
zephyr_sources_ifdef(CONFIG_IPC_SERVICE_RPMSG ipc_rpmsg.c)
zephyr_sources_ifdef(CONFIG_IPC_SERVICE_STATIC_VRINGS ipc_static_vrings.c)


@@ -0,0 +1,171 @@
/*
* Copyright (c) 2022 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr.h>
#include <string.h>
#include <errno.h>
#include <cache.h>
#include <ipc/ipc_icmsg_buf.h>
/* Helpers */
static uint32_t idx_occupied(uint32_t len, uint32_t a, uint32_t b)
{
/* It is implicitly assumed a and b cannot differ by more than len. */
return (b > a) ? (len - (b - a)) : (a - b);
}
static uint32_t idx_cut(uint32_t len, uint32_t idx)
{
/* It is implicitly assumed idx cannot exceed 2 * len. */
return (idx >= len) ? (idx - len) : (idx);
}
struct icmsg_buf *icmsg_buf_init(void *buf, size_t blen)
{
/* blen must be big enough to contain the icmsg_buf struct, at least one
* byte of data and the message length (2 bytes).
*/
struct icmsg_buf *ib = buf;
__ASSERT_NO_MSG(blen > (sizeof(*ib) + sizeof(uint16_t)));
ib->len = blen - sizeof(*ib);
ib->wr_idx = 0;
ib->rd_idx = 0;
__sync_synchronize();
sys_cache_data_range(ib, sizeof(*ib), K_CACHE_WB);
return ib;
}
int icmsg_buf_write(struct icmsg_buf *ib, const char *buf, uint16_t len)
{
/* The length of the buffer is immutable - avoid reloading that may happen
* due to memory barriers.
*/
const uint32_t iblen = ib->len;
/* rd_idx == wr_idx means the buffer is empty.
* Max bytes that can be stored is len - 1.
*/
const uint32_t max_len = iblen - 1;
sys_cache_data_range(ib, sizeof(*ib), K_CACHE_INVD);
__sync_synchronize();
uint32_t wr_idx = ib->wr_idx;
uint32_t rd_idx = ib->rd_idx;
if (len == 0) {
/* Incorrect call. */
return -EINVAL;
}
uint32_t avail = max_len - idx_occupied(iblen, wr_idx, rd_idx);
if ((len + sizeof(len) > avail) ||
(len + sizeof(len) > max_len)) {
/* No free space. */
return -ENOMEM;
}
/* Store info about the message length. */
ib->data[wr_idx] = (uint8_t)len;
sys_cache_data_range(&ib->data[wr_idx], sizeof(ib->data[wr_idx]), K_CACHE_WB);
wr_idx = idx_cut(iblen, wr_idx + sizeof(ib->data[wr_idx]));
ib->data[wr_idx] = (uint8_t)(len >> 8);
sys_cache_data_range(&ib->data[wr_idx], sizeof(ib->data[wr_idx]), K_CACHE_WB);
wr_idx = idx_cut(iblen, wr_idx + sizeof(ib->data[wr_idx]));
/* Write until the end of the buffer. */
uint32_t sz = MIN(len, iblen - wr_idx);
memcpy(&ib->data[wr_idx], buf, sz);
sys_cache_data_range(&ib->data[wr_idx], sz, K_CACHE_WB);
if (len > sz) {
/* Write remaining data at the buffer head. */
memcpy(&ib->data[0], buf + sz, len - sz);
sys_cache_data_range(&ib->data[0], len - sz, K_CACHE_WB);
}
/* Update write index - make other side aware data was written. */
__sync_synchronize();
wr_idx = idx_cut(iblen, wr_idx + len);
ib->wr_idx = wr_idx;
sys_cache_data_range(ib, sizeof(*ib), K_CACHE_WB);
return len;
}
int icmsg_buf_read(struct icmsg_buf *ib, char *buf, uint16_t len)
{
/* The length of the buffer is immutable - avoid reloading. */
const uint32_t iblen = ib->len;
sys_cache_data_range(ib, sizeof(*ib), K_CACHE_INVD);
__sync_synchronize();
uint32_t rd_idx = ib->rd_idx;
uint32_t wr_idx = ib->wr_idx;
if (rd_idx == wr_idx) {
/* The buffer is empty. */
return 0;
}
uint32_t bytes_stored = idx_occupied(iblen, wr_idx, rd_idx);
/* Read message len. */
sys_cache_data_range(&ib->data[rd_idx], sizeof(ib->data[rd_idx]), K_CACHE_INVD);
uint16_t mlen = ib->data[rd_idx];
rd_idx = idx_cut(iblen, rd_idx + sizeof(ib->data[rd_idx]));
sys_cache_data_range(&ib->data[rd_idx], sizeof(ib->data[rd_idx]), K_CACHE_INVD);
mlen |= (ib->data[rd_idx] << 8);
rd_idx = idx_cut(iblen, rd_idx + sizeof(ib->data[rd_idx]));
if (!buf) {
return mlen;
}
if (len < mlen) {
return -ENOMEM;
}
if (bytes_stored < mlen + sizeof(mlen)) {
/* Part of message not available. Should not happen. */
__ASSERT_NO_MSG(false);
return -EAGAIN;
}
len = MIN(len, mlen);
/* Read up to the end of the buffer. */
uint32_t sz = MIN(len, iblen - rd_idx);
sys_cache_data_range(&ib->data[rd_idx], sz, K_CACHE_INVD);
memcpy(buf, &ib->data[rd_idx], sz);
if (len > sz) {
/* Read remaining bytes starting from the buffer head. */
sys_cache_data_range(&ib->data[0], len - sz, K_CACHE_INVD);
memcpy(&buf[sz], &ib->data[0], len - sz);
}
/* Update read index - make other side aware data was read. */
__sync_synchronize();
rd_idx = idx_cut(iblen, rd_idx + len);
ib->rd_idx = rd_idx;
sys_cache_data_range(ib, sizeof(*ib), K_CACHE_WB);
return len;
}
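
To make the framing above concrete, a small sketch (not part of this commit) of the space accounting: every message costs its payload plus a 2-byte length header, and at most len - 1 bytes of data[] may ever be occupied. The 32-byte data area is an illustrative assumption:

#include <zephyr.h>
#include <ipc/ipc_icmsg_buf.h>

void icmsg_buf_capacity_example(void)
{
	/* 32 bytes of data[] after the icmsg_buf header. */
	static uint8_t region[32 + sizeof(struct icmsg_buf)] __aligned(4);
	struct icmsg_buf *ib = icmsg_buf_init(region, sizeof(region));
	char payload[29] = { 0 };

	/* max_len is 31 (32 - 1), so 29 payload bytes + 2 length bytes fit exactly. */
	int ok = icmsg_buf_write(ib, payload, sizeof(payload));

	/* The buffer is now full: even a 1-byte message (1 + 2 bytes) is rejected. */
	int full = icmsg_buf_write(ib, payload, 1);

	/* Peeking with a NULL buffer reports the 29-byte message still pending. */
	int pending = icmsg_buf_read(ib, NULL, 0);

	(void)ok; (void)full; (void)pending;
}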