ipc: icmsg: Align to NO MULTITHREADING

Adapt icmsg to work without the MULTITHREADING functionality.
Dependencies on the kernel work queue, mutexes, and other functions
related to running multithreaded applications are now guarded with
#ifdef.

Signed-off-by: Jakub Zymelka <jakub.zymelka@nordicsemi.no>
Jakub Zymelka 2024-06-14 11:11:27 +02:00 committed by Carles Cufí
commit bb5b98f16c
3 changed files with 89 additions and 17 deletions
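
The pattern throughout the change is plain conditional compilation: when
CONFIG_MULTITHREADING is enabled, mailbox events are deferred to a
workqueue; when it is disabled, the same processing runs synchronously in
the caller's context. The following is a minimal standalone sketch of that
shape, not code from this commit; the names my_data, my_init, process_event
and on_event are illustrative only.

#include <zephyr/kernel.h>

struct my_data {
#ifdef CONFIG_MULTITHREADING
        struct k_work work;       /* work item exists only in threaded builds */
#endif
        atomic_t state;
};

#ifdef CONFIG_MULTITHREADING
static void process_event(struct k_work *item)
{
        /* Workqueue entry point: recover the containing object. */
        struct my_data *data = CONTAINER_OF(item, struct my_data, work);

        atomic_inc(&data->state); /* actual event handling would go here */
}
#else
static void process_event(struct my_data *data)
{
        atomic_inc(&data->state); /* same handling, run in place */
}
#endif

static void my_init(struct my_data *data)
{
#ifdef CONFIG_MULTITHREADING
        k_work_init(&data->work, process_event);
#endif
        atomic_set(&data->state, 0);
}

static void on_event(struct my_data *data)
{
#ifdef CONFIG_MULTITHREADING
        (void)k_work_submit(&data->work); /* defer to the system workqueue */
#else
        process_event(data);              /* no threads: handle immediately */
#endif
}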


@@ -51,8 +51,10 @@ struct icmsg_data_t {
         /* General */
         const struct icmsg_config_t *cfg;
+#ifdef CONFIG_MULTITHREADING
         struct k_work_delayable notify_work;
         struct k_work mbox_work;
+#endif
         atomic_t state;
 };


@@ -3,6 +3,7 @@
 config IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
         bool "Synchronize access to shared memory"
+        depends on MULTITHREADING
         default y
         help
           Provide synchronization access to shared memory at a library level.

@@ -30,6 +31,7 @@ config IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS
 config IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE
         bool "Use dedicated workqueue"
+        depends on MULTITHREADING
         default y
         help
           Enable dedicated workqueue thread for the ICMsg backend.
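
Because both options above now depend on MULTITHREADING, they simply become
unavailable (and default off) in single-threaded builds rather than failing
to compile. A hypothetical prj.conf fragment for such a build might look
like this (CONFIG_IPC_SERVICE_BACKEND_ICMSG selects the ICMsg backend; the
two guarded options are hidden and therefore disabled):

# Single-threaded application using the ICMsg backend (illustrative).
CONFIG_MULTITHREADING=n
CONFIG_IPC_SERVICE=y
CONFIG_IPC_SERVICE_BACKEND_ICMSG=y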


@@ -15,10 +15,10 @@
 #define BOND_NOTIFY_REPEAT_TO K_MSEC(CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS)
 #define SHMEM_ACCESS_TO K_MSEC(CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS)

 static const uint8_t magic[] = {0x45, 0x6d, 0x31, 0x6c, 0x31, 0x4b,
                                 0x30, 0x72, 0x6e, 0x33, 0x6c, 0x69, 0x34};

+#ifdef CONFIG_MULTITHREADING
 #if defined(CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE)
 static K_THREAD_STACK_DEFINE(icmsg_stack, CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_STACK_SIZE);
 static struct k_work_q icmsg_workq;

@@ -26,6 +26,10 @@ static struct k_work_q *const workq = &icmsg_workq;
 #else
 static struct k_work_q *const workq = &k_sys_work_q;
 #endif
+static void mbox_callback_process(struct k_work *item);
+#else
+static void mbox_callback_process(struct icmsg_data_t *dev_data);
+#endif

 static int mbox_deinit(const struct icmsg_config_t *conf,
                        struct icmsg_data_t *dev_data)

@@ -42,12 +46,20 @@ static int mbox_deinit(const struct icmsg_config_t *conf,
                 return err;
         }

+#ifdef CONFIG_MULTITHREADING
         (void)k_work_cancel(&dev_data->mbox_work);
         (void)k_work_cancel_delayable(&dev_data->notify_work);
+#endif

         return 0;
 }

+static bool is_endpoint_ready(struct icmsg_data_t *dev_data)
+{
+        return atomic_get(&dev_data->state) == ICMSG_STATE_READY;
+}
+
+#ifdef CONFIG_MULTITHREADING
 static void notify_process(struct k_work *item)
 {
         struct k_work_delayable *dwork = k_work_delayable_from_work(item);
@@ -66,37 +78,53 @@ static void notify_process(struct k_work *item)
                 (void)ret;
         }
 }
-static bool is_endpoint_ready(struct icmsg_data_t *dev_data)
-{
-        return atomic_get(&dev_data->state) == ICMSG_STATE_READY;
-}
+#else
+static void notify_process(struct icmsg_data_t *dev_data)
+{
+        (void)mbox_send_dt(&dev_data->cfg->mbox_tx, NULL);
+#if defined(CONFIG_SYS_CLOCK_EXISTS)
+        int64_t start = k_uptime_get();
+#endif
+
+        while (false == is_endpoint_ready(dev_data)) {
+                mbox_callback_process(dev_data);
+
+#if defined(CONFIG_SYS_CLOCK_EXISTS)
+                if ((k_uptime_get() - start) > CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS) {
+#endif
+                        (void)mbox_send_dt(&dev_data->cfg->mbox_tx, NULL);
+#if defined(CONFIG_SYS_CLOCK_EXISTS)
+                        start = k_uptime_get();
+                };
+#endif
+        }
+}
+#endif

+#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
 static int reserve_tx_buffer_if_unused(struct icmsg_data_t *dev_data)
 {
-#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
         int ret = k_mutex_lock(&dev_data->tx_lock, SHMEM_ACCESS_TO);

         if (ret < 0) {
                 return ret;
         }
-#endif

         return 0;
 }

 static int release_tx_buffer(struct icmsg_data_t *dev_data)
 {
-#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
         return k_mutex_unlock(&dev_data->tx_lock);
-#endif
-        return 0;
 }
+#endif

 static uint32_t data_available(struct icmsg_data_t *dev_data)
 {
         return pbuf_read(dev_data->rx_pb, NULL, 0);
 }

+#ifdef CONFIG_MULTITHREADING
 static void submit_mbox_work(struct icmsg_data_t *dev_data)
 {
         if (k_work_submit_to_queue(workq, &dev_data->mbox_work) < 0) {
@@ -121,10 +149,33 @@ static void submit_work_if_buffer_free_and_data_available(

         submit_mbox_work(dev_data);
 }
+#else
+static void submit_if_buffer_free(struct icmsg_data_t *dev_data)
+{
+        mbox_callback_process(dev_data);
+}
+
+static void submit_if_buffer_free_and_data_available(
+                struct icmsg_data_t *dev_data)
+{
+        if (!data_available(dev_data)) {
+                return;
+        }
+
+        mbox_callback_process(dev_data);
+}
+#endif

-static void mbox_callback_process(struct k_work *item)
-{
+#ifdef CONFIG_MULTITHREADING
+static void mbox_callback_process(struct k_work *item)
+#else
+static void mbox_callback_process(struct icmsg_data_t *dev_data)
+#endif
+{
+#ifdef CONFIG_MULTITHREADING
         struct icmsg_data_t *dev_data = CONTAINER_OF(item, struct icmsg_data_t, mbox_work);
+#endif

         atomic_t state = atomic_get(&dev_data->state);
@@ -141,8 +192,7 @@ static void mbox_callback_process(struct k_work *item)
         if (state == ICMSG_STATE_READY) {
                 if (dev_data->cb->received) {
-                        dev_data->cb->received(rx_buffer, len,
-                                               dev_data->ctx);
+                        dev_data->cb->received(rx_buffer, len, dev_data->ctx);
                 }
         } else {
                 __ASSERT_NO_MSG(state == ICMSG_STATE_BUSY);
@@ -162,15 +212,22 @@ static void mbox_callback_process(struct k_work *item)
                 atomic_set(&dev_data->state, ICMSG_STATE_READY);
         }

+#ifdef CONFIG_MULTITHREADING
         submit_work_if_buffer_free_and_data_available(dev_data);
+#else
+        submit_if_buffer_free_and_data_available(dev_data);
+#endif
 }

 static void mbox_callback(const struct device *instance, uint32_t channel,
                           void *user_data, struct mbox_msg *msg_data)
 {
         struct icmsg_data_t *dev_data = user_data;

+#ifdef CONFIG_MULTITHREADING
         submit_work_if_buffer_free(dev_data);
+#else
+        submit_if_buffer_free(dev_data);
+#endif
 }

 static int mbox_init(const struct icmsg_config_t *conf,
@@ -178,8 +235,10 @@ static int mbox_init(const struct icmsg_config_t *conf,
 {
         int err;

+#ifdef CONFIG_MULTITHREADING
         k_work_init(&dev_data->mbox_work, mbox_callback_process);
         k_work_init_delayable(&dev_data->notify_work, notify_process);
+#endif

         err = mbox_register_callback_dt(&conf->mbox_rx, mbox_callback, dev_data);
         if (err != 0) {
@@ -233,12 +292,14 @@ int icmsg_open(const struct icmsg_config_t *conf,
         if (ret) {
                 return ret;
         }

+#ifdef CONFIG_MULTITHREADING
         ret = k_work_schedule_for_queue(workq, &dev_data->notify_work, K_NO_WAIT);
         if (ret < 0) {
                 return ret;
         }
+#else
+        notify_process(dev_data);
+#endif

         return 0;
 }
@@ -263,7 +324,9 @@ int icmsg_send(const struct icmsg_config_t *conf,
 {
         int ret;
         int write_ret;
+#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
         int release_ret;
+#endif
         int sent_bytes;

         if (!is_endpoint_ready(dev_data)) {

@@ -275,14 +338,19 @@ int icmsg_send(const struct icmsg_config_t *conf,
                 return -ENODATA;
         }

+#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
         ret = reserve_tx_buffer_if_unused(dev_data);
         if (ret < 0) {
                 return -ENOBUFS;
         }
+#endif

         write_ret = pbuf_write(dev_data->tx_pb, msg, len);

+#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
         release_ret = release_tx_buffer(dev_data);
         __ASSERT_NO_MSG(!release_ret);
+#endif

         if (write_ret < 0) {
                 return write_ret;