From bb5b98f16c7dbea379acecf81b780699b1082cb9 Mon Sep 17 00:00:00 2001
From: Jakub Zymelka
Date: Fri, 14 Jun 2024 11:11:27 +0200
Subject: [PATCH] ipc: icmsg: Align to NO MULTITHREADING

Adapt icmsg to work without the MULTITHREADING functionality.
Dependencies on the kernel work queue, mutexes and other functions
related to running multithreaded applications have been 'ifdefed'.

Signed-off-by: Jakub Zymelka
---
 include/zephyr/ipc/icmsg.h               |   2 +
 subsys/ipc/ipc_service/lib/Kconfig.icmsg |   2 +
 subsys/ipc/ipc_service/lib/icmsg.c       | 102 +++++++++++++++++++----
 3 files changed, 89 insertions(+), 17 deletions(-)

diff --git a/include/zephyr/ipc/icmsg.h b/include/zephyr/ipc/icmsg.h
index 80e3412095a..9a04aa980a0 100644
--- a/include/zephyr/ipc/icmsg.h
+++ b/include/zephyr/ipc/icmsg.h
@@ -51,8 +51,10 @@ struct icmsg_data_t {
 
 	/* General */
 	const struct icmsg_config_t *cfg;
+#ifdef CONFIG_MULTITHREADING
 	struct k_work_delayable notify_work;
 	struct k_work mbox_work;
+#endif
 	atomic_t state;
 };
 
diff --git a/subsys/ipc/ipc_service/lib/Kconfig.icmsg b/subsys/ipc/ipc_service/lib/Kconfig.icmsg
index 39b8c8d0c75..7a9a93e86de 100644
--- a/subsys/ipc/ipc_service/lib/Kconfig.icmsg
+++ b/subsys/ipc/ipc_service/lib/Kconfig.icmsg
@@ -3,6 +3,7 @@
 
 config IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
 	bool "Synchronize access to shared memory"
+	depends on MULTITHREADING
 	default y
 	help
 	  Provide synchronization access to shared memory at a library level.
@@ -30,6 +31,7 @@ config IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS
 
 config IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE
 	bool "Use dedicated workqueue"
+	depends on MULTITHREADING
 	default y
 	help
 	  Enable dedicated workqueue thread for the ICMsg backend.
diff --git a/subsys/ipc/ipc_service/lib/icmsg.c b/subsys/ipc/ipc_service/lib/icmsg.c
index 37fbc30d89e..73fac78fd69 100644
--- a/subsys/ipc/ipc_service/lib/icmsg.c
+++ b/subsys/ipc/ipc_service/lib/icmsg.c
@@ -15,10 +15,10 @@
 #define BOND_NOTIFY_REPEAT_TO	K_MSEC(CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS)
 #define SHMEM_ACCESS_TO		K_MSEC(CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS)
 
-
 static const uint8_t magic[] = {0x45, 0x6d, 0x31, 0x6c, 0x31, 0x4b,
 				0x30, 0x72, 0x6e, 0x33, 0x6c, 0x69, 0x34};
 
+#ifdef CONFIG_MULTITHREADING
 #if defined(CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE)
 static K_THREAD_STACK_DEFINE(icmsg_stack, CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_STACK_SIZE);
 static struct k_work_q icmsg_workq;
@@ -26,6 +26,10 @@ static struct k_work_q *const workq = &icmsg_workq;
 #else
 static struct k_work_q *const workq = &k_sys_work_q;
 #endif
+static void mbox_callback_process(struct k_work *item);
+#else
+static void mbox_callback_process(struct icmsg_data_t *dev_data);
+#endif
 
 static int mbox_deinit(const struct icmsg_config_t *conf,
 		       struct icmsg_data_t *dev_data)
@@ -42,12 +46,20 @@ static int mbox_deinit(const struct icmsg_config_t *conf,
 		return err;
 	}
 
+#ifdef CONFIG_MULTITHREADING
 	(void)k_work_cancel(&dev_data->mbox_work);
 	(void)k_work_cancel_delayable(&dev_data->notify_work);
+#endif
 
 	return 0;
 }
 
+static bool is_endpoint_ready(struct icmsg_data_t *dev_data)
+{
+	return atomic_get(&dev_data->state) == ICMSG_STATE_READY;
+}
+
+#ifdef CONFIG_MULTITHREADING
 static void notify_process(struct k_work *item)
 {
 	struct k_work_delayable *dwork = k_work_delayable_from_work(item);
@@ -66,37 +78,53 @@ static void notify_process(struct k_work *item)
 		(void)ret;
 	}
 }
-
-static bool is_endpoint_ready(struct icmsg_data_t *dev_data)
+#else
+static void notify_process(struct icmsg_data_t *dev_data)
 {
-	return atomic_get(&dev_data->state) == ICMSG_STATE_READY;
-}
+
+	(void)mbox_send_dt(&dev_data->cfg->mbox_tx, NULL);
+#if defined(CONFIG_SYS_CLOCK_EXISTS)
+	int64_t start = k_uptime_get();
+#endif
+	while (false == is_endpoint_ready(dev_data)) {
+		mbox_callback_process(dev_data);
+
+#if defined(CONFIG_SYS_CLOCK_EXISTS)
+		if ((k_uptime_get() - start) > CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS) {
+#endif
+			(void)mbox_send_dt(&dev_data->cfg->mbox_tx, NULL);
+#if defined(CONFIG_SYS_CLOCK_EXISTS)
+			start = k_uptime_get();
+		};
+#endif
+	}
+}
+#endif
+
+#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
 static int reserve_tx_buffer_if_unused(struct icmsg_data_t *dev_data)
 {
-#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
 	int ret = k_mutex_lock(&dev_data->tx_lock, SHMEM_ACCESS_TO);
 
 	if (ret < 0) {
 		return ret;
 	}
-#endif
+
 	return 0;
 }
 
 static int release_tx_buffer(struct icmsg_data_t *dev_data)
 {
-#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
 	return k_mutex_unlock(&dev_data->tx_lock);
-#endif
-	return 0;
 }
+#endif
 
 static uint32_t data_available(struct icmsg_data_t *dev_data)
 {
 	return pbuf_read(dev_data->rx_pb, NULL, 0);
 }
 
+#ifdef CONFIG_MULTITHREADING
 static void submit_mbox_work(struct icmsg_data_t *dev_data)
 {
 	if (k_work_submit_to_queue(workq, &dev_data->mbox_work) < 0) {
@@ -121,10 +149,33 @@ static void submit_work_if_buffer_free_and_data_available(
 
 	submit_mbox_work(dev_data);
 }
-
-static void mbox_callback_process(struct k_work *item)
+#else
+static void submit_if_buffer_free(struct icmsg_data_t *dev_data)
 {
+	mbox_callback_process(dev_data);
+}
+
+static void submit_if_buffer_free_and_data_available(
+		struct icmsg_data_t *dev_data)
+{
+
+	if (!data_available(dev_data)) {
+		return;
+	}
+
+	mbox_callback_process(dev_data);
+}
+#endif
+
+#ifdef CONFIG_MULTITHREADING
+static void mbox_callback_process(struct k_work *item)
+#else
+static void mbox_callback_process(struct icmsg_data_t *dev_data)
+#endif
+{
+#ifdef CONFIG_MULTITHREADING
 	struct icmsg_data_t *dev_data = CONTAINER_OF(item, struct icmsg_data_t, mbox_work);
+#endif
 
 	atomic_t state = atomic_get(&dev_data->state);
 
@@ -141,8 +192,7 @@ static void mbox_callback_process(struct k_work *item)
 
 	if (state == ICMSG_STATE_READY) {
 		if (dev_data->cb->received) {
-			dev_data->cb->received(rx_buffer, len,
-					       dev_data->ctx);
+			dev_data->cb->received(rx_buffer, len, dev_data->ctx);
 		}
 	} else {
 		__ASSERT_NO_MSG(state == ICMSG_STATE_BUSY);
@@ -162,15 +212,22 @@ static void mbox_callback_process(struct k_work *item)
 
 		atomic_set(&dev_data->state, ICMSG_STATE_READY);
 	}
-
+#ifdef CONFIG_MULTITHREADING
 	submit_work_if_buffer_free_and_data_available(dev_data);
+#else
+	submit_if_buffer_free_and_data_available(dev_data);
+#endif
 }
 
 static void mbox_callback(const struct device *instance, uint32_t channel,
			  void *user_data, struct mbox_msg *msg_data)
 {
 	struct icmsg_data_t *dev_data = user_data;
+#ifdef CONFIG_MULTITHREADING
 	submit_work_if_buffer_free(dev_data);
+#else
+	submit_if_buffer_free(dev_data);
+#endif
 }
 
 static int mbox_init(const struct icmsg_config_t *conf,
@@ -178,8 +235,10 @@
 {
 	int err;
 
+#ifdef CONFIG_MULTITHREADING
 	k_work_init(&dev_data->mbox_work, mbox_callback_process);
 	k_work_init_delayable(&dev_data->notify_work, notify_process);
+#endif
 
 	err = mbox_register_callback_dt(&conf->mbox_rx, mbox_callback, dev_data);
 	if (err != 0) {
@@ -233,12 +292,14 @@ int icmsg_open(const struct icmsg_config_t *conf,
 	if (ret) {
 		return ret;
 	}
-
+#ifdef CONFIG_MULTITHREADING
 	ret = k_work_schedule_for_queue(workq, &dev_data->notify_work, K_NO_WAIT);
 	if (ret < 0) {
 		return ret;
 	}
-
+#else
+	notify_process(dev_data);
+#endif
 	return 0;
 }
 
@@ -263,7 +324,9 @@ int icmsg_send(const struct icmsg_config_t *conf,
 {
 	int ret;
 	int write_ret;
+#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
 	int release_ret;
+#endif
 	int sent_bytes;
 
 	if (!is_endpoint_ready(dev_data)) {
@@ -275,14 +338,19 @@ int icmsg_send(const struct icmsg_config_t *conf,
 		return -ENODATA;
 	}
 
+#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
 	ret = reserve_tx_buffer_if_unused(dev_data);
 	if (ret < 0) {
 		return -ENOBUFS;
 	}
+#endif
 
 	write_ret = pbuf_write(dev_data->tx_pb, msg, len);
+
+#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC
 	release_ret = release_tx_buffer(dev_data);
 	__ASSERT_NO_MSG(!release_ret);
+#endif
 
 	if (write_ret < 0) {
 		return write_ret;